| problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens | num_tokens_diff |
|---|---|---|---|---|---|---|---|---|
| stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.1k-25.4k | stringlengths 145-5.13k | stringlengths 582-39.1k | int64 271-4.1k | int64 47-1.02k |
gh_patches_debug_67224 | rasdani/github-patches | git_diff | svthalia__concrexit-1677 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: 'view site' from event detail admin forwards to wrong url
In GitLab by @JobDoesburg on Dec 4, 2019, 19:43
<!--
You want something new.
-->
### One-sentence description
Improve navigation between admin views for event admin
### Motivation
Currently it works terrible.
### Desired functionality
On save, go to the view that makes sense instead of very often go back to the event overview.
Also maybe provide buttons that take you to the event detail overview page, or to the frontend view, etc etc
I don't yet have clear suggestions but I think everyone can think of their own what makes sense.
### Suggested implementation
<!--
If you have any notes on how we could achieve this feature,
share them here.
-->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/events/admin_views.py`
Content:
```
1 import csv
2
3 from django.conf import settings
4 from django.contrib import messages
5 from django.contrib.admin import helpers
6 from django.contrib.admin.views.decorators import staff_member_required
7 from django.contrib.auth.mixins import PermissionRequiredMixin
8 from django.http import HttpResponse
9 from django.shortcuts import get_object_or_404, redirect
10 from django.utils import timezone
11 from django.utils.decorators import method_decorator
12 from django.utils.text import slugify
13 from django.utils.translation import pgettext_lazy
14 from django.utils.translation import gettext_lazy as _
15 from django.views import View
16 from django.views.generic import DetailView, FormView
17
18 from events import services
19 from events.decorators import organiser_only
20 from events.exceptions import RegistrationError
21 from events.forms import FieldsForm, EventMessageForm
22 from payments.models import Payment
23 from pushnotifications.models import Message, Category
24 from .models import Event, EventRegistration
25
26
27 @method_decorator(staff_member_required, name="dispatch")
28 @method_decorator(organiser_only, name="dispatch")
29 class EventAdminDetails(DetailView, PermissionRequiredMixin):
30 """Render an overview of registrations for the specified event."""
31
32 template_name = "events/admin/details.html"
33 model = Event
34 context_object_name = "event"
35 permission_required = "events.change_event"
36
37 def get_context_data(self, **kwargs):
38 context = super().get_context_data(**kwargs)
39
40 context.update({"payment": Payment, "has_permission": True, "site_url": True})
41
42 return context
43
44
45 @method_decorator(staff_member_required, name="dispatch")
46 @method_decorator(organiser_only, name="dispatch")
47 class RegistrationAdminFields(FormView):
48 """Render a form that allows the user to change the details of their registration.
49
50 The user should be authenticated.
51 """
52
53 form_class = FieldsForm
54 template_name = "admin/change_form.html"
55 registration = None
56 admin = None
57
58 def get_context_data(self, **kwargs):
59 context = super().get_context_data(**kwargs)
60 context.update(
61 {
62 **self.admin.admin_site.each_context(self.request),
63 "add": False,
64 "change": True,
65 "has_view_permission": True,
66 "has_add_permission": False,
67 "has_change_permission": self.request.user.has_perms(
68 "events.change_registration"
69 ),
70 "has_delete_permission": False,
71 "has_editable_inline_admin_formsets": False,
72 "app_label": "events",
73 "opts": self.registration._meta,
74 "is_popup": False,
75 "save_as": False,
76 "save_on_top": False,
77 "original": self.registration,
78 "obj_id": self.registration.pk,
79 "title": _("Change registration fields"),
80 "adminform": helpers.AdminForm(
81 context["form"],
82 ((None, {"fields": context["form"].fields.keys()}),),
83 {},
84 ),
85 }
86 )
87 return context
88
89 def get_form_kwargs(self):
90 kwargs = super().get_form_kwargs()
91 kwargs["fields"] = services.registration_fields(
92 self.request, registration=self.registration
93 )
94 return kwargs
95
96 def form_valid(self, form):
97 values = form.field_values()
98 try:
99 services.update_registration(
100 registration=self.registration, field_values=values
101 )
102 messages.success(self.request, _("Registration successfully saved."))
103 if "_save" in self.request.POST:
104 return redirect(
105 "admin:events_eventregistration_change", self.registration.pk
106 )
107 except RegistrationError as e:
108 messages.error(self.request, e)
109 return self.render_to_response(self.get_context_data(form=form))
110
111 def dispatch(self, request, *args, **kwargs):
112 self.registration = get_object_or_404(
113 EventRegistration, pk=self.kwargs["registration"]
114 )
115 try:
116 if self.registration.event.has_fields:
117 return super().dispatch(request, *args, **kwargs)
118 except RegistrationError:
119 pass
120 return redirect("admin:events_eventregistration_change", self.registration.pk)
121
122
123 @method_decorator(staff_member_required, name="dispatch")
124 @method_decorator(organiser_only, name="dispatch")
125 class EventMessage(FormView):
126 """Renders a form that allows the user to create a push notification for all users registers to the event."""
127
128 form_class = EventMessageForm
129 template_name = "events/admin/message_form.html"
130 admin = None
131 event = None
132
133 def get_context_data(self, **kwargs):
134 context = super().get_context_data(**kwargs)
135 context.update(
136 {
137 **self.admin.admin_site.each_context(self.request),
138 "add": False,
139 "change": True,
140 "has_view_permission": True,
141 "has_add_permission": False,
142 "has_change_permission": self.request.user.has_perms(
143 "events.change_event"
144 ),
145 "has_delete_permission": False,
146 "has_editable_inline_admin_formsets": False,
147 "app_label": "events",
148 "opts": self.event._meta,
149 "is_popup": False,
150 "save_as": False,
151 "save_on_top": False,
152 "original": self.event,
153 "obj_id": self.event.pk,
154 "title": _("Send push notification"),
155 "adminform": helpers.AdminForm(
156 context["form"],
157 ((None, {"fields": context["form"].fields.keys()}),),
158 {},
159 ),
160 }
161 )
162 return context
163
164 def form_valid(self, form):
165 values = form.cleaned_data
166 if not values["url"]:
167 values["url"] = settings.BASE_URL + self.event.get_absolute_url()
168 message = Message(
169 title_en=values["title_en"],
170 body_en=values["body_en"],
171 url=values["url"],
172 category=Category.objects.get(key=Category.EVENT),
173 )
174 message.save()
175 message.users.set([r.member for r in self.event.participants if r.member])
176 message.send()
177
178 messages.success(self.request, _("Message sent successfully."))
179 if "_save" in self.request.POST:
180 return redirect("admin:events_event_details", self.event.pk)
181 return super().form_valid(form)
182
183 def dispatch(self, request, *args, **kwargs):
184 self.event = get_object_or_404(Event, pk=self.kwargs["pk"])
185 return super().dispatch(request, *args, **kwargs)
186
187
188 @method_decorator(staff_member_required, name="dispatch")
189 @method_decorator(organiser_only, name="dispatch")
190 class EventRegistrationsExport(View, PermissionRequiredMixin):
191 """View to export registrations."""
192
193 template_name = "events/admin/details.html"
194 permission_required = "events.change_event"
195
196 def get(self, request, pk):
197 """Export the registration of a specified event.
198
199 :param request: the request object
200 :param pk: the primary key of the event
201 :return: A CSV containing all registrations for the event
202 """
203 event = get_object_or_404(Event, pk=pk)
204 extra_fields = event.registrationinformationfield_set.all()
205 registrations = event.eventregistration_set.all()
206
207 header_fields = (
208 [
209 _("Name"),
210 _("Email"),
211 _("Paid"),
212 _("Present"),
213 _("Status"),
214 _("Phone number"),
215 ]
216 + [field.name for field in extra_fields]
217 + [_("Date"), _("Date cancelled")]
218 )
219
220 rows = []
221 if event.price == 0:
222 header_fields.remove(_("Paid"))
223 for registration in registrations:
224 if registration.member:
225 name = registration.member.get_full_name()
226 else:
227 name = registration.name
228 status = pgettext_lazy("registration status", "registered").capitalize()
229 cancelled = None
230 if registration.date_cancelled:
231
232 if registration.is_late_cancellation():
233 status = pgettext_lazy(
234 "registration status", "late cancellation"
235 ).capitalize()
236 else:
237 status = pgettext_lazy(
238 "registration status", "cancelled"
239 ).capitalize()
240 cancelled = timezone.localtime(registration.date_cancelled)
241
242 elif registration.queue_position:
243 status = pgettext_lazy("registration status", "waiting")
244 data = {
245 _("Name"): name,
246 _("Date"): timezone.localtime(registration.date),
247 _("Present"): _("Yes") if registration.present else "",
248 _("Phone number"): (
249 registration.member.profile.phone_number
250 if registration.member
251 else ""
252 ),
253 _("Email"): (registration.member.email if registration.member else ""),
254 _("Status"): status,
255 _("Date cancelled"): cancelled,
256 }
257 if event.price > 0:
258 if registration.is_paid():
259 data[_("Paid")] = registration.payment.get_type_display()
260 else:
261 data[_("Paid")] = _("No")
262
263 data.update(
264 {
265 field["field"].name: field["value"]
266 for field in registration.information_fields
267 }
268 )
269 rows.append(data)
270
271 response = HttpResponse(content_type="text/csv")
272 writer = csv.DictWriter(response, header_fields)
273 writer.writeheader()
274
275 rows = sorted(
276 rows,
277 key=lambda row: (
278 row[_("Status")]
279 == pgettext_lazy(
280 "registration status", "late cancellation"
281 ).capitalize(),
282 row[_("Date")],
283 ),
284 reverse=True,
285 )
286
287 for row in rows:
288 writer.writerow(row)
289
290 response["Content-Disposition"] = 'attachment; filename="{}.csv"'.format(
291 slugify(event.title)
292 )
293 return response
294
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/events/admin_views.py b/website/events/admin_views.py
--- a/website/events/admin_views.py
+++ b/website/events/admin_views.py
@@ -37,7 +37,7 @@
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
- context.update({"payment": Payment, "has_permission": True, "site_url": True})
+ context.update({"payment": Payment, "has_permission": True, "site_url": "/"})
return context
| {"golden_diff": "diff --git a/website/events/admin_views.py b/website/events/admin_views.py\n--- a/website/events/admin_views.py\n+++ b/website/events/admin_views.py\n@@ -37,7 +37,7 @@\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n \n- context.update({\"payment\": Payment, \"has_permission\": True, \"site_url\": True})\n+ context.update({\"payment\": Payment, \"has_permission\": True, \"site_url\": \"/\"})\n \n return context\n", "issue": "Bug: 'view site' from event detail admin forwards to wrong url\nIn GitLab by @JobDoesburg on Dec 4, 2019, 19:43\n\n<!--\n You want something new.\n-->\n\n### One-sentence description\n\nImprove navigation between admin views for event admin\n\n### Motivation\n\nCurrently it works terrible. \n\n### Desired functionality\n\nOn save, go to the view that makes sense instead of very often go back to the event overview. \n\nAlso maybe provide buttons that take you to the event detail overview page, or to the frontend view, etc etc\n\nI don't yet have clear suggestions but I think everyone can think of their own what makes sense.\n\n\n### Suggested implementation\n\n<!--\n If you have any notes on how we could achieve this feature,\n share them here.\n-->\n", "before_files": [{"content": "import csv\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.admin import helpers\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.text import slugify\nfrom django.utils.translation import pgettext_lazy\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\nfrom django.views.generic import DetailView, FormView\n\nfrom events import services\nfrom events.decorators import organiser_only\nfrom events.exceptions import RegistrationError\nfrom events.forms import FieldsForm, EventMessageForm\nfrom payments.models import Payment\nfrom pushnotifications.models import Message, Category\nfrom .models import Event, EventRegistration\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventAdminDetails(DetailView, PermissionRequiredMixin):\n \"\"\"Render an overview of registrations for the specified event.\"\"\"\n\n template_name = \"events/admin/details.html\"\n model = Event\n context_object_name = \"event\"\n permission_required = \"events.change_event\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context.update({\"payment\": Payment, \"has_permission\": True, \"site_url\": True})\n\n return context\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass RegistrationAdminFields(FormView):\n \"\"\"Render a form that allows the user to change the details of their registration.\n\n The user should be authenticated.\n \"\"\"\n\n form_class = FieldsForm\n template_name = \"admin/change_form.html\"\n registration = None\n admin = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n **self.admin.admin_site.each_context(self.request),\n \"add\": False,\n \"change\": True,\n \"has_view_permission\": True,\n \"has_add_permission\": 
False,\n \"has_change_permission\": self.request.user.has_perms(\n \"events.change_registration\"\n ),\n \"has_delete_permission\": False,\n \"has_editable_inline_admin_formsets\": False,\n \"app_label\": \"events\",\n \"opts\": self.registration._meta,\n \"is_popup\": False,\n \"save_as\": False,\n \"save_on_top\": False,\n \"original\": self.registration,\n \"obj_id\": self.registration.pk,\n \"title\": _(\"Change registration fields\"),\n \"adminform\": helpers.AdminForm(\n context[\"form\"],\n ((None, {\"fields\": context[\"form\"].fields.keys()}),),\n {},\n ),\n }\n )\n return context\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"fields\"] = services.registration_fields(\n self.request, registration=self.registration\n )\n return kwargs\n\n def form_valid(self, form):\n values = form.field_values()\n try:\n services.update_registration(\n registration=self.registration, field_values=values\n )\n messages.success(self.request, _(\"Registration successfully saved.\"))\n if \"_save\" in self.request.POST:\n return redirect(\n \"admin:events_eventregistration_change\", self.registration.pk\n )\n except RegistrationError as e:\n messages.error(self.request, e)\n return self.render_to_response(self.get_context_data(form=form))\n\n def dispatch(self, request, *args, **kwargs):\n self.registration = get_object_or_404(\n EventRegistration, pk=self.kwargs[\"registration\"]\n )\n try:\n if self.registration.event.has_fields:\n return super().dispatch(request, *args, **kwargs)\n except RegistrationError:\n pass\n return redirect(\"admin:events_eventregistration_change\", self.registration.pk)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventMessage(FormView):\n \"\"\"Renders a form that allows the user to create a push notification for all users registers to the event.\"\"\"\n\n form_class = EventMessageForm\n template_name = \"events/admin/message_form.html\"\n admin = None\n event = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n **self.admin.admin_site.each_context(self.request),\n \"add\": False,\n \"change\": True,\n \"has_view_permission\": True,\n \"has_add_permission\": False,\n \"has_change_permission\": self.request.user.has_perms(\n \"events.change_event\"\n ),\n \"has_delete_permission\": False,\n \"has_editable_inline_admin_formsets\": False,\n \"app_label\": \"events\",\n \"opts\": self.event._meta,\n \"is_popup\": False,\n \"save_as\": False,\n \"save_on_top\": False,\n \"original\": self.event,\n \"obj_id\": self.event.pk,\n \"title\": _(\"Send push notification\"),\n \"adminform\": helpers.AdminForm(\n context[\"form\"],\n ((None, {\"fields\": context[\"form\"].fields.keys()}),),\n {},\n ),\n }\n )\n return context\n\n def form_valid(self, form):\n values = form.cleaned_data\n if not values[\"url\"]:\n values[\"url\"] = settings.BASE_URL + self.event.get_absolute_url()\n message = Message(\n title_en=values[\"title_en\"],\n body_en=values[\"body_en\"],\n url=values[\"url\"],\n category=Category.objects.get(key=Category.EVENT),\n )\n message.save()\n message.users.set([r.member for r in self.event.participants if r.member])\n message.send()\n\n messages.success(self.request, _(\"Message sent successfully.\"))\n if \"_save\" in self.request.POST:\n return redirect(\"admin:events_event_details\", self.event.pk)\n return super().form_valid(form)\n\n def dispatch(self, request, *args, **kwargs):\n 
self.event = get_object_or_404(Event, pk=self.kwargs[\"pk\"])\n return super().dispatch(request, *args, **kwargs)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventRegistrationsExport(View, PermissionRequiredMixin):\n \"\"\"View to export registrations.\"\"\"\n\n template_name = \"events/admin/details.html\"\n permission_required = \"events.change_event\"\n\n def get(self, request, pk):\n \"\"\"Export the registration of a specified event.\n\n :param request: the request object\n :param pk: the primary key of the event\n :return: A CSV containing all registrations for the event\n \"\"\"\n event = get_object_or_404(Event, pk=pk)\n extra_fields = event.registrationinformationfield_set.all()\n registrations = event.eventregistration_set.all()\n\n header_fields = (\n [\n _(\"Name\"),\n _(\"Email\"),\n _(\"Paid\"),\n _(\"Present\"),\n _(\"Status\"),\n _(\"Phone number\"),\n ]\n + [field.name for field in extra_fields]\n + [_(\"Date\"), _(\"Date cancelled\")]\n )\n\n rows = []\n if event.price == 0:\n header_fields.remove(_(\"Paid\"))\n for registration in registrations:\n if registration.member:\n name = registration.member.get_full_name()\n else:\n name = registration.name\n status = pgettext_lazy(\"registration status\", \"registered\").capitalize()\n cancelled = None\n if registration.date_cancelled:\n\n if registration.is_late_cancellation():\n status = pgettext_lazy(\n \"registration status\", \"late cancellation\"\n ).capitalize()\n else:\n status = pgettext_lazy(\n \"registration status\", \"cancelled\"\n ).capitalize()\n cancelled = timezone.localtime(registration.date_cancelled)\n\n elif registration.queue_position:\n status = pgettext_lazy(\"registration status\", \"waiting\")\n data = {\n _(\"Name\"): name,\n _(\"Date\"): timezone.localtime(registration.date),\n _(\"Present\"): _(\"Yes\") if registration.present else \"\",\n _(\"Phone number\"): (\n registration.member.profile.phone_number\n if registration.member\n else \"\"\n ),\n _(\"Email\"): (registration.member.email if registration.member else \"\"),\n _(\"Status\"): status,\n _(\"Date cancelled\"): cancelled,\n }\n if event.price > 0:\n if registration.is_paid():\n data[_(\"Paid\")] = registration.payment.get_type_display()\n else:\n data[_(\"Paid\")] = _(\"No\")\n\n data.update(\n {\n field[\"field\"].name: field[\"value\"]\n for field in registration.information_fields\n }\n )\n rows.append(data)\n\n response = HttpResponse(content_type=\"text/csv\")\n writer = csv.DictWriter(response, header_fields)\n writer.writeheader()\n\n rows = sorted(\n rows,\n key=lambda row: (\n row[_(\"Status\")]\n == pgettext_lazy(\n \"registration status\", \"late cancellation\"\n ).capitalize(),\n row[_(\"Date\")],\n ),\n reverse=True,\n )\n\n for row in rows:\n writer.writerow(row)\n\n response[\"Content-Disposition\"] = 'attachment; filename=\"{}.csv\"'.format(\n slugify(event.title)\n )\n return response\n", "path": "website/events/admin_views.py"}], "after_files": [{"content": "import csv\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.admin import helpers\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.text import slugify\nfrom 
django.utils.translation import pgettext_lazy\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\nfrom django.views.generic import DetailView, FormView\n\nfrom events import services\nfrom events.decorators import organiser_only\nfrom events.exceptions import RegistrationError\nfrom events.forms import FieldsForm, EventMessageForm\nfrom payments.models import Payment\nfrom pushnotifications.models import Message, Category\nfrom .models import Event, EventRegistration\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventAdminDetails(DetailView, PermissionRequiredMixin):\n \"\"\"Render an overview of registrations for the specified event.\"\"\"\n\n template_name = \"events/admin/details.html\"\n model = Event\n context_object_name = \"event\"\n permission_required = \"events.change_event\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context.update({\"payment\": Payment, \"has_permission\": True, \"site_url\": \"/\"})\n\n return context\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass RegistrationAdminFields(FormView):\n \"\"\"Render a form that allows the user to change the details of their registration.\n\n The user should be authenticated.\n \"\"\"\n\n form_class = FieldsForm\n template_name = \"admin/change_form.html\"\n registration = None\n admin = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n **self.admin.admin_site.each_context(self.request),\n \"add\": False,\n \"change\": True,\n \"has_view_permission\": True,\n \"has_add_permission\": False,\n \"has_change_permission\": self.request.user.has_perms(\n \"events.change_registration\"\n ),\n \"has_delete_permission\": False,\n \"has_editable_inline_admin_formsets\": False,\n \"app_label\": \"events\",\n \"opts\": self.registration._meta,\n \"is_popup\": False,\n \"save_as\": False,\n \"save_on_top\": False,\n \"original\": self.registration,\n \"obj_id\": self.registration.pk,\n \"title\": _(\"Change registration fields\"),\n \"adminform\": helpers.AdminForm(\n context[\"form\"],\n ((None, {\"fields\": context[\"form\"].fields.keys()}),),\n {},\n ),\n }\n )\n return context\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"fields\"] = services.registration_fields(\n self.request, registration=self.registration\n )\n return kwargs\n\n def form_valid(self, form):\n values = form.field_values()\n try:\n services.update_registration(\n registration=self.registration, field_values=values\n )\n messages.success(self.request, _(\"Registration successfully saved.\"))\n if \"_save\" in self.request.POST:\n return redirect(\n \"admin:events_eventregistration_change\", self.registration.pk\n )\n except RegistrationError as e:\n messages.error(self.request, e)\n return self.render_to_response(self.get_context_data(form=form))\n\n def dispatch(self, request, *args, **kwargs):\n self.registration = get_object_or_404(\n EventRegistration, pk=self.kwargs[\"registration\"]\n )\n try:\n if self.registration.event.has_fields:\n return super().dispatch(request, *args, **kwargs)\n except RegistrationError:\n pass\n return redirect(\"admin:events_eventregistration_change\", self.registration.pk)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass 
EventMessage(FormView):\n \"\"\"Renders a form that allows the user to create a push notification for all users registers to the event.\"\"\"\n\n form_class = EventMessageForm\n template_name = \"events/admin/message_form.html\"\n admin = None\n event = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n **self.admin.admin_site.each_context(self.request),\n \"add\": False,\n \"change\": True,\n \"has_view_permission\": True,\n \"has_add_permission\": False,\n \"has_change_permission\": self.request.user.has_perms(\n \"events.change_event\"\n ),\n \"has_delete_permission\": False,\n \"has_editable_inline_admin_formsets\": False,\n \"app_label\": \"events\",\n \"opts\": self.event._meta,\n \"is_popup\": False,\n \"save_as\": False,\n \"save_on_top\": False,\n \"original\": self.event,\n \"obj_id\": self.event.pk,\n \"title\": _(\"Send push notification\"),\n \"adminform\": helpers.AdminForm(\n context[\"form\"],\n ((None, {\"fields\": context[\"form\"].fields.keys()}),),\n {},\n ),\n }\n )\n return context\n\n def form_valid(self, form):\n values = form.cleaned_data\n if not values[\"url\"]:\n values[\"url\"] = settings.BASE_URL + self.event.get_absolute_url()\n message = Message(\n title_en=values[\"title_en\"],\n body_en=values[\"body_en\"],\n url=values[\"url\"],\n category=Category.objects.get(key=Category.EVENT),\n )\n message.save()\n message.users.set([r.member for r in self.event.participants if r.member])\n message.send()\n\n messages.success(self.request, _(\"Message sent successfully.\"))\n if \"_save\" in self.request.POST:\n return redirect(\"admin:events_event_details\", self.event.pk)\n return super().form_valid(form)\n\n def dispatch(self, request, *args, **kwargs):\n self.event = get_object_or_404(Event, pk=self.kwargs[\"pk\"])\n return super().dispatch(request, *args, **kwargs)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventRegistrationsExport(View, PermissionRequiredMixin):\n \"\"\"View to export registrations.\"\"\"\n\n template_name = \"events/admin/details.html\"\n permission_required = \"events.change_event\"\n\n def get(self, request, pk):\n \"\"\"Export the registration of a specified event.\n\n :param request: the request object\n :param pk: the primary key of the event\n :return: A CSV containing all registrations for the event\n \"\"\"\n event = get_object_or_404(Event, pk=pk)\n extra_fields = event.registrationinformationfield_set.all()\n registrations = event.eventregistration_set.all()\n\n header_fields = (\n [\n _(\"Name\"),\n _(\"Email\"),\n _(\"Paid\"),\n _(\"Present\"),\n _(\"Status\"),\n _(\"Phone number\"),\n ]\n + [field.name for field in extra_fields]\n + [_(\"Date\"), _(\"Date cancelled\")]\n )\n\n rows = []\n if event.price == 0:\n header_fields.remove(_(\"Paid\"))\n for registration in registrations:\n if registration.member:\n name = registration.member.get_full_name()\n else:\n name = registration.name\n status = pgettext_lazy(\"registration status\", \"registered\").capitalize()\n cancelled = None\n if registration.date_cancelled:\n\n if registration.is_late_cancellation():\n status = pgettext_lazy(\n \"registration status\", \"late cancellation\"\n ).capitalize()\n else:\n status = pgettext_lazy(\n \"registration status\", \"cancelled\"\n ).capitalize()\n cancelled = timezone.localtime(registration.date_cancelled)\n\n elif registration.queue_position:\n status = pgettext_lazy(\"registration 
status\", \"waiting\")\n data = {\n _(\"Name\"): name,\n _(\"Date\"): timezone.localtime(registration.date),\n _(\"Present\"): _(\"Yes\") if registration.present else \"\",\n _(\"Phone number\"): (\n registration.member.profile.phone_number\n if registration.member\n else \"\"\n ),\n _(\"Email\"): (registration.member.email if registration.member else \"\"),\n _(\"Status\"): status,\n _(\"Date cancelled\"): cancelled,\n }\n if event.price > 0:\n if registration.is_paid():\n data[_(\"Paid\")] = registration.payment.get_type_display()\n else:\n data[_(\"Paid\")] = _(\"No\")\n\n data.update(\n {\n field[\"field\"].name: field[\"value\"]\n for field in registration.information_fields\n }\n )\n rows.append(data)\n\n response = HttpResponse(content_type=\"text/csv\")\n writer = csv.DictWriter(response, header_fields)\n writer.writeheader()\n\n rows = sorted(\n rows,\n key=lambda row: (\n row[_(\"Status\")]\n == pgettext_lazy(\n \"registration status\", \"late cancellation\"\n ).capitalize(),\n row[_(\"Date\")],\n ),\n reverse=True,\n )\n\n for row in rows:\n writer.writerow(row)\n\n response[\"Content-Disposition\"] = 'attachment; filename=\"{}.csv\"'.format(\n slugify(event.title)\n )\n return response\n", "path": "website/events/admin_views.py"}]} | 3,195 | 117 |
gh_patches_debug_2230 | rasdani/github-patches | git_diff | getsentry__sentry-18644 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BufferError: Local: Queue full
I am receiving this error once every 2-4 days and I need to restart Sentry to fix it. This started after moving to the Docker version of Sentry.
I never noticed this being an issue on 9.1.2 also with Clickhouse and Snuba running, but without Kafka.
> https://observ.app/share/issue/4e4f208a500d48cc898770930706959a/
I am not sure where to look / poke / monitor to see this queue that is being spoken of and how I can flush it / enlarge it if needed.
`sentry queues list` showed all 0's so it's not looking like there is a massive backlog of events.
Any help is appreciated!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/utils/pubsub.py`
Content:
```
1 from __future__ import absolute_import
2
3 import redis
4 import logging
5
6 from threading import Thread
7 from six.moves.queue import Queue, Full
8
9
10 class QueuedPublisherService(object):
11 """
12 A publisher that queues items locally and publishes them to a
13 remote pubsub service on a background thread.
14
15 Maintains a lossy internal queue for posting, will discard the
16 value if the queue is full or not immediately available. Will also
17 drop items if the publish operation to the remote service fails.
18 """
19
20 def __init__(self, publisher):
21 self._started = False
22 self.publisher = publisher
23
24 def _start(self):
25 if self._started:
26 return True
27
28 self.q = q = Queue(maxsize=100)
29
30 def worker():
31 while True:
32 (channel, key, value) = q.get()
33 try:
34 self.publisher.publish(channel, key=key, value=value)
35 except Exception as e:
36 logger = logging.getLogger("sentry.errors")
37 logger.debug("could not submit event to pubsub: %s" % e)
38 finally:
39 q.task_done()
40
41 t = Thread(target=worker)
42 t.setDaemon(True)
43 t.start()
44
45 self._started = True
46 return True
47
48 def publish(self, channel, value, key=None):
49 if not self._start():
50 return
51
52 try:
53 self.q.put((channel, key, value), block=False)
54 except Full:
55 return
56
57
58 class RedisPublisher(object):
59 def __init__(self, connection):
60 self.rds = None if connection is None else redis.StrictRedis(**connection)
61
62 def publish(self, channel, value, key=None):
63 if self.rds is not None:
64 self.rds.publish(channel, value)
65
66
67 class KafkaPublisher(object):
68 def __init__(self, connection, asynchronous=True):
69 from confluent_kafka import Producer
70
71 self.producer = Producer(connection or {})
72 self.asynchronous = asynchronous
73
74 def publish(self, channel, value, key=None):
75 self.producer.produce(topic=channel, value=value, key=key)
76 if not self.asynchronous:
77 self.producer.flush()
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/utils/pubsub.py b/src/sentry/utils/pubsub.py
--- a/src/sentry/utils/pubsub.py
+++ b/src/sentry/utils/pubsub.py
@@ -73,5 +73,7 @@
def publish(self, channel, value, key=None):
self.producer.produce(topic=channel, value=value, key=key)
- if not self.asynchronous:
+ if self.asynchronous:
+ self.producer.poll(0)
+ else:
self.producer.flush()
| {"golden_diff": "diff --git a/src/sentry/utils/pubsub.py b/src/sentry/utils/pubsub.py\n--- a/src/sentry/utils/pubsub.py\n+++ b/src/sentry/utils/pubsub.py\n@@ -73,5 +73,7 @@\n \n def publish(self, channel, value, key=None):\n self.producer.produce(topic=channel, value=value, key=key)\n- if not self.asynchronous:\n+ if self.asynchronous:\n+ self.producer.poll(0)\n+ else:\n self.producer.flush()\n", "issue": "BufferError: Local: Queue full\nI am receiving this error once every 2-4 days and I need to restart Sentry to fix it. This started after moving to the Docker version of Sentry.\r\n\r\nI never noticed this being an issue on 9.1.2 also with Clickhouse and Snuba running, but without Kafka.\r\n\r\n> https://observ.app/share/issue/4e4f208a500d48cc898770930706959a/\r\n\r\nI am not sure where to look / poke / monitor to see this queue that is being spoken of and how I can flush it / enlarge it if needed.\r\n\r\n`sentry queues list` showed all 0's so it's not looking like there is a massive backlog of events.\r\n\r\nAny help is appreciated!\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport redis\nimport logging\n\nfrom threading import Thread\nfrom six.moves.queue import Queue, Full\n\n\nclass QueuedPublisherService(object):\n \"\"\"\n A publisher that queues items locally and publishes them to a\n remote pubsub service on a background thread.\n\n Maintains a lossy internal queue for posting, will discard the\n value if the queue is full or not immediately available. Will also\n drop items if the publish operation to the remote service fails.\n \"\"\"\n\n def __init__(self, publisher):\n self._started = False\n self.publisher = publisher\n\n def _start(self):\n if self._started:\n return True\n\n self.q = q = Queue(maxsize=100)\n\n def worker():\n while True:\n (channel, key, value) = q.get()\n try:\n self.publisher.publish(channel, key=key, value=value)\n except Exception as e:\n logger = logging.getLogger(\"sentry.errors\")\n logger.debug(\"could not submit event to pubsub: %s\" % e)\n finally:\n q.task_done()\n\n t = Thread(target=worker)\n t.setDaemon(True)\n t.start()\n\n self._started = True\n return True\n\n def publish(self, channel, value, key=None):\n if not self._start():\n return\n\n try:\n self.q.put((channel, key, value), block=False)\n except Full:\n return\n\n\nclass RedisPublisher(object):\n def __init__(self, connection):\n self.rds = None if connection is None else redis.StrictRedis(**connection)\n\n def publish(self, channel, value, key=None):\n if self.rds is not None:\n self.rds.publish(channel, value)\n\n\nclass KafkaPublisher(object):\n def __init__(self, connection, asynchronous=True):\n from confluent_kafka import Producer\n\n self.producer = Producer(connection or {})\n self.asynchronous = asynchronous\n\n def publish(self, channel, value, key=None):\n self.producer.produce(topic=channel, value=value, key=key)\n if not self.asynchronous:\n self.producer.flush()\n", "path": "src/sentry/utils/pubsub.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport redis\nimport logging\n\nfrom threading import Thread\nfrom six.moves.queue import Queue, Full\n\n\nclass QueuedPublisherService(object):\n \"\"\"\n A publisher that queues items locally and publishes them to a\n remote pubsub service on a background thread.\n\n Maintains a lossy internal queue for posting, will discard the\n value if the queue is full or not immediately available. 
Will also\n drop items if the publish operation to the remote service fails.\n \"\"\"\n\n def __init__(self, publisher):\n self._started = False\n self.publisher = publisher\n\n def _start(self):\n if self._started:\n return True\n\n self.q = q = Queue(maxsize=100)\n\n def worker():\n while True:\n (channel, key, value) = q.get()\n try:\n self.publisher.publish(channel, key=key, value=value)\n except Exception as e:\n logger = logging.getLogger(\"sentry.errors\")\n logger.debug(\"could not submit event to pubsub: %s\" % e)\n finally:\n q.task_done()\n\n t = Thread(target=worker)\n t.setDaemon(True)\n t.start()\n\n self._started = True\n return True\n\n def publish(self, channel, value, key=None):\n if not self._start():\n return\n\n try:\n self.q.put((channel, key, value), block=False)\n except Full:\n return\n\n\nclass RedisPublisher(object):\n def __init__(self, connection):\n self.rds = None if connection is None else redis.StrictRedis(**connection)\n\n def publish(self, channel, value, key=None):\n if self.rds is not None:\n self.rds.publish(channel, value)\n\n\nclass KafkaPublisher(object):\n def __init__(self, connection, asynchronous=True):\n from confluent_kafka import Producer\n\n self.producer = Producer(connection or {})\n self.asynchronous = asynchronous\n\n def publish(self, channel, value, key=None):\n self.producer.produce(topic=channel, value=value, key=key)\n if self.asynchronous:\n self.producer.poll(0)\n else:\n self.producer.flush()\n", "path": "src/sentry/utils/pubsub.py"}]} | 1,051 | 115 |
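The one-line nature of the fix above hides the mechanics: `confluent_kafka.Producer.produce()` only appends messages to a bounded local queue, and that queue is drained as delivery reports are served by `poll()` (or by a blocking `flush()`). A fire-and-forget publisher that never polls therefore fills the queue and eventually raises `BufferError: Local: Queue full`. Below is a minimal standalone sketch of the patched behaviour, with the broker address as an assumption.

```python
# Minimal sketch, assuming a reachable Kafka broker at localhost:9092.
from confluent_kafka import Producer

producer = Producer({"bootstrap.servers": "localhost:9092"})


def publish(channel, value, key=None, asynchronous=True):
    producer.produce(topic=channel, value=value, key=key)
    if asynchronous:
        # Serve delivery callbacks without blocking; this is what frees
        # slots in the producer's local queue between produce() calls.
        producer.poll(0)
    else:
        producer.flush()
```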
gh_patches_debug_28661 | rasdani/github-patches | git_diff | Kinto__kinto-696 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inconsistency with 404 response on empty collections
- Set `read_only` to true
- Give readonly access to the postgresql user
- Give `read` permission to everyone on a bucket `foo`
- Going to `/buckets/foo/collections/unknown` gives 404
- Going to `/buckets/foo/collections/unknown/records` gives 503
Listing the records of an unknown collection should definitely give 404, except with the `default` bucket plugin.
Very related to https://github.com/Kinto/kinto/issues/558
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/views/records.py`
Content:
```
1 import copy
2
3 import jsonschema
4 from kinto.core import resource
5 from kinto.core.errors import raise_invalid
6 from jsonschema import exceptions as jsonschema_exceptions
7 from pyramid.security import Authenticated
8 from pyramid.settings import asbool
9
10 from kinto.views import RelaxedUUID, object_exists_or_404
11
12
13 class RecordSchema(resource.ResourceSchema):
14 class Options:
15 preserve_unknown = True
16
17
18 _parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'
19
20
21 @resource.register(name='record',
22 collection_path=_parent_path + '/records',
23 record_path=_parent_path + '/records/{{id}}')
24 class Record(resource.ShareableResource):
25
26 mapping = RecordSchema()
27 schema_field = 'schema'
28
29 def __init__(self, *args, **kwargs):
30 super(Record, self).__init__(*args, **kwargs)
31
32 self.model.id_generator = RelaxedUUID()
33
34 # Check if already fetched before (in batch).
35 collections = self.request.bound_data.setdefault('collections', {})
36 collection_uri = self.get_parent_id(self.request)
37 if collection_uri not in collections:
38 # Unknown yet, fetch from storage.
39 collection_parent_id = '/buckets/%s' % self.bucket_id
40 collection = object_exists_or_404(self.request,
41 collection_id='collection',
42 parent_id=collection_parent_id,
43 object_id=self.collection_id)
44 collections[collection_uri] = collection
45
46 self._collection = collections[collection_uri]
47
48 def get_parent_id(self, request):
49 self.bucket_id = request.matchdict['bucket_id']
50 self.collection_id = request.matchdict['collection_id']
51 return '/buckets/%s/collections/%s' % (self.bucket_id,
52 self.collection_id)
53
54 def is_known_field(self, field_name):
55 """Without schema, any field is considered as known."""
56 return True
57
58 def process_record(self, new, old=None):
59 """Validate records against collection schema, if any."""
60 new = super(Record, self).process_record(new, old)
61
62 schema = self._collection.get('schema')
63 settings = self.request.registry.settings
64 schema_validation = 'experimental_collection_schema_validation'
65 if not schema or not asbool(settings.get(schema_validation)):
66 return new
67
68 collection_timestamp = self._collection[self.model.modified_field]
69
70 try:
71 stripped = copy.deepcopy(new)
72 stripped.pop(self.model.id_field, None)
73 stripped.pop(self.model.modified_field, None)
74 stripped.pop(self.model.permissions_field, None)
75 stripped.pop(self.schema_field, None)
76 jsonschema.validate(stripped, schema)
77 except jsonschema_exceptions.ValidationError as e:
78 try:
79 field = e.path.pop() if e.path else e.validator_value.pop()
80 except AttributeError:
81 field = None
82 raise_invalid(self.request, name=field, description=e.message)
83
84 new[self.schema_field] = collection_timestamp
85 return new
86
87 def collection_get(self):
88 result = super(Record, self).collection_get()
89 self._handle_cache_expires(self.request.response)
90 return result
91
92 def get(self):
93 result = super(Record, self).get()
94 self._handle_cache_expires(self.request.response)
95 return result
96
97 def _handle_cache_expires(self, response):
98 """If the parent collection defines a ``cache_expires`` attribute,
99 then cache-control response headers are sent.
100
101 .. note::
102
103 Those headers are also sent if the
104 ``kinto.record_cache_expires_seconds`` setting is defined.
105 """
106 is_anonymous = Authenticated not in self.request.effective_principals
107 if not is_anonymous:
108 return
109
110 cache_expires = self._collection.get('cache_expires')
111 if cache_expires is None:
112 by_bucket = '%s_record_cache_expires_seconds' % (self.bucket_id)
113 by_collection = '%s_%s_record_cache_expires_seconds' % (
114 self.bucket_id, self.collection_id)
115 settings = self.request.registry.settings
116 cache_expires = settings.get(by_collection,
117 settings.get(by_bucket))
118
119 if cache_expires is not None:
120 response.cache_expires(seconds=int(cache_expires))
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/views/records.py b/kinto/views/records.py
--- a/kinto/views/records.py
+++ b/kinto/views/records.py
@@ -26,23 +26,22 @@
mapping = RecordSchema()
schema_field = 'schema'
- def __init__(self, *args, **kwargs):
- super(Record, self).__init__(*args, **kwargs)
-
- self.model.id_generator = RelaxedUUID()
-
+ def __init__(self, request, **kwargs):
+ # Before all, first check that the parent collection exists.
# Check if already fetched before (in batch).
- collections = self.request.bound_data.setdefault('collections', {})
- collection_uri = self.get_parent_id(self.request)
+ collections = request.bound_data.setdefault('collections', {})
+ collection_uri = self.get_parent_id(request)
if collection_uri not in collections:
# Unknown yet, fetch from storage.
collection_parent_id = '/buckets/%s' % self.bucket_id
- collection = object_exists_or_404(self.request,
+ collection = object_exists_or_404(request,
collection_id='collection',
parent_id=collection_parent_id,
object_id=self.collection_id)
collections[collection_uri] = collection
+ super(Record, self).__init__(request, **kwargs)
+ self.model.id_generator = RelaxedUUID()
self._collection = collections[collection_uri]
def get_parent_id(self, request):
| {"golden_diff": "diff --git a/kinto/views/records.py b/kinto/views/records.py\n--- a/kinto/views/records.py\n+++ b/kinto/views/records.py\n@@ -26,23 +26,22 @@\n mapping = RecordSchema()\n schema_field = 'schema'\n \n- def __init__(self, *args, **kwargs):\n- super(Record, self).__init__(*args, **kwargs)\n-\n- self.model.id_generator = RelaxedUUID()\n-\n+ def __init__(self, request, **kwargs):\n+ # Before all, first check that the parent collection exists.\n # Check if already fetched before (in batch).\n- collections = self.request.bound_data.setdefault('collections', {})\n- collection_uri = self.get_parent_id(self.request)\n+ collections = request.bound_data.setdefault('collections', {})\n+ collection_uri = self.get_parent_id(request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = '/buckets/%s' % self.bucket_id\n- collection = object_exists_or_404(self.request,\n+ collection = object_exists_or_404(request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n \n+ super(Record, self).__init__(request, **kwargs)\n+ self.model.id_generator = RelaxedUUID()\n self._collection = collections[collection_uri]\n \n def get_parent_id(self, request):\n", "issue": "Inconsistency with 404 response on empty collections\n- Set `read_only` to true\n- Give readonly access to the postgresql user\n- Give `read` permission to everyone on a bucket `foo`\n- Going to `/buckets/foo/collections/unknown` gives 404\n- Going to `/buckets/foo/collections/unknown/records` gives 503\n\nListing the records of an unknown collection should definitely give 404, except with the `default` bucket plugin.\n\nVery related to https://github.com/Kinto/kinto/issues/558\n\n", "before_files": [{"content": "import copy\n\nimport jsonschema\nfrom kinto.core import resource\nfrom kinto.core.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\nfrom pyramid.security import Authenticated\nfrom pyramid.settings import asbool\n\nfrom kinto.views import RelaxedUUID, object_exists_or_404\n\n\nclass RecordSchema(resource.ResourceSchema):\n class Options:\n preserve_unknown = True\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ShareableResource):\n\n mapping = RecordSchema()\n schema_field = 'schema'\n\n def __init__(self, *args, **kwargs):\n super(Record, self).__init__(*args, **kwargs)\n\n self.model.id_generator = RelaxedUUID()\n\n # Check if already fetched before (in batch).\n collections = self.request.bound_data.setdefault('collections', {})\n collection_uri = self.get_parent_id(self.request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = '/buckets/%s' % self.bucket_id\n collection = object_exists_or_404(self.request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n\n self._collection = collections[collection_uri]\n\n def get_parent_id(self, request):\n self.bucket_id = request.matchdict['bucket_id']\n self.collection_id = request.matchdict['collection_id']\n return '/buckets/%s/collections/%s' % (self.bucket_id,\n self.collection_id)\n\n def is_known_field(self, field_name):\n \"\"\"Without schema, any field is considered as known.\"\"\"\n 
return True\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n new = super(Record, self).process_record(new, old)\n\n schema = self._collection.get('schema')\n settings = self.request.registry.settings\n schema_validation = 'experimental_collection_schema_validation'\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n\n collection_timestamp = self._collection[self.model.modified_field]\n\n try:\n stripped = copy.deepcopy(new)\n stripped.pop(self.model.id_field, None)\n stripped.pop(self.model.modified_field, None)\n stripped.pop(self.model.permissions_field, None)\n stripped.pop(self.schema_field, None)\n jsonschema.validate(stripped, schema)\n except jsonschema_exceptions.ValidationError as e:\n try:\n field = e.path.pop() if e.path else e.validator_value.pop()\n except AttributeError:\n field = None\n raise_invalid(self.request, name=field, description=e.message)\n\n new[self.schema_field] = collection_timestamp\n return new\n\n def collection_get(self):\n result = super(Record, self).collection_get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def get(self):\n result = super(Record, self).get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def _handle_cache_expires(self, response):\n \"\"\"If the parent collection defines a ``cache_expires`` attribute,\n then cache-control response headers are sent.\n\n .. note::\n\n Those headers are also sent if the\n ``kinto.record_cache_expires_seconds`` setting is defined.\n \"\"\"\n is_anonymous = Authenticated not in self.request.effective_principals\n if not is_anonymous:\n return\n\n cache_expires = self._collection.get('cache_expires')\n if cache_expires is None:\n by_bucket = '%s_record_cache_expires_seconds' % (self.bucket_id)\n by_collection = '%s_%s_record_cache_expires_seconds' % (\n self.bucket_id, self.collection_id)\n settings = self.request.registry.settings\n cache_expires = settings.get(by_collection,\n settings.get(by_bucket))\n\n if cache_expires is not None:\n response.cache_expires(seconds=int(cache_expires))\n", "path": "kinto/views/records.py"}], "after_files": [{"content": "import copy\n\nimport jsonschema\nfrom kinto.core import resource\nfrom kinto.core.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\nfrom pyramid.security import Authenticated\nfrom pyramid.settings import asbool\n\nfrom kinto.views import RelaxedUUID, object_exists_or_404\n\n\nclass RecordSchema(resource.ResourceSchema):\n class Options:\n preserve_unknown = True\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ShareableResource):\n\n mapping = RecordSchema()\n schema_field = 'schema'\n\n def __init__(self, request, **kwargs):\n # Before all, first check that the parent collection exists.\n # Check if already fetched before (in batch).\n collections = request.bound_data.setdefault('collections', {})\n collection_uri = self.get_parent_id(request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = '/buckets/%s' % self.bucket_id\n collection = object_exists_or_404(request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n\n super(Record, self).__init__(request, 
**kwargs)\n self.model.id_generator = RelaxedUUID()\n self._collection = collections[collection_uri]\n\n def get_parent_id(self, request):\n self.bucket_id = request.matchdict['bucket_id']\n self.collection_id = request.matchdict['collection_id']\n return '/buckets/%s/collections/%s' % (self.bucket_id,\n self.collection_id)\n\n def is_known_field(self, field_name):\n \"\"\"Without schema, any field is considered as known.\"\"\"\n return True\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n new = super(Record, self).process_record(new, old)\n\n schema = self._collection.get('schema')\n settings = self.request.registry.settings\n schema_validation = 'experimental_collection_schema_validation'\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n\n collection_timestamp = self._collection[self.model.modified_field]\n\n try:\n stripped = copy.deepcopy(new)\n stripped.pop(self.model.id_field, None)\n stripped.pop(self.model.modified_field, None)\n stripped.pop(self.model.permissions_field, None)\n stripped.pop(self.schema_field, None)\n jsonschema.validate(stripped, schema)\n except jsonschema_exceptions.ValidationError as e:\n try:\n field = e.path.pop() if e.path else e.validator_value.pop()\n except AttributeError:\n field = None\n raise_invalid(self.request, name=field, description=e.message)\n\n new[self.schema_field] = collection_timestamp\n return new\n\n def collection_get(self):\n result = super(Record, self).collection_get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def get(self):\n result = super(Record, self).get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def _handle_cache_expires(self, response):\n \"\"\"If the parent collection defines a ``cache_expires`` attribute,\n then cache-control response headers are sent.\n\n .. note::\n\n Those headers are also sent if the\n ``kinto.record_cache_expires_seconds`` setting is defined.\n \"\"\"\n is_anonymous = Authenticated not in self.request.effective_principals\n if not is_anonymous:\n return\n\n cache_expires = self._collection.get('cache_expires')\n if cache_expires is None:\n by_bucket = '%s_record_cache_expires_seconds' % (self.bucket_id)\n by_collection = '%s_%s_record_cache_expires_seconds' % (\n self.bucket_id, self.collection_id)\n settings = self.request.registry.settings\n cache_expires = settings.get(by_collection,\n settings.get(by_bucket))\n\n if cache_expires is not None:\n response.cache_expires(seconds=int(cache_expires))\n", "path": "kinto/views/records.py"}]} | 1,534 | 329 |
gh_patches_debug_39616 | rasdani/github-patches | git_diff | wagtail__wagtail-1516 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tabbed interface in admin doesn't work with i18n

I can't toggle tabs in the page editor when wagtail is configured with USE_I18N = True and language isn't english (russian, in my case). It seems that tabbed_interface template uses slugify template tag to produce ids to elements, it works fine unless other than english language is used. In that case slugify produces empty output, and js bindings do not work correctly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from django.conf import settings
4 from django import template
5 from django.contrib.humanize.templatetags.humanize import intcomma
6
7 from wagtail.wagtailcore import hooks
8 from wagtail.wagtailcore.models import get_navigation_menu_items, UserPagePermissionsProxy, PageViewRestriction
9 from wagtail.wagtailcore.utils import camelcase_to_underscore, escape_script
10 from wagtail.wagtailadmin.menu import admin_menu
11
12
13 register = template.Library()
14
15 register.filter('intcomma', intcomma)
16
17 @register.inclusion_tag('wagtailadmin/shared/explorer_nav.html')
18 def explorer_nav():
19 return {
20 'nodes': get_navigation_menu_items()
21 }
22
23
24 @register.inclusion_tag('wagtailadmin/shared/explorer_nav_child.html')
25 def explorer_subnav(nodes):
26 return {
27 'nodes': nodes
28 }
29
30
31 @register.inclusion_tag('wagtailadmin/shared/main_nav.html', takes_context=True)
32 def main_nav(context):
33 request = context['request']
34
35 return {
36 'menu_html': admin_menu.render_html(request),
37 'request': request,
38 }
39
40 @register.simple_tag
41 def main_nav_js():
42 return admin_menu.media['js']
43
44
45 @register.filter("ellipsistrim")
46 def ellipsistrim(value, max_length):
47 if len(value) > max_length:
48 truncd_val = value[:max_length]
49 if not len(value) == (max_length + 1) and value[max_length + 1] != " ":
50 truncd_val = truncd_val[:truncd_val.rfind(" ")]
51 return truncd_val + "..."
52 return value
53
54
55 @register.filter
56 def fieldtype(bound_field):
57 try:
58 return camelcase_to_underscore(bound_field.field.__class__.__name__)
59 except AttributeError:
60 try:
61 return camelcase_to_underscore(bound_field.__class__.__name__)
62 except AttributeError:
63 return ""
64
65
66 @register.filter
67 def widgettype(bound_field):
68 try:
69 return camelcase_to_underscore(bound_field.field.widget.__class__.__name__)
70 except AttributeError:
71 try:
72 return camelcase_to_underscore(bound_field.widget.__class__.__name__)
73 except AttributeError:
74 return ""
75
76
77
78 @register.filter
79 def meta_description(model):
80 try:
81 return model.model_class()._meta.description
82 except:
83 return ""
84
85
86 @register.assignment_tag(takes_context=True)
87 def page_permissions(context, page):
88 """
89 Usage: {% page_permissions page as page_perms %}
90 Sets the variable 'page_perms' to a PagePermissionTester object that can be queried to find out
91 what actions the current logged-in user can perform on the given page.
92 """
93 # Create a UserPagePermissionsProxy object to represent the user's global permissions, and
94 # cache it in the context for the duration of the page request, if one does not exist already
95 if 'user_page_permissions' not in context:
96 context['user_page_permissions'] = UserPagePermissionsProxy(context['request'].user)
97
98 # Now retrieve a PagePermissionTester from it, specific to the given page
99 return context['user_page_permissions'].for_page(page)
100
101
102 @register.assignment_tag(takes_context=True)
103 def test_page_is_public(context, page):
104 """
105 Usage: {% test_page_is_public page as is_public %}
106 Sets 'is_public' to True iff there are no page view restrictions in place on
107 this page.
108 Caches the list of page view restrictions in the context, to avoid repeated
109 DB queries on repeated calls.
110 """
111 if 'all_page_view_restriction_paths' not in context:
112 context['all_page_view_restriction_paths'] = PageViewRestriction.objects.select_related('page').values_list('page__path', flat=True)
113
114 is_private = any([
115 page.path.startswith(restricted_path)
116 for restricted_path in context['all_page_view_restriction_paths']
117 ])
118
119 return not is_private
120
121
122 @register.simple_tag
123 def hook_output(hook_name):
124 """
125 Example: {% hook_output 'insert_editor_css' %}
126 Whenever we have a hook whose functions take no parameters and return a string, this tag can be used
127 to output the concatenation of all of those return values onto the page.
128 Note that the output is not escaped - it is the hook function's responsibility to escape unsafe content.
129 """
130 snippets = [fn() for fn in hooks.get_hooks(hook_name)]
131 return ''.join(snippets)
132
133
134 @register.assignment_tag
135 def usage_count_enabled():
136 return getattr(settings, 'WAGTAIL_USAGE_COUNT_ENABLED', False)
137
138
139 @register.assignment_tag
140 def base_url_setting():
141 return getattr(settings, 'BASE_URL', None)
142
143
144 class EscapeScriptNode(template.Node):
145 TAG_NAME = 'escapescript'
146
147 def __init__(self, nodelist):
148 super(EscapeScriptNode, self).__init__()
149 self.nodelist = nodelist
150
151 def render(self, context):
152 out = self.nodelist.render(context)
153 return escape_script(out)
154
155 @classmethod
156 def handle(cls, parser, token):
157 nodelist = parser.parse(('end' + EscapeScriptNode.TAG_NAME,))
158 parser.delete_first_token()
159 return cls(nodelist)
160
161 register.tag(EscapeScriptNode.TAG_NAME, EscapeScriptNode.handle)
162
163
164 # Helpers for Widget.render_with_errors, our extension to the Django widget API that allows widgets to
165 # take on the responsibility of rendering their own error messages
166 @register.filter
167 def render_with_errors(bound_field):
168 """
169 Usage: {{ field|render_with_errors }} as opposed to {{ field }}.
170 If the field (a BoundField instance) has errors on it, and the associated widget implements
171 a render_with_errors method, call that; otherwise, call the regular widget rendering mechanism.
172 """
173 widget = bound_field.field.widget
174 if bound_field.errors and hasattr(widget, 'render_with_errors'):
175 return widget.render_with_errors(bound_field.html_name, bound_field.value(), attrs={'id': bound_field.auto_id}, errors=bound_field.errors)
176 else:
177 return bound_field.as_widget()
178
179 @register.filter
180 def has_unrendered_errors(bound_field):
181 """
182 Return true if this field has errors that were not accounted for by render_with_errors, because
183 the widget does not support the render_with_errors method
184 """
185 return bound_field.errors and not hasattr(bound_field.field.widget, 'render_with_errors')
186
```
Path: `wagtail/wagtailcore/utils.py`
Content:
```
1 import re
2 from six import string_types
3
4 from django.db.models import Model
5 from django.apps import apps
6
7
8 def camelcase_to_underscore(str):
9 # http://djangosnippets.org/snippets/585/
10 return re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\1', str).lower().strip('_')
11
12
13 def resolve_model_string(model_string, default_app=None):
14 """
15 Resolve an 'app_label.model_name' string into an actual model class.
16 If a model class is passed in, just return that.
17 """
18 if isinstance(model_string, string_types):
19 try:
20 app_label, model_name = model_string.split(".")
21 except ValueError:
22 if default_app is not None:
23 # If we can't split, assume a model in current app
24 app_label = default_app
25 model_name = model_string
26 else:
27 raise ValueError("Can not resolve {0!r} into a model. Model names "
28 "should be in the form app_label.model_name".format(
29 model_string), model_string)
30
31 return apps.get_model(app_label, model_name)
32
33 elif isinstance(model_string, type) and issubclass(model_string, Model):
34 return model_string
35
36 else:
37 raise LookupError("Can not resolve {0!r} into a model".format(model_string), model_string)
38
39
40 SCRIPT_RE = re.compile(r'<(-*)/script>')
41 def escape_script(text):
42 """
43 Escape `</script>` tags in 'text' so that it can be placed within a `<script>` block without
44 accidentally closing it. A '-' character will be inserted for each time it is escaped:
45 `<-/script>`, `<--/script>` etc.
46 """
47 return SCRIPT_RE.sub(r'<-\1/script>', text)
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py b/wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py
--- a/wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py
+++ b/wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py
@@ -3,10 +3,12 @@
from django.conf import settings
from django import template
from django.contrib.humanize.templatetags.humanize import intcomma
+from django.template.defaultfilters import stringfilter
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.models import get_navigation_menu_items, UserPagePermissionsProxy, PageViewRestriction
from wagtail.wagtailcore.utils import camelcase_to_underscore, escape_script
+from wagtail.wagtailcore.utils import cautious_slugify as _cautious_slugify
from wagtail.wagtailadmin.menu import admin_menu
@@ -183,3 +185,9 @@
the widget does not support the render_with_errors method
"""
return bound_field.errors and not hasattr(bound_field.field.widget, 'render_with_errors')
+
+
+@register.filter(is_safe=True)
+@stringfilter
+def cautious_slugify(value):
+ return _cautious_slugify(value)
diff --git a/wagtail/wagtailcore/utils.py b/wagtail/wagtailcore/utils.py
--- a/wagtail/wagtailcore/utils.py
+++ b/wagtail/wagtailcore/utils.py
@@ -1,8 +1,11 @@
import re
+import unicodedata
from six import string_types
from django.db.models import Model
from django.apps import apps
+from django.utils.encoding import force_text
+from django.utils.text import slugify
def camelcase_to_underscore(str):
@@ -45,3 +48,38 @@
`<-/script>`, `<--/script>` etc.
"""
return SCRIPT_RE.sub(r'<-\1/script>', text)
+
+
+SLUGIFY_RE = re.compile(r'[^\w\s-]', re.UNICODE)
+
+
+def cautious_slugify(value):
+ """
+ Convert a string to ASCII exactly as Django's slugify does, with the exception
+ that any non-ASCII alphanumeric characters (that cannot be ASCIIfied under Unicode
+ normalisation) are escaped into codes like 'u0421' instead of being deleted entirely.
+
+ This ensures that the result of slugifying e.g. Cyrillic text will not be an empty
+ string, and can thus be safely used as an identifier (albeit not a human-readable one).
+ """
+ value = force_text(value)
+
+ # Normalize the string to decomposed unicode form. This causes accented Latin
+ # characters to be split into 'base character' + 'accent modifier'; the latter will
+ # be stripped out by the regexp, resulting in an ASCII-clean character that doesn't
+ # need to be escaped
+ value = unicodedata.normalize('NFKD', value)
+
+ # Strip out characters that aren't letterlike, underscores or hyphens,
+ # using the same regexp that slugify uses. This ensures that non-ASCII non-letters
+ # (e.g. accent modifiers, fancy punctuation) get stripped rather than escaped
+ value = SLUGIFY_RE.sub('', value)
+
+ # Encode as ASCII, escaping non-ASCII characters with backslashreplace, then convert
+ # back to a unicode string (which is what slugify expects)
+ value = value.encode('ascii', 'backslashreplace').decode('ascii')
+
+ # Pass to slugify to perform final conversion (whitespace stripping, applying
+ # mark_safe); this will also strip out the backslashes from the 'backslashreplace'
+ # conversion
+ return slugify(value)
| {"golden_diff": "diff --git a/wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py b/wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py\n--- a/wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py\n+++ b/wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py\n@@ -3,10 +3,12 @@\n from django.conf import settings\n from django import template\n from django.contrib.humanize.templatetags.humanize import intcomma\n+from django.template.defaultfilters import stringfilter\n \n from wagtail.wagtailcore import hooks\n from wagtail.wagtailcore.models import get_navigation_menu_items, UserPagePermissionsProxy, PageViewRestriction\n from wagtail.wagtailcore.utils import camelcase_to_underscore, escape_script\n+from wagtail.wagtailcore.utils import cautious_slugify as _cautious_slugify\n from wagtail.wagtailadmin.menu import admin_menu\n \n \n@@ -183,3 +185,9 @@\n the widget does not support the render_with_errors method\n \"\"\"\n return bound_field.errors and not hasattr(bound_field.field.widget, 'render_with_errors')\n+\n+\[email protected](is_safe=True)\n+@stringfilter\n+def cautious_slugify(value):\n+ return _cautious_slugify(value)\ndiff --git a/wagtail/wagtailcore/utils.py b/wagtail/wagtailcore/utils.py\n--- a/wagtail/wagtailcore/utils.py\n+++ b/wagtail/wagtailcore/utils.py\n@@ -1,8 +1,11 @@\n import re\n+import unicodedata\n from six import string_types\n \n from django.db.models import Model\n from django.apps import apps\n+from django.utils.encoding import force_text\n+from django.utils.text import slugify\n \n \n def camelcase_to_underscore(str):\n@@ -45,3 +48,38 @@\n `<-/script>`, `<--/script>` etc.\n \"\"\"\n return SCRIPT_RE.sub(r'<-\\1/script>', text)\n+\n+\n+SLUGIFY_RE = re.compile(r'[^\\w\\s-]', re.UNICODE)\n+\n+\n+def cautious_slugify(value):\n+ \"\"\"\n+ Convert a string to ASCII exactly as Django's slugify does, with the exception\n+ that any non-ASCII alphanumeric characters (that cannot be ASCIIfied under Unicode\n+ normalisation) are escaped into codes like 'u0421' instead of being deleted entirely.\n+\n+ This ensures that the result of slugifying e.g. Cyrillic text will not be an empty\n+ string, and can thus be safely used as an identifier (albeit not a human-readable one).\n+ \"\"\"\n+ value = force_text(value)\n+\n+ # Normalize the string to decomposed unicode form. This causes accented Latin\n+ # characters to be split into 'base character' + 'accent modifier'; the latter will\n+ # be stripped out by the regexp, resulting in an ASCII-clean character that doesn't\n+ # need to be escaped\n+ value = unicodedata.normalize('NFKD', value)\n+\n+ # Strip out characters that aren't letterlike, underscores or hyphens,\n+ # using the same regexp that slugify uses. This ensures that non-ASCII non-letters\n+ # (e.g. accent modifiers, fancy punctuation) get stripped rather than escaped\n+ value = SLUGIFY_RE.sub('', value)\n+\n+ # Encode as ASCII, escaping non-ASCII characters with backslashreplace, then convert\n+ # back to a unicode string (which is what slugify expects)\n+ value = value.encode('ascii', 'backslashreplace').decode('ascii')\n+\n+ # Pass to slugify to perform final conversion (whitespace stripping, applying\n+ # mark_safe); this will also strip out the backslashes from the 'backslashreplace'\n+ # conversion\n+ return slugify(value)\n", "issue": "Tabbed interface in admin doesn't work with i18n\n\nI can't toggle tabs in the page editor when wagtail is configured with USE_I18N = True and language isn't english (russian, in my case). 
It seems that tabbed_interface template uses slugify template tag to produce ids to elements, it works fine unless other than english language is used. In that case slugify produces empty output, and js bindings do not work correctly.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django import template\nfrom django.contrib.humanize.templatetags.humanize import intcomma\n\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailcore.models import get_navigation_menu_items, UserPagePermissionsProxy, PageViewRestriction\nfrom wagtail.wagtailcore.utils import camelcase_to_underscore, escape_script\nfrom wagtail.wagtailadmin.menu import admin_menu\n\n\nregister = template.Library()\n\nregister.filter('intcomma', intcomma)\n\[email protected]_tag('wagtailadmin/shared/explorer_nav.html')\ndef explorer_nav():\n return {\n 'nodes': get_navigation_menu_items()\n }\n\n\[email protected]_tag('wagtailadmin/shared/explorer_nav_child.html')\ndef explorer_subnav(nodes):\n return {\n 'nodes': nodes\n }\n\n\[email protected]_tag('wagtailadmin/shared/main_nav.html', takes_context=True)\ndef main_nav(context):\n request = context['request']\n\n return {\n 'menu_html': admin_menu.render_html(request),\n 'request': request,\n }\n\[email protected]_tag\ndef main_nav_js():\n return admin_menu.media['js']\n\n\[email protected](\"ellipsistrim\")\ndef ellipsistrim(value, max_length):\n if len(value) > max_length:\n truncd_val = value[:max_length]\n if not len(value) == (max_length + 1) and value[max_length + 1] != \" \":\n truncd_val = truncd_val[:truncd_val.rfind(\" \")]\n return truncd_val + \"...\"\n return value\n\n\[email protected]\ndef fieldtype(bound_field):\n try:\n return camelcase_to_underscore(bound_field.field.__class__.__name__)\n except AttributeError:\n try:\n return camelcase_to_underscore(bound_field.__class__.__name__)\n except AttributeError:\n return \"\"\n\n\[email protected]\ndef widgettype(bound_field):\n try:\n return camelcase_to_underscore(bound_field.field.widget.__class__.__name__)\n except AttributeError:\n try:\n return camelcase_to_underscore(bound_field.widget.__class__.__name__)\n except AttributeError:\n return \"\"\n\n\n\[email protected]\ndef meta_description(model):\n try:\n return model.model_class()._meta.description\n except:\n return \"\"\n\n\[email protected]_tag(takes_context=True)\ndef page_permissions(context, page):\n \"\"\"\n Usage: {% page_permissions page as page_perms %}\n Sets the variable 'page_perms' to a PagePermissionTester object that can be queried to find out\n what actions the current logged-in user can perform on the given page.\n \"\"\"\n # Create a UserPagePermissionsProxy object to represent the user's global permissions, and\n # cache it in the context for the duration of the page request, if one does not exist already\n if 'user_page_permissions' not in context:\n context['user_page_permissions'] = UserPagePermissionsProxy(context['request'].user)\n\n # Now retrieve a PagePermissionTester from it, specific to the given page\n return context['user_page_permissions'].for_page(page)\n\n\[email protected]_tag(takes_context=True)\ndef test_page_is_public(context, page):\n \"\"\"\n Usage: {% test_page_is_public page as is_public %}\n Sets 'is_public' to True iff there are no page view restrictions in place on\n this page.\n Caches the list of page view restrictions in the context, to avoid repeated\n DB queries on repeated calls.\n \"\"\"\n if 'all_page_view_restriction_paths' not in 
context:\n context['all_page_view_restriction_paths'] = PageViewRestriction.objects.select_related('page').values_list('page__path', flat=True)\n\n is_private = any([\n page.path.startswith(restricted_path)\n for restricted_path in context['all_page_view_restriction_paths']\n ])\n\n return not is_private\n\n\[email protected]_tag\ndef hook_output(hook_name):\n \"\"\"\n Example: {% hook_output 'insert_editor_css' %}\n Whenever we have a hook whose functions take no parameters and return a string, this tag can be used\n to output the concatenation of all of those return values onto the page.\n Note that the output is not escaped - it is the hook function's responsibility to escape unsafe content.\n \"\"\"\n snippets = [fn() for fn in hooks.get_hooks(hook_name)]\n return ''.join(snippets)\n\n\[email protected]_tag\ndef usage_count_enabled():\n return getattr(settings, 'WAGTAIL_USAGE_COUNT_ENABLED', False)\n\n\[email protected]_tag\ndef base_url_setting():\n return getattr(settings, 'BASE_URL', None)\n\n\nclass EscapeScriptNode(template.Node):\n TAG_NAME = 'escapescript'\n\n def __init__(self, nodelist):\n super(EscapeScriptNode, self).__init__()\n self.nodelist = nodelist\n\n def render(self, context):\n out = self.nodelist.render(context)\n return escape_script(out)\n\n @classmethod\n def handle(cls, parser, token):\n nodelist = parser.parse(('end' + EscapeScriptNode.TAG_NAME,))\n parser.delete_first_token()\n return cls(nodelist)\n\nregister.tag(EscapeScriptNode.TAG_NAME, EscapeScriptNode.handle)\n\n\n# Helpers for Widget.render_with_errors, our extension to the Django widget API that allows widgets to\n# take on the responsibility of rendering their own error messages\[email protected]\ndef render_with_errors(bound_field):\n \"\"\"\n Usage: {{ field|render_with_errors }} as opposed to {{ field }}.\n If the field (a BoundField instance) has errors on it, and the associated widget implements\n a render_with_errors method, call that; otherwise, call the regular widget rendering mechanism.\n \"\"\"\n widget = bound_field.field.widget\n if bound_field.errors and hasattr(widget, 'render_with_errors'):\n return widget.render_with_errors(bound_field.html_name, bound_field.value(), attrs={'id': bound_field.auto_id}, errors=bound_field.errors)\n else:\n return bound_field.as_widget()\n\[email protected]\ndef has_unrendered_errors(bound_field):\n \"\"\"\n Return true if this field has errors that were not accounted for by render_with_errors, because\n the widget does not support the render_with_errors method\n \"\"\"\n return bound_field.errors and not hasattr(bound_field.field.widget, 'render_with_errors')\n", "path": "wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py"}, {"content": "import re\nfrom six import string_types\n\nfrom django.db.models import Model\nfrom django.apps import apps\n\n\ndef camelcase_to_underscore(str):\n # http://djangosnippets.org/snippets/585/\n return re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\\\1', str).lower().strip('_')\n\n\ndef resolve_model_string(model_string, default_app=None):\n \"\"\"\n Resolve an 'app_label.model_name' string into an actual model class.\n If a model class is passed in, just return that.\n \"\"\"\n if isinstance(model_string, string_types):\n try:\n app_label, model_name = model_string.split(\".\")\n except ValueError:\n if default_app is not None:\n # If we can't split, assume a model in current app\n app_label = default_app\n model_name = model_string\n else:\n raise ValueError(\"Can not resolve {0!r} into a model. 
Model names \"\n \"should be in the form app_label.model_name\".format(\n model_string), model_string)\n\n return apps.get_model(app_label, model_name)\n\n elif isinstance(model_string, type) and issubclass(model_string, Model):\n return model_string\n\n else:\n raise LookupError(\"Can not resolve {0!r} into a model\".format(model_string), model_string)\n\n\nSCRIPT_RE = re.compile(r'<(-*)/script>')\ndef escape_script(text):\n \"\"\"\n Escape `</script>` tags in 'text' so that it can be placed within a `<script>` block without\n accidentally closing it. A '-' character will be inserted for each time it is escaped:\n `<-/script>`, `<--/script>` etc.\n \"\"\"\n return SCRIPT_RE.sub(r'<-\\1/script>', text)\n", "path": "wagtail/wagtailcore/utils.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django import template\nfrom django.contrib.humanize.templatetags.humanize import intcomma\nfrom django.template.defaultfilters import stringfilter\n\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailcore.models import get_navigation_menu_items, UserPagePermissionsProxy, PageViewRestriction\nfrom wagtail.wagtailcore.utils import camelcase_to_underscore, escape_script\nfrom wagtail.wagtailcore.utils import cautious_slugify as _cautious_slugify\nfrom wagtail.wagtailadmin.menu import admin_menu\n\n\nregister = template.Library()\n\nregister.filter('intcomma', intcomma)\n\[email protected]_tag('wagtailadmin/shared/explorer_nav.html')\ndef explorer_nav():\n return {\n 'nodes': get_navigation_menu_items()\n }\n\n\[email protected]_tag('wagtailadmin/shared/explorer_nav_child.html')\ndef explorer_subnav(nodes):\n return {\n 'nodes': nodes\n }\n\n\[email protected]_tag('wagtailadmin/shared/main_nav.html', takes_context=True)\ndef main_nav(context):\n request = context['request']\n\n return {\n 'menu_html': admin_menu.render_html(request),\n 'request': request,\n }\n\[email protected]_tag\ndef main_nav_js():\n return admin_menu.media['js']\n\n\[email protected](\"ellipsistrim\")\ndef ellipsistrim(value, max_length):\n if len(value) > max_length:\n truncd_val = value[:max_length]\n if not len(value) == (max_length + 1) and value[max_length + 1] != \" \":\n truncd_val = truncd_val[:truncd_val.rfind(\" \")]\n return truncd_val + \"...\"\n return value\n\n\[email protected]\ndef fieldtype(bound_field):\n try:\n return camelcase_to_underscore(bound_field.field.__class__.__name__)\n except AttributeError:\n try:\n return camelcase_to_underscore(bound_field.__class__.__name__)\n except AttributeError:\n return \"\"\n\n\[email protected]\ndef widgettype(bound_field):\n try:\n return camelcase_to_underscore(bound_field.field.widget.__class__.__name__)\n except AttributeError:\n try:\n return camelcase_to_underscore(bound_field.widget.__class__.__name__)\n except AttributeError:\n return \"\"\n\n\n\[email protected]\ndef meta_description(model):\n try:\n return model.model_class()._meta.description\n except:\n return \"\"\n\n\[email protected]_tag(takes_context=True)\ndef page_permissions(context, page):\n \"\"\"\n Usage: {% page_permissions page as page_perms %}\n Sets the variable 'page_perms' to a PagePermissionTester object that can be queried to find out\n what actions the current logged-in user can perform on the given page.\n \"\"\"\n # Create a UserPagePermissionsProxy object to represent the user's global permissions, and\n # cache it in the context for the duration of the page request, if one does not exist already\n if 
'user_page_permissions' not in context:\n context['user_page_permissions'] = UserPagePermissionsProxy(context['request'].user)\n\n # Now retrieve a PagePermissionTester from it, specific to the given page\n return context['user_page_permissions'].for_page(page)\n\n\[email protected]_tag(takes_context=True)\ndef test_page_is_public(context, page):\n \"\"\"\n Usage: {% test_page_is_public page as is_public %}\n Sets 'is_public' to True iff there are no page view restrictions in place on\n this page.\n Caches the list of page view restrictions in the context, to avoid repeated\n DB queries on repeated calls.\n \"\"\"\n if 'all_page_view_restriction_paths' not in context:\n context['all_page_view_restriction_paths'] = PageViewRestriction.objects.select_related('page').values_list('page__path', flat=True)\n\n is_private = any([\n page.path.startswith(restricted_path)\n for restricted_path in context['all_page_view_restriction_paths']\n ])\n\n return not is_private\n\n\[email protected]_tag\ndef hook_output(hook_name):\n \"\"\"\n Example: {% hook_output 'insert_editor_css' %}\n Whenever we have a hook whose functions take no parameters and return a string, this tag can be used\n to output the concatenation of all of those return values onto the page.\n Note that the output is not escaped - it is the hook function's responsibility to escape unsafe content.\n \"\"\"\n snippets = [fn() for fn in hooks.get_hooks(hook_name)]\n return ''.join(snippets)\n\n\[email protected]_tag\ndef usage_count_enabled():\n return getattr(settings, 'WAGTAIL_USAGE_COUNT_ENABLED', False)\n\n\[email protected]_tag\ndef base_url_setting():\n return getattr(settings, 'BASE_URL', None)\n\n\nclass EscapeScriptNode(template.Node):\n TAG_NAME = 'escapescript'\n\n def __init__(self, nodelist):\n super(EscapeScriptNode, self).__init__()\n self.nodelist = nodelist\n\n def render(self, context):\n out = self.nodelist.render(context)\n return escape_script(out)\n\n @classmethod\n def handle(cls, parser, token):\n nodelist = parser.parse(('end' + EscapeScriptNode.TAG_NAME,))\n parser.delete_first_token()\n return cls(nodelist)\n\nregister.tag(EscapeScriptNode.TAG_NAME, EscapeScriptNode.handle)\n\n\n# Helpers for Widget.render_with_errors, our extension to the Django widget API that allows widgets to\n# take on the responsibility of rendering their own error messages\[email protected]\ndef render_with_errors(bound_field):\n \"\"\"\n Usage: {{ field|render_with_errors }} as opposed to {{ field }}.\n If the field (a BoundField instance) has errors on it, and the associated widget implements\n a render_with_errors method, call that; otherwise, call the regular widget rendering mechanism.\n \"\"\"\n widget = bound_field.field.widget\n if bound_field.errors and hasattr(widget, 'render_with_errors'):\n return widget.render_with_errors(bound_field.html_name, bound_field.value(), attrs={'id': bound_field.auto_id}, errors=bound_field.errors)\n else:\n return bound_field.as_widget()\n\[email protected]\ndef has_unrendered_errors(bound_field):\n \"\"\"\n Return true if this field has errors that were not accounted for by render_with_errors, because\n the widget does not support the render_with_errors method\n \"\"\"\n return bound_field.errors and not hasattr(bound_field.field.widget, 'render_with_errors')\n\n\[email protected](is_safe=True)\n@stringfilter\ndef cautious_slugify(value):\n return _cautious_slugify(value)\n", "path": "wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py"}, {"content": "import re\nimport unicodedata\nfrom six 
import string_types\n\nfrom django.db.models import Model\nfrom django.apps import apps\nfrom django.utils.encoding import force_text\nfrom django.utils.text import slugify\n\n\ndef camelcase_to_underscore(str):\n # http://djangosnippets.org/snippets/585/\n return re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\\\1', str).lower().strip('_')\n\n\ndef resolve_model_string(model_string, default_app=None):\n \"\"\"\n Resolve an 'app_label.model_name' string into an actual model class.\n If a model class is passed in, just return that.\n \"\"\"\n if isinstance(model_string, string_types):\n try:\n app_label, model_name = model_string.split(\".\")\n except ValueError:\n if default_app is not None:\n # If we can't split, assume a model in current app\n app_label = default_app\n model_name = model_string\n else:\n raise ValueError(\"Can not resolve {0!r} into a model. Model names \"\n \"should be in the form app_label.model_name\".format(\n model_string), model_string)\n\n return apps.get_model(app_label, model_name)\n\n elif isinstance(model_string, type) and issubclass(model_string, Model):\n return model_string\n\n else:\n raise LookupError(\"Can not resolve {0!r} into a model\".format(model_string), model_string)\n\n\nSCRIPT_RE = re.compile(r'<(-*)/script>')\ndef escape_script(text):\n \"\"\"\n Escape `</script>` tags in 'text' so that it can be placed within a `<script>` block without\n accidentally closing it. A '-' character will be inserted for each time it is escaped:\n `<-/script>`, `<--/script>` etc.\n \"\"\"\n return SCRIPT_RE.sub(r'<-\\1/script>', text)\n\n\nSLUGIFY_RE = re.compile(r'[^\\w\\s-]', re.UNICODE)\n\n\ndef cautious_slugify(value):\n \"\"\"\n Convert a string to ASCII exactly as Django's slugify does, with the exception\n that any non-ASCII alphanumeric characters (that cannot be ASCIIfied under Unicode\n normalisation) are escaped into codes like 'u0421' instead of being deleted entirely.\n\n This ensures that the result of slugifying e.g. Cyrillic text will not be an empty\n string, and can thus be safely used as an identifier (albeit not a human-readable one).\n \"\"\"\n value = force_text(value)\n\n # Normalize the string to decomposed unicode form. This causes accented Latin\n # characters to be split into 'base character' + 'accent modifier'; the latter will\n # be stripped out by the regexp, resulting in an ASCII-clean character that doesn't\n # need to be escaped\n value = unicodedata.normalize('NFKD', value)\n\n # Strip out characters that aren't letterlike, underscores or hyphens,\n # using the same regexp that slugify uses. This ensures that non-ASCII non-letters\n # (e.g. accent modifiers, fancy punctuation) get stripped rather than escaped\n value = SLUGIFY_RE.sub('', value)\n\n # Encode as ASCII, escaping non-ASCII characters with backslashreplace, then convert\n # back to a unicode string (which is what slugify expects)\n value = value.encode('ascii', 'backslashreplace').decode('ascii')\n\n # Pass to slugify to perform final conversion (whitespace stripping, applying\n # mark_safe); this will also strip out the backslashes from the 'backslashreplace'\n # conversion\n return slugify(value)\n", "path": "wagtail/wagtailcore/utils.py"}]} | 2,782 | 869 |
gh_patches_debug_50453 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-3837 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Maybe a bug about module checking
### Bug description
<!-- Use this section to clearly and concisely describe the bug. -->
If I use conda to install only jupyterhub and python (conda install -c conda-forge python=3.9 jupyterhub), the following message showed as someone try to login:
```
Failed to set groups [Errno 1] Operation not permitted
Traceback (most recent call last):
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/bin/jupyterhub-singleuser", line 7, in <module>
from jupyterhub.singleuser import main
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/__init__.py", line 5, in <module>
from .app import main
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py", line 38, in <module>
raise _import_error
TypeError: exceptions must derive from BaseException
```
I think the problem is the lines from 32 to 36 in jupyterhub/singleuser/app.py
```
except ImportError as e:
continue
if _import_error is None:
_import_error = e
else:
break
```
I changed that with:
```
except ImportError as e:
if _import_error is None:
_import_error = e
else:
break
continue
```
then the better message showed:
```
Failed to set groups [Errno 1] Operation not permitted
Traceback (most recent call last):
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/bin/jupyterhub-singleuser", line 7, in <module>
from jupyterhub.singleuser import main
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/__init__.py", line 5, in <module>
from .app import main
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py", line 38, in <module>
raise _import_error
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py", line 30, in <module>
App = import_item(JUPYTERHUB_SINGLEUSER_APP)
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/traitlets/utils/importstring.py", line 30, in import_item
module = __import__(package, fromlist=[obj])
ModuleNotFoundError: No module named 'jupyter_server'
```
The above message let me know that I have to install jupyter_server.
This issue can be closed anytime.
Any suggestion is welcome.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jupyterhub/singleuser/app.py`
Content:
```
1 """Make a single-user app based on the environment:
2
3 - $JUPYTERHUB_SINGLEUSER_APP, the base Application class, to be wrapped in JupyterHub authentication.
4 default: jupyter_server.serverapp.ServerApp
5
6 .. versionchanged:: 2.0
7
8 Default app changed to launch `jupyter labhub`.
9 Use JUPYTERHUB_SINGLEUSER_APP=notebook.notebookapp.NotebookApp for the legacy 'classic' notebook server.
10 """
11 import os
12
13 from traitlets import import_item
14
15 from .mixins import make_singleuser_app
16
17 JUPYTERHUB_SINGLEUSER_APP = os.environ.get("JUPYTERHUB_SINGLEUSER_APP")
18
19
20 if JUPYTERHUB_SINGLEUSER_APP:
21 App = import_item(JUPYTERHUB_SINGLEUSER_APP)
22 else:
23 App = None
24 _import_error = None
25 for JUPYTERHUB_SINGLEUSER_APP in (
26 "jupyter_server.serverapp.ServerApp",
27 "notebook.notebookapp.NotebookApp",
28 ):
29 try:
30 App = import_item(JUPYTERHUB_SINGLEUSER_APP)
31 except ImportError as e:
32 continue
33 if _import_error is None:
34 _import_error = e
35 else:
36 break
37 if App is None:
38 raise _import_error
39
40
41 SingleUserNotebookApp = make_singleuser_app(App)
42
43
44 def main():
45 """Launch a jupyterhub single-user server"""
46 if not os.environ.get("JUPYTERHUB_SINGLEUSER_APP"):
47 # app not specified, launch jupyter-labhub by default,
48 # if jupyterlab is recent enough (3.1).
49 # This is a minimally extended ServerApp that does:
50 # 1. ensure lab extension is enabled, and
51 # 2. set default URL to `/lab`
52 import re
53
54 _version_pat = re.compile(r"(\d+)\.(\d+)")
55 try:
56 import jupyterlab
57 from jupyterlab.labhubapp import SingleUserLabApp
58
59 m = _version_pat.match(jupyterlab.__version__)
60 except Exception:
61 m = None
62
63 if m is not None:
64 version_tuple = tuple(int(v) for v in m.groups())
65 if version_tuple >= (3, 1):
66 return SingleUserLabApp.launch_instance()
67
68 return SingleUserNotebookApp.launch_instance()
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/jupyterhub/singleuser/app.py b/jupyterhub/singleuser/app.py
--- a/jupyterhub/singleuser/app.py
+++ b/jupyterhub/singleuser/app.py
@@ -29,9 +29,9 @@
try:
App = import_item(JUPYTERHUB_SINGLEUSER_APP)
except ImportError as e:
- continue
if _import_error is None:
_import_error = e
+ continue
else:
break
if App is None:
| {"golden_diff": "diff --git a/jupyterhub/singleuser/app.py b/jupyterhub/singleuser/app.py\n--- a/jupyterhub/singleuser/app.py\n+++ b/jupyterhub/singleuser/app.py\n@@ -29,9 +29,9 @@\n try:\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\n except ImportError as e:\n- continue\n if _import_error is None:\n _import_error = e\n+ continue\n else:\n break\n if App is None:\n", "issue": "Maybe a bug about module checking\n### Bug description\r\n<!-- Use this section to clearly and concisely describe the bug. -->\r\nIf I use conda to install only jupyterhub and python (conda install -c conda-forge python=3.9 jupyterhub), the following message showed as someone try to login:\r\n\r\n```\r\nFailed to set groups [Errno 1] Operation not permitted\r\nTraceback (most recent call last):\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/bin/jupyterhub-singleuser\", line 7, in <module>\r\n from jupyterhub.singleuser import main\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/__init__.py\", line 5, in <module>\r\n from .app import main\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py\", line 38, in <module>\r\n raise _import_error\r\nTypeError: exceptions must derive from BaseException\r\n```\r\nI think the problem is the lines from 32 to 36 in jupyterhub/singleuser/app.py\r\n```\r\n except ImportError as e:\r\n continue\r\n if _import_error is None:\r\n _import_error = e\r\n else:\r\n break\r\n```\r\n\r\nI changed that with:\r\n```\r\n except ImportError as e:\r\n if _import_error is None:\r\n _import_error = e\r\n else:\r\n break\r\n continue\r\n```\r\nthen the better message showed:\r\n```\r\nFailed to set groups [Errno 1] Operation not permitted\r\nTraceback (most recent call last):\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/bin/jupyterhub-singleuser\", line 7, in <module>\r\n from jupyterhub.singleuser import main\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/__init__.py\", line 5, in <module>\r\n from .app import main\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py\", line 38, in <module>\r\n raise _import_error\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py\", line 30, in <module>\r\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/traitlets/utils/importstring.py\", line 30, in import_item\r\n module = __import__(package, fromlist=[obj])\r\nModuleNotFoundError: No module named 'jupyter_server'\r\n```\r\nThe above message let me know that I have to install jupyter_server.\r\nThis issue can be closed anytime.\r\nAny suggestion is welcome.\r\n\n", "before_files": [{"content": "\"\"\"Make a single-user app based on the environment:\n\n- $JUPYTERHUB_SINGLEUSER_APP, the base Application class, to be wrapped in JupyterHub authentication.\n default: jupyter_server.serverapp.ServerApp\n\n.. 
versionchanged:: 2.0\n\n Default app changed to launch `jupyter labhub`.\n Use JUPYTERHUB_SINGLEUSER_APP=notebook.notebookapp.NotebookApp for the legacy 'classic' notebook server.\n\"\"\"\nimport os\n\nfrom traitlets import import_item\n\nfrom .mixins import make_singleuser_app\n\nJUPYTERHUB_SINGLEUSER_APP = os.environ.get(\"JUPYTERHUB_SINGLEUSER_APP\")\n\n\nif JUPYTERHUB_SINGLEUSER_APP:\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\nelse:\n App = None\n _import_error = None\n for JUPYTERHUB_SINGLEUSER_APP in (\n \"jupyter_server.serverapp.ServerApp\",\n \"notebook.notebookapp.NotebookApp\",\n ):\n try:\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\n except ImportError as e:\n continue\n if _import_error is None:\n _import_error = e\n else:\n break\n if App is None:\n raise _import_error\n\n\nSingleUserNotebookApp = make_singleuser_app(App)\n\n\ndef main():\n \"\"\"Launch a jupyterhub single-user server\"\"\"\n if not os.environ.get(\"JUPYTERHUB_SINGLEUSER_APP\"):\n # app not specified, launch jupyter-labhub by default,\n # if jupyterlab is recent enough (3.1).\n # This is a minimally extended ServerApp that does:\n # 1. ensure lab extension is enabled, and\n # 2. set default URL to `/lab`\n import re\n\n _version_pat = re.compile(r\"(\\d+)\\.(\\d+)\")\n try:\n import jupyterlab\n from jupyterlab.labhubapp import SingleUserLabApp\n\n m = _version_pat.match(jupyterlab.__version__)\n except Exception:\n m = None\n\n if m is not None:\n version_tuple = tuple(int(v) for v in m.groups())\n if version_tuple >= (3, 1):\n return SingleUserLabApp.launch_instance()\n\n return SingleUserNotebookApp.launch_instance()\n", "path": "jupyterhub/singleuser/app.py"}], "after_files": [{"content": "\"\"\"Make a single-user app based on the environment:\n\n- $JUPYTERHUB_SINGLEUSER_APP, the base Application class, to be wrapped in JupyterHub authentication.\n default: jupyter_server.serverapp.ServerApp\n\n.. versionchanged:: 2.0\n\n Default app changed to launch `jupyter labhub`.\n Use JUPYTERHUB_SINGLEUSER_APP=notebook.notebookapp.NotebookApp for the legacy 'classic' notebook server.\n\"\"\"\nimport os\n\nfrom traitlets import import_item\n\nfrom .mixins import make_singleuser_app\n\nJUPYTERHUB_SINGLEUSER_APP = os.environ.get(\"JUPYTERHUB_SINGLEUSER_APP\")\n\n\nif JUPYTERHUB_SINGLEUSER_APP:\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\nelse:\n App = None\n _import_error = None\n for JUPYTERHUB_SINGLEUSER_APP in (\n \"jupyter_server.serverapp.ServerApp\",\n \"notebook.notebookapp.NotebookApp\",\n ):\n try:\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\n except ImportError as e:\n if _import_error is None:\n _import_error = e\n continue\n else:\n break\n if App is None:\n raise _import_error\n\n\nSingleUserNotebookApp = make_singleuser_app(App)\n\n\ndef main():\n \"\"\"Launch a jupyterhub single-user server\"\"\"\n if not os.environ.get(\"JUPYTERHUB_SINGLEUSER_APP\"):\n # app not specified, launch jupyter-labhub by default,\n # if jupyterlab is recent enough (3.1).\n # This is a minimally extended ServerApp that does:\n # 1. ensure lab extension is enabled, and\n # 2. 
set default URL to `/lab`\n import re\n\n _version_pat = re.compile(r\"(\\d+)\\.(\\d+)\")\n try:\n import jupyterlab\n from jupyterlab.labhubapp import SingleUserLabApp\n\n m = _version_pat.match(jupyterlab.__version__)\n except Exception:\n m = None\n\n if m is not None:\n version_tuple = tuple(int(v) for v in m.groups())\n if version_tuple >= (3, 1):\n return SingleUserLabApp.launch_instance()\n\n return SingleUserNotebookApp.launch_instance()\n", "path": "jupyterhub/singleuser/app.py"}]} | 1,619 | 111 |
gh_patches_debug_39273 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3126 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider jimmy-johns is broken
During the global build at 2021-09-29-14-42-48, spider **jimmy-johns** failed with **0 features** and **1544 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/logs/jimmy-johns.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/output/jimmy-johns.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/output/jimmy-johns.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/jimmy_johns.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4
5 from locations.items import GeojsonPointItem
6
7 STATES = ["AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA",
8 "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
9 "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
10 "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
11 "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"]
12 HEADERS = { 'Content-Type': 'application/json' }
13 JJBASE = 'https://www.jimmyjohns.com/webservices/Location/LocationServiceHandler.asmx/{}'
14 CITIES = JJBASE.format('GetCitiesByStateNameAbbreviation')
15 STORES = JJBASE.format('GetStoreAddressesByCityAndState')
16
17 class JimmyJohnsSpider(scrapy.Spider):
18 name = "jimmy-johns"
19 item_attributes = { 'brand': "Jimmy John's", 'brand_wikidata': "Q1689380" }
20 allowed_domains = ["www.jimmyjohns.com"]
21 download_delay = 0.2
22
23 def start_requests(self):
24 for state in STATES:
25 current_state = json.dumps({ 'state': state })
26 request = scrapy.Request(
27 CITIES,
28 method='POST',
29 body=current_state,
30 headers=HEADERS,
31 callback=self.parse_cities
32 )
33 request.meta['state'] = state
34 yield request
35
36 def parse_cities(self, response):
37 cities = json.loads(response.body)
38 for city in cities['d']:
39 current_city = json.dumps({ 'state': response.meta['state'], 'city': city })
40 request = scrapy.Request(
41 STORES,
42 method='POST',
43 body=current_city,
44 headers=HEADERS,
45 callback=self.parse
46 )
47 yield request
48
49 def parse(self, response):
50 stores = json.loads(response.body)
51 for store in stores['d']:
52 full = '{}, {}, {} {}'.format(store['address'], store['city'], store['state'], store['postalcode'])
53 yield GeojsonPointItem(
54 name=store['storename'],
55 addr_full=full,
56 opening_hours=store['hours'],
57 phone=store['telephone'],
58 ref=store['storeid'],
59 lon=float(store['lng']),
60 lat=float(store['lat']),
61 )
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/jimmy_johns.py b/locations/spiders/jimmy_johns.py
--- a/locations/spiders/jimmy_johns.py
+++ b/locations/spiders/jimmy_johns.py
@@ -1,61 +1,36 @@
# -*- coding: utf-8 -*-
+from os import stat
import scrapy
+from urllib import parse
import json
-
from locations.items import GeojsonPointItem
-STATES = ["AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA",
- "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
- "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
- "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
- "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"]
-HEADERS = { 'Content-Type': 'application/json' }
-JJBASE = 'https://www.jimmyjohns.com/webservices/Location/LocationServiceHandler.asmx/{}'
-CITIES = JJBASE.format('GetCitiesByStateNameAbbreviation')
-STORES = JJBASE.format('GetStoreAddressesByCityAndState')
+class TemplateSpider(scrapy.Spider):
+ name = "jimmy_johns"
+ allowed_domains = ["locations.jimmyjohns.com"]
+ start_urls = (
+ 'https://locations.jimmyjohns.com/sitemap.xml',
+ )
-class JimmyJohnsSpider(scrapy.Spider):
- name = "jimmy-johns"
- item_attributes = { 'brand': "Jimmy John's", 'brand_wikidata': "Q1689380" }
- allowed_domains = ["www.jimmyjohns.com"]
- download_delay = 0.2
+ def parse(self, response):
+ stores = response.xpath('//url/loc[contains(text(),"sandwiches")]/text()').extract()
+ for store in stores:
+ yield scrapy.Request(response.urljoin(store), callback=self.parse_store)
- def start_requests(self):
- for state in STATES:
- current_state = json.dumps({ 'state': state })
- request = scrapy.Request(
- CITIES,
- method='POST',
- body=current_state,
- headers=HEADERS,
- callback=self.parse_cities
- )
- request.meta['state'] = state
- yield request
+ def parse_store(self, response):
+ data = json.loads(response.xpath('//script[@type="application/ld+json"]//text()').extract_first())
- def parse_cities(self, response):
- cities = json.loads(response.body)
- for city in cities['d']:
- current_city = json.dumps({ 'state': response.meta['state'], 'city': city })
- request = scrapy.Request(
- STORES,
- method='POST',
- body=current_city,
- headers=HEADERS,
- callback=self.parse
- )
- yield request
+ properties = {
+ 'ref': data[0]['url'],
+ 'addr_full': data[0]['address']['streetAddress'],
+ 'city': data[0]['address']['addressLocality'],
+ 'state': data[0]['address']['addressRegion'],
+ 'postcode': data[0]['address']['postalCode'],
+ 'website': response.url,
+ 'lat': data[0]['geo']['latitude'],
+ 'lon': data[0]['geo']['longitude'],
+ }
+ if data[0]['address']['telephone']:
+ properties['phone'] = data[0]['address']['telephone']
- def parse(self, response):
- stores = json.loads(response.body)
- for store in stores['d']:
- full = '{}, {}, {} {}'.format(store['address'], store['city'], store['state'], store['postalcode'])
- yield GeojsonPointItem(
- name=store['storename'],
- addr_full=full,
- opening_hours=store['hours'],
- phone=store['telephone'],
- ref=store['storeid'],
- lon=float(store['lng']),
- lat=float(store['lat']),
- )
+ yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/jimmy_johns.py b/locations/spiders/jimmy_johns.py\n--- a/locations/spiders/jimmy_johns.py\n+++ b/locations/spiders/jimmy_johns.py\n@@ -1,61 +1,36 @@\n # -*- coding: utf-8 -*-\n+from os import stat\n import scrapy\n+from urllib import parse\n import json\n-\n from locations.items import GeojsonPointItem\n \n-STATES = [\"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DC\", \"DE\", \"FL\", \"GA\",\n- \"HI\", \"ID\", \"IL\", \"IN\", \"IA\", \"KS\", \"KY\", \"LA\", \"ME\", \"MD\",\n- \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\", \"NJ\",\n- \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\",\n- \"SD\", \"TN\", \"TX\", \"UT\", \"VT\", \"VA\", \"WA\", \"WV\", \"WI\", \"WY\"]\n-HEADERS = { 'Content-Type': 'application/json' }\n-JJBASE = 'https://www.jimmyjohns.com/webservices/Location/LocationServiceHandler.asmx/{}'\n-CITIES = JJBASE.format('GetCitiesByStateNameAbbreviation')\n-STORES = JJBASE.format('GetStoreAddressesByCityAndState')\n+class TemplateSpider(scrapy.Spider):\n+ name = \"jimmy_johns\"\n+ allowed_domains = [\"locations.jimmyjohns.com\"]\n+ start_urls = (\n+ 'https://locations.jimmyjohns.com/sitemap.xml',\n+ )\n \n-class JimmyJohnsSpider(scrapy.Spider):\n- name = \"jimmy-johns\"\n- item_attributes = { 'brand': \"Jimmy John's\", 'brand_wikidata': \"Q1689380\" }\n- allowed_domains = [\"www.jimmyjohns.com\"]\n- download_delay = 0.2\n+ def parse(self, response):\n+ stores = response.xpath('//url/loc[contains(text(),\"sandwiches\")]/text()').extract()\n+ for store in stores:\n+ yield scrapy.Request(response.urljoin(store), callback=self.parse_store)\n \n- def start_requests(self):\n- for state in STATES:\n- current_state = json.dumps({ 'state': state })\n- request = scrapy.Request(\n- CITIES,\n- method='POST',\n- body=current_state,\n- headers=HEADERS,\n- callback=self.parse_cities\n- )\n- request.meta['state'] = state\n- yield request\n+ def parse_store(self, response):\n+ data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]//text()').extract_first())\n \n- def parse_cities(self, response):\n- cities = json.loads(response.body)\n- for city in cities['d']:\n- current_city = json.dumps({ 'state': response.meta['state'], 'city': city })\n- request = scrapy.Request(\n- STORES,\n- method='POST',\n- body=current_city,\n- headers=HEADERS,\n- callback=self.parse\n- )\n- yield request\n+ properties = {\n+ 'ref': data[0]['url'],\n+ 'addr_full': data[0]['address']['streetAddress'],\n+ 'city': data[0]['address']['addressLocality'],\n+ 'state': data[0]['address']['addressRegion'],\n+ 'postcode': data[0]['address']['postalCode'],\n+ 'website': response.url,\n+ 'lat': data[0]['geo']['latitude'],\n+ 'lon': data[0]['geo']['longitude'],\n+ }\n+ if data[0]['address']['telephone']:\n+ properties['phone'] = data[0]['address']['telephone']\n \n- def parse(self, response):\n- stores = json.loads(response.body)\n- for store in stores['d']:\n- full = '{}, {}, {} {}'.format(store['address'], store['city'], store['state'], store['postalcode'])\n- yield GeojsonPointItem(\n- name=store['storename'],\n- addr_full=full,\n- opening_hours=store['hours'],\n- phone=store['telephone'],\n- ref=store['storeid'],\n- lon=float(store['lng']),\n- lat=float(store['lat']),\n- )\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider jimmy-johns is broken\nDuring the global build at 2021-09-29-14-42-48, spider **jimmy-johns** failed with **0 features** and **1544 errors**.\n\nHere's [the 
log](https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/logs/jimmy-johns.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/output/jimmy-johns.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/output/jimmy-johns.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\n\nSTATES = [\"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DC\", \"DE\", \"FL\", \"GA\",\n \"HI\", \"ID\", \"IL\", \"IN\", \"IA\", \"KS\", \"KY\", \"LA\", \"ME\", \"MD\",\n \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\", \"NJ\",\n \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\",\n \"SD\", \"TN\", \"TX\", \"UT\", \"VT\", \"VA\", \"WA\", \"WV\", \"WI\", \"WY\"]\nHEADERS = { 'Content-Type': 'application/json' }\nJJBASE = 'https://www.jimmyjohns.com/webservices/Location/LocationServiceHandler.asmx/{}'\nCITIES = JJBASE.format('GetCitiesByStateNameAbbreviation')\nSTORES = JJBASE.format('GetStoreAddressesByCityAndState')\n\nclass JimmyJohnsSpider(scrapy.Spider):\n name = \"jimmy-johns\"\n item_attributes = { 'brand': \"Jimmy John's\", 'brand_wikidata': \"Q1689380\" }\n allowed_domains = [\"www.jimmyjohns.com\"]\n download_delay = 0.2\n\n def start_requests(self):\n for state in STATES:\n current_state = json.dumps({ 'state': state })\n request = scrapy.Request(\n CITIES,\n method='POST',\n body=current_state,\n headers=HEADERS,\n callback=self.parse_cities\n )\n request.meta['state'] = state\n yield request\n\n def parse_cities(self, response):\n cities = json.loads(response.body)\n for city in cities['d']:\n current_city = json.dumps({ 'state': response.meta['state'], 'city': city })\n request = scrapy.Request(\n STORES,\n method='POST',\n body=current_city,\n headers=HEADERS,\n callback=self.parse\n )\n yield request\n\n def parse(self, response):\n stores = json.loads(response.body)\n for store in stores['d']:\n full = '{}, {}, {} {}'.format(store['address'], store['city'], store['state'], store['postalcode'])\n yield GeojsonPointItem(\n name=store['storename'],\n addr_full=full,\n opening_hours=store['hours'],\n phone=store['telephone'],\n ref=store['storeid'],\n lon=float(store['lng']),\n lat=float(store['lat']),\n )\n", "path": "locations/spiders/jimmy_johns.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom os import stat\nimport scrapy\nfrom urllib import parse\nimport json\nfrom locations.items import GeojsonPointItem\n\nclass TemplateSpider(scrapy.Spider):\n name = \"jimmy_johns\"\n allowed_domains = [\"locations.jimmyjohns.com\"]\n start_urls = (\n 'https://locations.jimmyjohns.com/sitemap.xml',\n )\n\n def parse(self, response):\n stores = response.xpath('//url/loc[contains(text(),\"sandwiches\")]/text()').extract()\n for store in stores:\n yield scrapy.Request(response.urljoin(store), callback=self.parse_store)\n\n def parse_store(self, response):\n data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]//text()').extract_first())\n\n properties = {\n 'ref': data[0]['url'],\n 'addr_full': data[0]['address']['streetAddress'],\n 'city': data[0]['address']['addressLocality'],\n 'state': data[0]['address']['addressRegion'],\n 'postcode': data[0]['address']['postalCode'],\n 'website': response.url,\n 'lat': data[0]['geo']['latitude'],\n 'lon': data[0]['geo']['longitude'],\n }\n if data[0]['address']['telephone']:\n properties['phone'] = 
data[0]['address']['telephone']\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/jimmy_johns.py"}]} | 1,177 | 1,020 |
gh_patches_debug_28457 | rasdani/github-patches | git_diff | aws__aws-cli-329 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Don't shell out to rst2man.py
We've seen that shelling out to `rst2man.py` can be problematic.
In the "ideal" case (installing from a completely brand new python env or a new virtualenv) everything works.
The issue is some distro packages will rename the executable scripts provided by a package (sometimes `rst2man` or `rst2man-2.7.py` for example). `docutils` specifies this script as `rst2man.py` which is what you get if you use `pip` to install _everything_. The problem is that even if you `pip install awscli` if you install this into the system site-packages, pip will notice that `docutils` is already installed (for example via "sudo apt-get install python-docutils" or via the distro's package manager) and skip it.
If pip says docutils is installed then `import docutils` will work, but both the location of the `rst2man.py` script as well as what it's named can vary. This example may not even be on the PATH by default (homebrew puts things in /usr/local/share/python for example).
The code for `rst2man` is simple enough that we can directly use the code via `docutils` rather than piping to `rst2man`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/help.py`
Content:
```
1 # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import sys
14 import logging
15 import os
16 import platform
17 from subprocess import Popen, PIPE
18
19 from docutils.core import publish_string
20 import bcdoc
21 from bcdoc.clidocs import ReSTDocument
22 from bcdoc.clidocs import ProviderDocumentEventHandler
23 from bcdoc.clidocs import ServiceDocumentEventHandler
24 from bcdoc.clidocs import OperationDocumentEventHandler
25 import bcdoc.clidocevents
26 from bcdoc.textwriter import TextWriter
27
28 from awscli.argprocess import ParamShorthand
29
30
31 LOG = logging.getLogger('awscli.help')
32
33
34 class ExecutableNotFoundError(Exception):
35 def __init__(self, executable_name):
36 super(ExecutableNotFoundError, self).__init__(
37 'Could not find executable named "%s"' % executable_name)
38
39
40 def get_renderer():
41 """
42 Return the appropriate HelpRenderer implementation for the
43 current platform.
44 """
45 if platform.system() == 'Windows':
46 return WindowsHelpRenderer()
47 else:
48 return PosixHelpRenderer()
49
50
51 class HelpRenderer(object):
52 """
53 Interface for a help renderer.
54
55 The renderer is responsible for displaying the help content on
56 a particular platform.
57 """
58
59 def render(self, contents):
60 """
61 Each implementation of HelpRenderer must implement this
62 render method.
63 """
64 pass
65
66
67 class PosixHelpRenderer(HelpRenderer):
68 """
69 Render help content on a Posix-like system. This includes
70 Linux and MacOS X.
71 """
72
73 PAGER = 'less'
74
75 def get_pager_cmdline(self):
76 pager = self.PAGER
77 if 'MANPAGER' in os.environ:
78 pager = os.environ['MANPAGER']
79 elif 'PAGER' in os.environ:
80 pager = os.environ['PAGER']
81 return pager.split()
82
83 def render(self, contents):
84 rst2man = self._get_rst2man_name()
85 cmdline = [rst2man]
86 LOG.debug("Running command: %s", cmdline)
87 p2 = self._popen(cmdline, stdin=PIPE, stdout=PIPE)
88 p2.stdin.write(contents)
89 p2.stdin.close()
90 if not self._exists_on_path('groff'):
91 raise ExecutableNotFoundError('groff')
92 cmdline = ['groff', '-man', '-T', 'ascii']
93 LOG.debug("Running command: %s", cmdline)
94 p3 = self._popen(cmdline, stdin=p2.stdout, stdout=PIPE)
95 cmdline = self.get_pager_cmdline()
96 LOG.debug("Running command: %s", cmdline)
97 p4 = self._popen(cmdline, stdin=p3.stdout)
98 p4.communicate()
99 sys.exit(1)
100
101 def _get_rst2man_name(self):
102 if self._exists_on_path('rst2man.py'):
103 return 'rst2man.py'
104 elif self._exists_on_path('rst2man'):
105 # Some distros like ubuntu will rename rst2man.py to rst2man
106 # if you install their version (i.e. "apt-get install
107 # python-docutils"). Though they could technically rename
108 # this to anything we'll support it renamed to 'rst2man' by
109 # explicitly checking for this case ourself.
110 return 'rst2man'
111 else:
112 # Give them the original name as set from docutils.
113 raise ExecutableNotFoundError('rst2man.py')
114
115 def _exists_on_path(self, name):
116 # Since we're only dealing with POSIX systems, we can
117 # ignore things like PATHEXT.
118 return any([os.path.exists(os.path.join(p, name))
119 for p in os.environ.get('PATH', []).split(os.pathsep)])
120
121 def _popen(self, *args, **kwargs):
122 return Popen(*args, **kwargs)
123
124
125 class WindowsHelpRenderer(HelpRenderer):
126 """
127 Render help content on a Windows platform.
128 """
129
130 def render(self, contents):
131 text_output = publish_string(contents,
132 writer=TextWriter())
133 sys.stdout.write(text_output.decode('utf-8'))
134 sys.exit(1)
135
136
137 class RawRenderer(HelpRenderer):
138 """
139 Render help as the raw ReST document.
140 """
141
142 def render(self, contents):
143 sys.stdout.write(contents)
144 sys.exit(1)
145
146
147 class HelpCommand(object):
148 """
149 HelpCommand Interface
150 ---------------------
151 A HelpCommand object acts as the interface between objects in the
152 CLI (e.g. Providers, Services, Operations, etc.) and the documentation
153 system (bcdoc).
154
155 A HelpCommand object wraps the object from the CLI space and provides
156 a consistent interface to critical information needed by the
157 documentation pipeline such as the object's name, description, etc.
158
159 The HelpCommand object is passed to the component of the
160 documentation pipeline that fires documentation events. It is
161 then passed on to each document event handler that has registered
162 for the events.
163
164 All HelpCommand objects contain the following attributes:
165
166 + ``session`` - A ``botocore`` ``Session`` object.
167 + ``obj`` - The object that is being documented.
168 + ``command_table`` - A dict mapping command names to
169 callable objects.
170 + ``arg_table`` - A dict mapping argument names to callable objects.
171 + ``doc`` - A ``Document`` object that is used to collect the
172 generated documentation.
173
174 In addition, please note the `properties` defined below which are
175 required to allow the object to be used in the document pipeline.
176
177 Implementations of HelpCommand are provided here for Provider,
178 Service and Operation objects. Other implementations for other
179 types of objects might be needed for customization in plugins.
180 As long as the implementations conform to this basic interface
181 it should be possible to pass them to the documentation system
182 and generate interactive and static help files.
183 """
184
185 EventHandlerClass = None
186 """
187 Each subclass should define this class variable to point to the
188 EventHandler class used by this HelpCommand.
189 """
190
191 def __init__(self, session, obj, command_table, arg_table):
192 self.session = session
193 self.obj = obj
194 self.command_table = command_table
195 self.arg_table = arg_table
196 self.renderer = get_renderer()
197 self.doc = ReSTDocument(target='man')
198
199 @property
200 def event_class(self):
201 """
202 Return the ``event_class`` for this object.
203
204 The ``event_class`` is used by the documentation pipeline
205 when generating documentation events. For the event below::
206
207 doc-title.<event_class>.<name>
208
209 The document pipeline would use this property to determine
210 the ``event_class`` value.
211 """
212 pass
213
214 @property
215 def name(self):
216 """
217 Return the name of the wrapped object.
218
219 This would be called by the document pipeline to determine
220 the ``name`` to be inserted into the event, as shown above.
221 """
222 pass
223
224 def __call__(self, args, parsed_globals):
225 # Create an event handler for a Provider Document
226 instance = self.EventHandlerClass(self)
227 # Now generate all of the events for a Provider document.
228 # We pass ourselves along so that we can, in turn, get passed
229 # to all event handlers.
230 bcdoc.clidocevents.generate_events(self.session, self)
231 self.renderer.render(self.doc.getvalue())
232 instance.unregister()
233
234
235 class ProviderHelpCommand(HelpCommand):
236 """Implements top level help command.
237
238 This is what is called when ``aws help`` is run.
239
240 """
241 EventHandlerClass = ProviderDocumentEventHandler
242
243 def __init__(self, session, command_table, arg_table,
244 description, synopsis, usage):
245 HelpCommand.__init__(self, session, session.provider,
246 command_table, arg_table)
247 self.description = description
248 self.synopsis = synopsis
249 self.help_usage = usage
250
251 @property
252 def event_class(self):
253 return 'Provider'
254
255 @property
256 def name(self):
257 return self.obj.name
258
259
260 class ServiceHelpCommand(HelpCommand):
261 """Implements service level help.
262
263 This is the object invoked whenever a service command
264 help is implemented, e.g. ``aws ec2 help``.
265
266 """
267
268 EventHandlerClass = ServiceDocumentEventHandler
269
270 def __init__(self, session, obj, command_table, arg_table, name,
271 event_class):
272 super(ServiceHelpCommand, self).__init__(session, obj, command_table,
273 arg_table)
274 self._name = name
275 self._event_class = event_class
276
277 @property
278 def event_class(self):
279 return self._event_class
280
281 @property
282 def name(self):
283 return self._name
284
285
286 class OperationHelpCommand(HelpCommand):
287 """Implements operation level help.
288
289 This is the object invoked whenever help for a service is requested,
290 e.g. ``aws ec2 describe-instances help``.
291
292 """
293 EventHandlerClass = OperationDocumentEventHandler
294
295 def __init__(self, session, service, operation, arg_table, name,
296 event_class):
297 HelpCommand.__init__(self, session, operation, None, arg_table)
298 self.service = service
299 self.param_shorthand = ParamShorthand()
300 self._name = name
301 self._event_class = event_class
302
303 @property
304 def event_class(self):
305 return self._event_class
306
307 @property
308 def name(self):
309 return self._name
310
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/help.py b/awscli/help.py
--- a/awscli/help.py
+++ b/awscli/help.py
@@ -17,6 +17,7 @@
from subprocess import Popen, PIPE
from docutils.core import publish_string
+from docutils.writers import manpage
import bcdoc
from bcdoc.clidocs import ReSTDocument
from bcdoc.clidocs import ProviderDocumentEventHandler
@@ -81,21 +82,17 @@
return pager.split()
def render(self, contents):
- rst2man = self._get_rst2man_name()
- cmdline = [rst2man]
- LOG.debug("Running command: %s", cmdline)
- p2 = self._popen(cmdline, stdin=PIPE, stdout=PIPE)
- p2.stdin.write(contents)
- p2.stdin.close()
+ man_contents = publish_string(contents, writer=manpage.Writer())
if not self._exists_on_path('groff'):
raise ExecutableNotFoundError('groff')
cmdline = ['groff', '-man', '-T', 'ascii']
LOG.debug("Running command: %s", cmdline)
- p3 = self._popen(cmdline, stdin=p2.stdout, stdout=PIPE)
+ p3 = self._popen(cmdline, stdin=PIPE, stdout=PIPE)
+ groff_output = p3.communicate(input=man_contents)[0]
cmdline = self.get_pager_cmdline()
LOG.debug("Running command: %s", cmdline)
- p4 = self._popen(cmdline, stdin=p3.stdout)
- p4.communicate()
+ p4 = self._popen(cmdline, stdin=PIPE)
+ p4.communicate(input=groff_output)
sys.exit(1)
def _get_rst2man_name(self):
| {"golden_diff": "diff --git a/awscli/help.py b/awscli/help.py\n--- a/awscli/help.py\n+++ b/awscli/help.py\n@@ -17,6 +17,7 @@\n from subprocess import Popen, PIPE\n \n from docutils.core import publish_string\n+from docutils.writers import manpage\n import bcdoc\n from bcdoc.clidocs import ReSTDocument\n from bcdoc.clidocs import ProviderDocumentEventHandler\n@@ -81,21 +82,17 @@\n return pager.split()\n \n def render(self, contents):\n- rst2man = self._get_rst2man_name()\n- cmdline = [rst2man]\n- LOG.debug(\"Running command: %s\", cmdline)\n- p2 = self._popen(cmdline, stdin=PIPE, stdout=PIPE)\n- p2.stdin.write(contents)\n- p2.stdin.close()\n+ man_contents = publish_string(contents, writer=manpage.Writer())\n if not self._exists_on_path('groff'):\n raise ExecutableNotFoundError('groff')\n cmdline = ['groff', '-man', '-T', 'ascii']\n LOG.debug(\"Running command: %s\", cmdline)\n- p3 = self._popen(cmdline, stdin=p2.stdout, stdout=PIPE)\n+ p3 = self._popen(cmdline, stdin=PIPE, stdout=PIPE)\n+ groff_output = p3.communicate(input=man_contents)[0]\n cmdline = self.get_pager_cmdline()\n LOG.debug(\"Running command: %s\", cmdline)\n- p4 = self._popen(cmdline, stdin=p3.stdout)\n- p4.communicate()\n+ p4 = self._popen(cmdline, stdin=PIPE)\n+ p4.communicate(input=groff_output)\n sys.exit(1)\n \n def _get_rst2man_name(self):\n", "issue": "Don't shell out to rst2man.py\nWe've seen that shelling out to `rst2man.py` can be problematic.\n\nIn the \"ideal\" case (installing from a completely brand new python env or a new virtualenv) everything works.\n\nThe issue is some distro packages will rename the executable scripts provided by a package (sometimes `rst2man` or `rst2man-2.7.py` for example). `docutils` specifies this script as `rst2man.py` which is what you get if you use `pip` to install _everything_. The problem is that even if you `pip install awscli` if you install this into the system site-packages, pip will notice that `docutils` is already installed (for example via \"sudo apt-get install python-docutils\" or via the distro's package manager) and skip it.\n\nIf pip says docutils is installed then `import docutils` will work, but both the location of the `rst2man.py` script as well as what it's named can vary. This example may not even be on the PATH by default (homebrew puts things in /usr/local/share/python for example).\n\nThe code for `rst2man` is simple enough that we can directly use the code via `docutils` rather than piping to `rst2man`.\n\n", "before_files": [{"content": "# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nimport sys\nimport logging\nimport os\nimport platform\nfrom subprocess import Popen, PIPE\n\nfrom docutils.core import publish_string\nimport bcdoc\nfrom bcdoc.clidocs import ReSTDocument\nfrom bcdoc.clidocs import ProviderDocumentEventHandler\nfrom bcdoc.clidocs import ServiceDocumentEventHandler\nfrom bcdoc.clidocs import OperationDocumentEventHandler\nimport bcdoc.clidocevents\nfrom bcdoc.textwriter import TextWriter\n\nfrom awscli.argprocess import ParamShorthand\n\n\nLOG = logging.getLogger('awscli.help')\n\n\nclass ExecutableNotFoundError(Exception):\n def __init__(self, executable_name):\n super(ExecutableNotFoundError, self).__init__(\n 'Could not find executable named \"%s\"' % executable_name)\n\n\ndef get_renderer():\n \"\"\"\n Return the appropriate HelpRenderer implementation for the\n current platform.\n \"\"\"\n if platform.system() == 'Windows':\n return WindowsHelpRenderer()\n else:\n return PosixHelpRenderer()\n\n\nclass HelpRenderer(object):\n \"\"\"\n Interface for a help renderer.\n\n The renderer is responsible for displaying the help content on\n a particular platform.\n \"\"\"\n\n def render(self, contents):\n \"\"\"\n Each implementation of HelpRenderer must implement this\n render method.\n \"\"\"\n pass\n\n\nclass PosixHelpRenderer(HelpRenderer):\n \"\"\"\n Render help content on a Posix-like system. This includes\n Linux and MacOS X.\n \"\"\"\n\n PAGER = 'less'\n\n def get_pager_cmdline(self):\n pager = self.PAGER\n if 'MANPAGER' in os.environ:\n pager = os.environ['MANPAGER']\n elif 'PAGER' in os.environ:\n pager = os.environ['PAGER']\n return pager.split()\n\n def render(self, contents):\n rst2man = self._get_rst2man_name()\n cmdline = [rst2man]\n LOG.debug(\"Running command: %s\", cmdline)\n p2 = self._popen(cmdline, stdin=PIPE, stdout=PIPE)\n p2.stdin.write(contents)\n p2.stdin.close()\n if not self._exists_on_path('groff'):\n raise ExecutableNotFoundError('groff')\n cmdline = ['groff', '-man', '-T', 'ascii']\n LOG.debug(\"Running command: %s\", cmdline)\n p3 = self._popen(cmdline, stdin=p2.stdout, stdout=PIPE)\n cmdline = self.get_pager_cmdline()\n LOG.debug(\"Running command: %s\", cmdline)\n p4 = self._popen(cmdline, stdin=p3.stdout)\n p4.communicate()\n sys.exit(1)\n\n def _get_rst2man_name(self):\n if self._exists_on_path('rst2man.py'):\n return 'rst2man.py'\n elif self._exists_on_path('rst2man'):\n # Some distros like ubuntu will rename rst2man.py to rst2man\n # if you install their version (i.e. \"apt-get install\n # python-docutils\"). 
Though they could technically rename\n # this to anything we'll support it renamed to 'rst2man' by\n # explicitly checking for this case ourself.\n return 'rst2man'\n else:\n # Give them the original name as set from docutils.\n raise ExecutableNotFoundError('rst2man.py')\n\n def _exists_on_path(self, name):\n # Since we're only dealing with POSIX systems, we can\n # ignore things like PATHEXT.\n return any([os.path.exists(os.path.join(p, name))\n for p in os.environ.get('PATH', []).split(os.pathsep)])\n\n def _popen(self, *args, **kwargs):\n return Popen(*args, **kwargs)\n\n\nclass WindowsHelpRenderer(HelpRenderer):\n \"\"\"\n Render help content on a Windows platform.\n \"\"\"\n\n def render(self, contents):\n text_output = publish_string(contents,\n writer=TextWriter())\n sys.stdout.write(text_output.decode('utf-8'))\n sys.exit(1)\n\n\nclass RawRenderer(HelpRenderer):\n \"\"\"\n Render help as the raw ReST document.\n \"\"\"\n\n def render(self, contents):\n sys.stdout.write(contents)\n sys.exit(1)\n\n\nclass HelpCommand(object):\n \"\"\"\n HelpCommand Interface\n ---------------------\n A HelpCommand object acts as the interface between objects in the\n CLI (e.g. Providers, Services, Operations, etc.) and the documentation\n system (bcdoc).\n\n A HelpCommand object wraps the object from the CLI space and provides\n a consistent interface to critical information needed by the\n documentation pipeline such as the object's name, description, etc.\n\n The HelpCommand object is passed to the component of the\n documentation pipeline that fires documentation events. It is\n then passed on to each document event handler that has registered\n for the events.\n\n All HelpCommand objects contain the following attributes:\n\n + ``session`` - A ``botocore`` ``Session`` object.\n + ``obj`` - The object that is being documented.\n + ``command_table`` - A dict mapping command names to\n callable objects.\n + ``arg_table`` - A dict mapping argument names to callable objects.\n + ``doc`` - A ``Document`` object that is used to collect the\n generated documentation.\n\n In addition, please note the `properties` defined below which are\n required to allow the object to be used in the document pipeline.\n\n Implementations of HelpCommand are provided here for Provider,\n Service and Operation objects. Other implementations for other\n types of objects might be needed for customization in plugins.\n As long as the implementations conform to this basic interface\n it should be possible to pass them to the documentation system\n and generate interactive and static help files.\n \"\"\"\n\n EventHandlerClass = None\n \"\"\"\n Each subclass should define this class variable to point to the\n EventHandler class used by this HelpCommand.\n \"\"\"\n\n def __init__(self, session, obj, command_table, arg_table):\n self.session = session\n self.obj = obj\n self.command_table = command_table\n self.arg_table = arg_table\n self.renderer = get_renderer()\n self.doc = ReSTDocument(target='man')\n\n @property\n def event_class(self):\n \"\"\"\n Return the ``event_class`` for this object.\n\n The ``event_class`` is used by the documentation pipeline\n when generating documentation events. 
For the event below::\n\n doc-title.<event_class>.<name>\n\n The document pipeline would use this property to determine\n the ``event_class`` value.\n \"\"\"\n pass\n\n @property\n def name(self):\n \"\"\"\n Return the name of the wrapped object.\n\n This would be called by the document pipeline to determine\n the ``name`` to be inserted into the event, as shown above.\n \"\"\"\n pass\n\n def __call__(self, args, parsed_globals):\n # Create an event handler for a Provider Document\n instance = self.EventHandlerClass(self)\n # Now generate all of the events for a Provider document.\n # We pass ourselves along so that we can, in turn, get passed\n # to all event handlers.\n bcdoc.clidocevents.generate_events(self.session, self)\n self.renderer.render(self.doc.getvalue())\n instance.unregister()\n\n\nclass ProviderHelpCommand(HelpCommand):\n \"\"\"Implements top level help command.\n\n This is what is called when ``aws help`` is run.\n\n \"\"\"\n EventHandlerClass = ProviderDocumentEventHandler\n\n def __init__(self, session, command_table, arg_table,\n description, synopsis, usage):\n HelpCommand.__init__(self, session, session.provider,\n command_table, arg_table)\n self.description = description\n self.synopsis = synopsis\n self.help_usage = usage\n\n @property\n def event_class(self):\n return 'Provider'\n\n @property\n def name(self):\n return self.obj.name\n\n\nclass ServiceHelpCommand(HelpCommand):\n \"\"\"Implements service level help.\n\n This is the object invoked whenever a service command\n help is implemented, e.g. ``aws ec2 help``.\n\n \"\"\"\n\n EventHandlerClass = ServiceDocumentEventHandler\n\n def __init__(self, session, obj, command_table, arg_table, name,\n event_class):\n super(ServiceHelpCommand, self).__init__(session, obj, command_table,\n arg_table)\n self._name = name\n self._event_class = event_class\n\n @property\n def event_class(self):\n return self._event_class\n\n @property\n def name(self):\n return self._name\n\n\nclass OperationHelpCommand(HelpCommand):\n \"\"\"Implements operation level help.\n\n This is the object invoked whenever help for a service is requested,\n e.g. ``aws ec2 describe-instances help``.\n\n \"\"\"\n EventHandlerClass = OperationDocumentEventHandler\n\n def __init__(self, session, service, operation, arg_table, name,\n event_class):\n HelpCommand.__init__(self, session, operation, None, arg_table)\n self.service = service\n self.param_shorthand = ParamShorthand()\n self._name = name\n self._event_class = event_class\n\n @property\n def event_class(self):\n return self._event_class\n\n @property\n def name(self):\n return self._name\n", "path": "awscli/help.py"}], "after_files": [{"content": "# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nimport sys\nimport logging\nimport os\nimport platform\nfrom subprocess import Popen, PIPE\n\nfrom docutils.core import publish_string\nfrom docutils.writers import manpage\nimport bcdoc\nfrom bcdoc.clidocs import ReSTDocument\nfrom bcdoc.clidocs import ProviderDocumentEventHandler\nfrom bcdoc.clidocs import ServiceDocumentEventHandler\nfrom bcdoc.clidocs import OperationDocumentEventHandler\nimport bcdoc.clidocevents\nfrom bcdoc.textwriter import TextWriter\n\nfrom awscli.argprocess import ParamShorthand\n\n\nLOG = logging.getLogger('awscli.help')\n\n\nclass ExecutableNotFoundError(Exception):\n def __init__(self, executable_name):\n super(ExecutableNotFoundError, self).__init__(\n 'Could not find executable named \"%s\"' % executable_name)\n\n\ndef get_renderer():\n \"\"\"\n Return the appropriate HelpRenderer implementation for the\n current platform.\n \"\"\"\n if platform.system() == 'Windows':\n return WindowsHelpRenderer()\n else:\n return PosixHelpRenderer()\n\n\nclass HelpRenderer(object):\n \"\"\"\n Interface for a help renderer.\n\n The renderer is responsible for displaying the help content on\n a particular platform.\n \"\"\"\n\n def render(self, contents):\n \"\"\"\n Each implementation of HelpRenderer must implement this\n render method.\n \"\"\"\n pass\n\n\nclass PosixHelpRenderer(HelpRenderer):\n \"\"\"\n Render help content on a Posix-like system. This includes\n Linux and MacOS X.\n \"\"\"\n\n PAGER = 'less'\n\n def get_pager_cmdline(self):\n pager = self.PAGER\n if 'MANPAGER' in os.environ:\n pager = os.environ['MANPAGER']\n elif 'PAGER' in os.environ:\n pager = os.environ['PAGER']\n return pager.split()\n\n def render(self, contents):\n man_contents = publish_string(contents, writer=manpage.Writer())\n if not self._exists_on_path('groff'):\n raise ExecutableNotFoundError('groff')\n cmdline = ['groff', '-man', '-T', 'ascii']\n LOG.debug(\"Running command: %s\", cmdline)\n p3 = self._popen(cmdline, stdin=PIPE, stdout=PIPE)\n groff_output = p3.communicate(input=man_contents)[0]\n cmdline = self.get_pager_cmdline()\n LOG.debug(\"Running command: %s\", cmdline)\n p4 = self._popen(cmdline, stdin=PIPE)\n p4.communicate(input=groff_output)\n sys.exit(1)\n\n def _get_rst2man_name(self):\n if self._exists_on_path('rst2man.py'):\n return 'rst2man.py'\n elif self._exists_on_path('rst2man'):\n # Some distros like ubuntu will rename rst2man.py to rst2man\n # if you install their version (i.e. \"apt-get install\n # python-docutils\"). 
Though they could technically rename\n # this to anything we'll support it renamed to 'rst2man' by\n # explicitly checking for this case ourself.\n return 'rst2man'\n else:\n # Give them the original name as set from docutils.\n raise ExecutableNotFoundError('rst2man.py')\n\n def _exists_on_path(self, name):\n # Since we're only dealing with POSIX systems, we can\n # ignore things like PATHEXT.\n return any([os.path.exists(os.path.join(p, name))\n for p in os.environ.get('PATH', []).split(os.pathsep)])\n\n def _popen(self, *args, **kwargs):\n return Popen(*args, **kwargs)\n\n\nclass WindowsHelpRenderer(HelpRenderer):\n \"\"\"\n Render help content on a Windows platform.\n \"\"\"\n\n def render(self, contents):\n text_output = publish_string(contents,\n writer=TextWriter())\n sys.stdout.write(text_output.decode('utf-8'))\n sys.exit(1)\n\n\nclass RawRenderer(HelpRenderer):\n \"\"\"\n Render help as the raw ReST document.\n \"\"\"\n\n def render(self, contents):\n sys.stdout.write(contents)\n sys.exit(1)\n\n\nclass HelpCommand(object):\n \"\"\"\n HelpCommand Interface\n ---------------------\n A HelpCommand object acts as the interface between objects in the\n CLI (e.g. Providers, Services, Operations, etc.) and the documentation\n system (bcdoc).\n\n A HelpCommand object wraps the object from the CLI space and provides\n a consistent interface to critical information needed by the\n documentation pipeline such as the object's name, description, etc.\n\n The HelpCommand object is passed to the component of the\n documentation pipeline that fires documentation events. It is\n then passed on to each document event handler that has registered\n for the events.\n\n All HelpCommand objects contain the following attributes:\n\n + ``session`` - A ``botocore`` ``Session`` object.\n + ``obj`` - The object that is being documented.\n + ``command_table`` - A dict mapping command names to\n callable objects.\n + ``arg_table`` - A dict mapping argument names to callable objects.\n + ``doc`` - A ``Document`` object that is used to collect the\n generated documentation.\n\n In addition, please note the `properties` defined below which are\n required to allow the object to be used in the document pipeline.\n\n Implementations of HelpCommand are provided here for Provider,\n Service and Operation objects. Other implementations for other\n types of objects might be needed for customization in plugins.\n As long as the implementations conform to this basic interface\n it should be possible to pass them to the documentation system\n and generate interactive and static help files.\n \"\"\"\n\n EventHandlerClass = None\n \"\"\"\n Each subclass should define this class variable to point to the\n EventHandler class used by this HelpCommand.\n \"\"\"\n\n def __init__(self, session, obj, command_table, arg_table):\n self.session = session\n self.obj = obj\n self.command_table = command_table\n self.arg_table = arg_table\n self.renderer = get_renderer()\n self.doc = ReSTDocument(target='man')\n\n @property\n def event_class(self):\n \"\"\"\n Return the ``event_class`` for this object.\n\n The ``event_class`` is used by the documentation pipeline\n when generating documentation events. 
For the event below::\n\n doc-title.<event_class>.<name>\n\n The document pipeline would use this property to determine\n the ``event_class`` value.\n \"\"\"\n pass\n\n @property\n def name(self):\n \"\"\"\n Return the name of the wrapped object.\n\n This would be called by the document pipeline to determine\n the ``name`` to be inserted into the event, as shown above.\n \"\"\"\n pass\n\n def __call__(self, args, parsed_globals):\n # Create an event handler for a Provider Document\n instance = self.EventHandlerClass(self)\n # Now generate all of the events for a Provider document.\n # We pass ourselves along so that we can, in turn, get passed\n # to all event handlers.\n bcdoc.clidocevents.generate_events(self.session, self)\n self.renderer.render(self.doc.getvalue())\n instance.unregister()\n\n\nclass ProviderHelpCommand(HelpCommand):\n \"\"\"Implements top level help command.\n\n This is what is called when ``aws help`` is run.\n\n \"\"\"\n EventHandlerClass = ProviderDocumentEventHandler\n\n def __init__(self, session, command_table, arg_table,\n description, synopsis, usage):\n HelpCommand.__init__(self, session, session.provider,\n command_table, arg_table)\n self.description = description\n self.synopsis = synopsis\n self.help_usage = usage\n\n @property\n def event_class(self):\n return 'Provider'\n\n @property\n def name(self):\n return self.obj.name\n\n\nclass ServiceHelpCommand(HelpCommand):\n \"\"\"Implements service level help.\n\n This is the object invoked whenever a service command\n help is implemented, e.g. ``aws ec2 help``.\n\n \"\"\"\n\n EventHandlerClass = ServiceDocumentEventHandler\n\n def __init__(self, session, obj, command_table, arg_table, name,\n event_class):\n super(ServiceHelpCommand, self).__init__(session, obj, command_table,\n arg_table)\n self._name = name\n self._event_class = event_class\n\n @property\n def event_class(self):\n return self._event_class\n\n @property\n def name(self):\n return self._name\n\n\nclass OperationHelpCommand(HelpCommand):\n \"\"\"Implements operation level help.\n\n This is the object invoked whenever help for a service is requested,\n e.g. ``aws ec2 describe-instances help``.\n\n \"\"\"\n EventHandlerClass = OperationDocumentEventHandler\n\n def __init__(self, session, service, operation, arg_table, name,\n event_class):\n HelpCommand.__init__(self, session, operation, None, arg_table)\n self.service = service\n self.param_shorthand = ParamShorthand()\n self._name = name\n self._event_class = event_class\n\n @property\n def event_class(self):\n return self._event_class\n\n @property\n def name(self):\n return self._name\n", "path": "awscli/help.py"}]} | 3,557 | 408 |
gh_patches_debug_28170 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1019 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"Run algorithm" page is missing the breadcrumb bar
Maybe this is on purpose, but I find it confusing that the page that allows users to upload images and run an algorithm does not show the breadcrumb bar to get back to the algorithm overview page.
Example: https://grand-challenge.org/algorithms/vertebra-segmentation/run/
To be consistent with the job list the title of that page could also read "Run this algorithm" instead "Run an algorithm"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/algorithms/views.py`
Content:
```
1 import logging
2
3 from dal import autocomplete
4 from django.conf import settings
5 from django.contrib.auth import get_user_model
6 from django.contrib.auth.mixins import (
7 PermissionRequiredMixin,
8 UserPassesTestMixin,
9 )
10 from django.contrib.messages.views import SuccessMessageMixin
11 from django.http import Http404
12 from django.views.generic import (
13 CreateView,
14 DetailView,
15 FormView,
16 ListView,
17 UpdateView,
18 )
19 from guardian.mixins import (
20 LoginRequiredMixin,
21 PermissionListMixin,
22 PermissionRequiredMixin as ObjectPermissionRequiredMixin,
23 )
24 from rest_framework.permissions import DjangoObjectPermissions
25 from rest_framework.viewsets import ReadOnlyModelViewSet
26 from rest_framework_guardian.filters import ObjectPermissionsFilter
27
28 from grandchallenge.algorithms.forms import (
29 AlgorithmForm,
30 AlgorithmImageForm,
31 AlgorithmImageUpdateForm,
32 EditorsForm,
33 UsersForm,
34 )
35 from grandchallenge.algorithms.models import (
36 Algorithm,
37 AlgorithmImage,
38 Job,
39 Result,
40 )
41 from grandchallenge.algorithms.serializers import (
42 AlgorithmImageSerializer,
43 AlgorithmSerializer,
44 JobSerializer,
45 ResultSerializer,
46 )
47 from grandchallenge.cases.forms import UploadRawImagesForm
48 from grandchallenge.cases.models import RawImageUploadSession
49 from grandchallenge.subdomains.utils import reverse
50
51 logger = logging.getLogger(__name__)
52
53
54 class AlgorithmCreate(LoginRequiredMixin, PermissionRequiredMixin, CreateView):
55 model = Algorithm
56 form_class = AlgorithmForm
57 permission_required = (
58 f"{Algorithm._meta.app_label}.add_{Algorithm._meta.model_name}"
59 )
60
61 def form_valid(self, form):
62 response = super().form_valid(form=form)
63 self.object.add_editor(self.request.user)
64 return response
65
66 def get_form_kwargs(self):
67 kwargs = super().get_form_kwargs()
68 kwargs.update({"user": self.request.user})
69 return kwargs
70
71
72 class AlgorithmList(PermissionListMixin, ListView):
73 model = Algorithm
74 permission_required = {
75 f"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}"
76 }
77
78 def get_queryset(self, *args, **kwargs):
79 # Add algorithms that are publicly visible
80 qs = super().get_queryset(*args, **kwargs)
81 qs |= Algorithm.objects.filter(visible_to_public=True)
82
83 return qs
84
85
86 class AlgorithmDetail(
87 LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView
88 ):
89 model = Algorithm
90 permission_required = (
91 f"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}"
92 )
93 raise_exception = True
94
95
96 class AlgorithmUpdate(
97 LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView
98 ):
99 model = Algorithm
100 form_class = AlgorithmForm
101 permission_required = (
102 f"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}"
103 )
104 raise_exception = True
105
106 def get_form_kwargs(self):
107 kwargs = super().get_form_kwargs()
108 kwargs.update({"user": self.request.user})
109 return kwargs
110
111
112 class AlgorithmUserAutocomplete(
113 LoginRequiredMixin, UserPassesTestMixin, autocomplete.Select2QuerySetView
114 ):
115 def test_func(self):
116 group_pks = (
117 Algorithm.objects.all()
118 .select_related("editors_group")
119 .values_list("editors_group__pk", flat=True)
120 )
121 return (
122 self.request.user.is_superuser
123 or self.request.user.groups.filter(pk__in=group_pks).exists()
124 )
125
126 def get_queryset(self):
127 qs = (
128 get_user_model()
129 .objects.all()
130 .order_by("username")
131 .exclude(username=settings.ANONYMOUS_USER_NAME)
132 )
133
134 if self.q:
135 qs = qs.filter(username__istartswith=self.q)
136
137 return qs
138
139
140 class AlgorithmUserGroupUpdateMixin(
141 LoginRequiredMixin,
142 ObjectPermissionRequiredMixin,
143 SuccessMessageMixin,
144 FormView,
145 ):
146 template_name = "algorithms/algorithm_user_groups_form.html"
147 permission_required = (
148 f"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}"
149 )
150 raise_exception = True
151
152 def get_permission_object(self):
153 return self.algorithm
154
155 @property
156 def algorithm(self):
157 return Algorithm.objects.get(slug=self.kwargs["slug"])
158
159 def get_context_data(self, **kwargs):
160 context = super().get_context_data(**kwargs)
161 context.update(
162 {"object": self.algorithm, "role": self.get_form().role}
163 )
164 return context
165
166 def get_success_url(self):
167 return self.algorithm.get_absolute_url()
168
169 def form_valid(self, form):
170 form.add_or_remove_user(algorithm=self.algorithm)
171 return super().form_valid(form)
172
173
174 class EditorsUpdate(AlgorithmUserGroupUpdateMixin):
175 form_class = EditorsForm
176 success_message = "Editors successfully updated"
177
178
179 class UsersUpdate(AlgorithmUserGroupUpdateMixin):
180 form_class = UsersForm
181 success_message = "Users successfully updated"
182
183
184 class AlgorithmImageCreate(
185 LoginRequiredMixin, ObjectPermissionRequiredMixin, CreateView
186 ):
187 model = AlgorithmImage
188 form_class = AlgorithmImageForm
189 permission_required = (
190 f"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}"
191 )
192 raise_exception = True
193
194 def get_form_kwargs(self):
195 kwargs = super().get_form_kwargs()
196 kwargs.update({"user": self.request.user})
197 return kwargs
198
199 @property
200 def algorithm(self):
201 return Algorithm.objects.get(slug=self.kwargs["slug"])
202
203 def get_permission_object(self):
204 return self.algorithm
205
206 def form_valid(self, form):
207 form.instance.creator = self.request.user
208 form.instance.algorithm = self.algorithm
209
210 uploaded_file = form.cleaned_data["chunked_upload"][0]
211 form.instance.staged_image_uuid = uploaded_file.uuid
212
213 return super().form_valid(form)
214
215
216 class AlgorithmImageDetail(
217 LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView
218 ):
219 model = AlgorithmImage
220 permission_required = f"{AlgorithmImage._meta.app_label}.view_{AlgorithmImage._meta.model_name}"
221 raise_exception = True
222
223
224 class AlgorithmImageUpdate(
225 LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView
226 ):
227 model = AlgorithmImage
228 form_class = AlgorithmImageUpdateForm
229 permission_required = f"{AlgorithmImage._meta.app_label}.change_{AlgorithmImage._meta.model_name}"
230 raise_exception = True
231
232
233 class AlgorithmExecutionSessionCreate(
234 LoginRequiredMixin,
235 ObjectPermissionRequiredMixin,
236 SuccessMessageMixin,
237 CreateView,
238 ):
239 model = RawImageUploadSession
240 form_class = UploadRawImagesForm
241 template_name = "algorithms/algorithm_execution_session_create.html"
242 success_message = (
243 "Your images have been uploaded, "
244 "please check back here to see the processing status."
245 )
246 permission_required = (
247 f"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}"
248 )
249 raise_exception = True
250
251 @property
252 def algorithm(self) -> Algorithm:
253 return Algorithm.objects.get(slug=self.kwargs["slug"])
254
255 def get_permission_object(self):
256 return self.algorithm
257
258 def get_initial(self):
259 if self.algorithm.latest_ready_image is None:
260 raise Http404()
261 return super().get_initial()
262
263 def get_form_kwargs(self):
264 kwargs = super().get_form_kwargs()
265 kwargs.update({"user": self.request.user})
266 return kwargs
267
268 def form_valid(self, form):
269 form.instance.creator = self.request.user
270 form.instance.algorithm_image = self.algorithm.latest_ready_image
271 return super().form_valid(form)
272
273 def get_success_url(self):
274 return reverse(
275 "algorithms:jobs-list", kwargs={"slug": self.kwargs["slug"]}
276 )
277
278
279 class AlgorithmJobsList(LoginRequiredMixin, PermissionListMixin, ListView):
280 model = Job
281 permission_required = f"{Job._meta.app_label}.view_{Job._meta.model_name}"
282
283 @property
284 def algorithm(self) -> Algorithm:
285 return Algorithm.objects.get(slug=self.kwargs["slug"])
286
287 def get_context_data(self, *args, **kwargs):
288 context = super().get_context_data(*args, **kwargs)
289 context.update({"algorithm": self.algorithm})
290 return context
291
292 def get_queryset(self, *args, **kwargs):
293 """Filter the jobs for this algorithm."""
294 qs = super().get_queryset(*args, **kwargs)
295 return qs.filter(algorithm_image__algorithm=self.algorithm)
296
297
298 class AlgorithmViewSet(ReadOnlyModelViewSet):
299 queryset = Algorithm.objects.all()
300 serializer_class = AlgorithmSerializer
301 permission_classes = [DjangoObjectPermissions]
302 filter_backends = [ObjectPermissionsFilter]
303
304
305 class AlgorithmImageViewSet(ReadOnlyModelViewSet):
306 queryset = AlgorithmImage.objects.all()
307 serializer_class = AlgorithmImageSerializer
308 permission_classes = [DjangoObjectPermissions]
309 filter_backends = [ObjectPermissionsFilter]
310
311
312 class JobViewSet(ReadOnlyModelViewSet):
313 queryset = Job.objects.all()
314 serializer_class = JobSerializer
315 permission_classes = [DjangoObjectPermissions]
316 filter_backends = [ObjectPermissionsFilter]
317
318
319 class ResultViewSet(ReadOnlyModelViewSet):
320 queryset = Result.objects.all()
321 serializer_class = ResultSerializer
322 permission_classes = [DjangoObjectPermissions]
323 filter_backends = [ObjectPermissionsFilter]
324
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/algorithms/views.py b/app/grandchallenge/algorithms/views.py
--- a/app/grandchallenge/algorithms/views.py
+++ b/app/grandchallenge/algorithms/views.py
@@ -212,6 +212,11 @@
return super().form_valid(form)
+ def get_context_data(self, *args, **kwargs):
+ context = super().get_context_data(*args, **kwargs)
+ context.update({"algorithm": self.algorithm})
+ return context
+
class AlgorithmImageDetail(
LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView
@@ -229,6 +234,11 @@
permission_required = f"{AlgorithmImage._meta.app_label}.change_{AlgorithmImage._meta.model_name}"
raise_exception = True
+ def get_context_data(self, *args, **kwargs):
+ context = super().get_context_data(*args, **kwargs)
+ context.update({"algorithm": self.object.algorithm})
+ return context
+
class AlgorithmExecutionSessionCreate(
LoginRequiredMixin,
@@ -270,6 +280,11 @@
form.instance.algorithm_image = self.algorithm.latest_ready_image
return super().form_valid(form)
+ def get_context_data(self, *args, **kwargs):
+ context = super().get_context_data(*args, **kwargs)
+ context.update({"algorithm": self.algorithm})
+ return context
+
def get_success_url(self):
return reverse(
"algorithms:jobs-list", kwargs={"slug": self.kwargs["slug"]}
| {"golden_diff": "diff --git a/app/grandchallenge/algorithms/views.py b/app/grandchallenge/algorithms/views.py\n--- a/app/grandchallenge/algorithms/views.py\n+++ b/app/grandchallenge/algorithms/views.py\n@@ -212,6 +212,11 @@\n \n return super().form_valid(form)\n \n+ def get_context_data(self, *args, **kwargs):\n+ context = super().get_context_data(*args, **kwargs)\n+ context.update({\"algorithm\": self.algorithm})\n+ return context\n+\n \n class AlgorithmImageDetail(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView\n@@ -229,6 +234,11 @@\n permission_required = f\"{AlgorithmImage._meta.app_label}.change_{AlgorithmImage._meta.model_name}\"\n raise_exception = True\n \n+ def get_context_data(self, *args, **kwargs):\n+ context = super().get_context_data(*args, **kwargs)\n+ context.update({\"algorithm\": self.object.algorithm})\n+ return context\n+\n \n class AlgorithmExecutionSessionCreate(\n LoginRequiredMixin,\n@@ -270,6 +280,11 @@\n form.instance.algorithm_image = self.algorithm.latest_ready_image\n return super().form_valid(form)\n \n+ def get_context_data(self, *args, **kwargs):\n+ context = super().get_context_data(*args, **kwargs)\n+ context.update({\"algorithm\": self.algorithm})\n+ return context\n+\n def get_success_url(self):\n return reverse(\n \"algorithms:jobs-list\", kwargs={\"slug\": self.kwargs[\"slug\"]}\n", "issue": "\"Run algorithm\" page is missing the breadcrumb bar\nMaybe this is on purpose, but I find it confusing that the page that allows users to upload images and run an algorithm does not show the breadcrumb bar to get back to the algorithm overview page.\r\n\r\nExample: https://grand-challenge.org/algorithms/vertebra-segmentation/run/\r\n\r\nTo be consistent with the job list the title of that page could also read \"Run this algorithm\" instead \"Run an algorithm\"\n", "before_files": [{"content": "import logging\n\nfrom dal import autocomplete\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.mixins import (\n PermissionRequiredMixin,\n UserPassesTestMixin,\n)\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.http import Http404\nfrom django.views.generic import (\n CreateView,\n DetailView,\n FormView,\n ListView,\n UpdateView,\n)\nfrom guardian.mixins import (\n LoginRequiredMixin,\n PermissionListMixin,\n PermissionRequiredMixin as ObjectPermissionRequiredMixin,\n)\nfrom rest_framework.permissions import DjangoObjectPermissions\nfrom rest_framework.viewsets import ReadOnlyModelViewSet\nfrom rest_framework_guardian.filters import ObjectPermissionsFilter\n\nfrom grandchallenge.algorithms.forms import (\n AlgorithmForm,\n AlgorithmImageForm,\n AlgorithmImageUpdateForm,\n EditorsForm,\n UsersForm,\n)\nfrom grandchallenge.algorithms.models import (\n Algorithm,\n AlgorithmImage,\n Job,\n Result,\n)\nfrom grandchallenge.algorithms.serializers import (\n AlgorithmImageSerializer,\n AlgorithmSerializer,\n JobSerializer,\n ResultSerializer,\n)\nfrom grandchallenge.cases.forms import UploadRawImagesForm\nfrom grandchallenge.cases.models import RawImageUploadSession\nfrom grandchallenge.subdomains.utils import reverse\n\nlogger = logging.getLogger(__name__)\n\n\nclass AlgorithmCreate(LoginRequiredMixin, PermissionRequiredMixin, CreateView):\n model = Algorithm\n form_class = AlgorithmForm\n permission_required = (\n f\"{Algorithm._meta.app_label}.add_{Algorithm._meta.model_name}\"\n )\n\n def form_valid(self, form):\n response = super().form_valid(form=form)\n 
self.object.add_editor(self.request.user)\n return response\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n\nclass AlgorithmList(PermissionListMixin, ListView):\n model = Algorithm\n permission_required = {\n f\"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}\"\n }\n\n def get_queryset(self, *args, **kwargs):\n # Add algorithms that are publicly visible\n qs = super().get_queryset(*args, **kwargs)\n qs |= Algorithm.objects.filter(visible_to_public=True)\n\n return qs\n\n\nclass AlgorithmDetail(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView\n):\n model = Algorithm\n permission_required = (\n f\"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}\"\n )\n raise_exception = True\n\n\nclass AlgorithmUpdate(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView\n):\n model = Algorithm\n form_class = AlgorithmForm\n permission_required = (\n f\"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}\"\n )\n raise_exception = True\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n\nclass AlgorithmUserAutocomplete(\n LoginRequiredMixin, UserPassesTestMixin, autocomplete.Select2QuerySetView\n):\n def test_func(self):\n group_pks = (\n Algorithm.objects.all()\n .select_related(\"editors_group\")\n .values_list(\"editors_group__pk\", flat=True)\n )\n return (\n self.request.user.is_superuser\n or self.request.user.groups.filter(pk__in=group_pks).exists()\n )\n\n def get_queryset(self):\n qs = (\n get_user_model()\n .objects.all()\n .order_by(\"username\")\n .exclude(username=settings.ANONYMOUS_USER_NAME)\n )\n\n if self.q:\n qs = qs.filter(username__istartswith=self.q)\n\n return qs\n\n\nclass AlgorithmUserGroupUpdateMixin(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n SuccessMessageMixin,\n FormView,\n):\n template_name = \"algorithms/algorithm_user_groups_form.html\"\n permission_required = (\n f\"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}\"\n )\n raise_exception = True\n\n def get_permission_object(self):\n return self.algorithm\n\n @property\n def algorithm(self):\n return Algorithm.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\"object\": self.algorithm, \"role\": self.get_form().role}\n )\n return context\n\n def get_success_url(self):\n return self.algorithm.get_absolute_url()\n\n def form_valid(self, form):\n form.add_or_remove_user(algorithm=self.algorithm)\n return super().form_valid(form)\n\n\nclass EditorsUpdate(AlgorithmUserGroupUpdateMixin):\n form_class = EditorsForm\n success_message = \"Editors successfully updated\"\n\n\nclass UsersUpdate(AlgorithmUserGroupUpdateMixin):\n form_class = UsersForm\n success_message = \"Users successfully updated\"\n\n\nclass AlgorithmImageCreate(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, CreateView\n):\n model = AlgorithmImage\n form_class = AlgorithmImageForm\n permission_required = (\n f\"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}\"\n )\n raise_exception = True\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n @property\n def algorithm(self):\n return Algorithm.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_permission_object(self):\n return self.algorithm\n\n def 
form_valid(self, form):\n form.instance.creator = self.request.user\n form.instance.algorithm = self.algorithm\n\n uploaded_file = form.cleaned_data[\"chunked_upload\"][0]\n form.instance.staged_image_uuid = uploaded_file.uuid\n\n return super().form_valid(form)\n\n\nclass AlgorithmImageDetail(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView\n):\n model = AlgorithmImage\n permission_required = f\"{AlgorithmImage._meta.app_label}.view_{AlgorithmImage._meta.model_name}\"\n raise_exception = True\n\n\nclass AlgorithmImageUpdate(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView\n):\n model = AlgorithmImage\n form_class = AlgorithmImageUpdateForm\n permission_required = f\"{AlgorithmImage._meta.app_label}.change_{AlgorithmImage._meta.model_name}\"\n raise_exception = True\n\n\nclass AlgorithmExecutionSessionCreate(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n SuccessMessageMixin,\n CreateView,\n):\n model = RawImageUploadSession\n form_class = UploadRawImagesForm\n template_name = \"algorithms/algorithm_execution_session_create.html\"\n success_message = (\n \"Your images have been uploaded, \"\n \"please check back here to see the processing status.\"\n )\n permission_required = (\n f\"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}\"\n )\n raise_exception = True\n\n @property\n def algorithm(self) -> Algorithm:\n return Algorithm.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_permission_object(self):\n return self.algorithm\n\n def get_initial(self):\n if self.algorithm.latest_ready_image is None:\n raise Http404()\n return super().get_initial()\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n form.instance.algorithm_image = self.algorithm.latest_ready_image\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse(\n \"algorithms:jobs-list\", kwargs={\"slug\": self.kwargs[\"slug\"]}\n )\n\n\nclass AlgorithmJobsList(LoginRequiredMixin, PermissionListMixin, ListView):\n model = Job\n permission_required = f\"{Job._meta.app_label}.view_{Job._meta.model_name}\"\n\n @property\n def algorithm(self) -> Algorithm:\n return Algorithm.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context.update({\"algorithm\": self.algorithm})\n return context\n\n def get_queryset(self, *args, **kwargs):\n \"\"\"Filter the jobs for this algorithm.\"\"\"\n qs = super().get_queryset(*args, **kwargs)\n return qs.filter(algorithm_image__algorithm=self.algorithm)\n\n\nclass AlgorithmViewSet(ReadOnlyModelViewSet):\n queryset = Algorithm.objects.all()\n serializer_class = AlgorithmSerializer\n permission_classes = [DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n\n\nclass AlgorithmImageViewSet(ReadOnlyModelViewSet):\n queryset = AlgorithmImage.objects.all()\n serializer_class = AlgorithmImageSerializer\n permission_classes = [DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n\n\nclass JobViewSet(ReadOnlyModelViewSet):\n queryset = Job.objects.all()\n serializer_class = JobSerializer\n permission_classes = [DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n\n\nclass ResultViewSet(ReadOnlyModelViewSet):\n queryset = Result.objects.all()\n serializer_class = ResultSerializer\n permission_classes = 
[DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n", "path": "app/grandchallenge/algorithms/views.py"}], "after_files": [{"content": "import logging\n\nfrom dal import autocomplete\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.mixins import (\n PermissionRequiredMixin,\n UserPassesTestMixin,\n)\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.http import Http404\nfrom django.views.generic import (\n CreateView,\n DetailView,\n FormView,\n ListView,\n UpdateView,\n)\nfrom guardian.mixins import (\n LoginRequiredMixin,\n PermissionListMixin,\n PermissionRequiredMixin as ObjectPermissionRequiredMixin,\n)\nfrom rest_framework.permissions import DjangoObjectPermissions\nfrom rest_framework.viewsets import ReadOnlyModelViewSet\nfrom rest_framework_guardian.filters import ObjectPermissionsFilter\n\nfrom grandchallenge.algorithms.forms import (\n AlgorithmForm,\n AlgorithmImageForm,\n AlgorithmImageUpdateForm,\n EditorsForm,\n UsersForm,\n)\nfrom grandchallenge.algorithms.models import (\n Algorithm,\n AlgorithmImage,\n Job,\n Result,\n)\nfrom grandchallenge.algorithms.serializers import (\n AlgorithmImageSerializer,\n AlgorithmSerializer,\n JobSerializer,\n ResultSerializer,\n)\nfrom grandchallenge.cases.forms import UploadRawImagesForm\nfrom grandchallenge.cases.models import RawImageUploadSession\nfrom grandchallenge.subdomains.utils import reverse\n\nlogger = logging.getLogger(__name__)\n\n\nclass AlgorithmCreate(LoginRequiredMixin, PermissionRequiredMixin, CreateView):\n model = Algorithm\n form_class = AlgorithmForm\n permission_required = (\n f\"{Algorithm._meta.app_label}.add_{Algorithm._meta.model_name}\"\n )\n\n def form_valid(self, form):\n response = super().form_valid(form=form)\n self.object.add_editor(self.request.user)\n return response\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n\nclass AlgorithmList(PermissionListMixin, ListView):\n model = Algorithm\n permission_required = {\n f\"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}\"\n }\n\n def get_queryset(self, *args, **kwargs):\n # Add algorithms that are publicly visible\n qs = super().get_queryset(*args, **kwargs)\n qs |= Algorithm.objects.filter(visible_to_public=True)\n\n return qs\n\n\nclass AlgorithmDetail(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView\n):\n model = Algorithm\n permission_required = (\n f\"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}\"\n )\n raise_exception = True\n\n\nclass AlgorithmUpdate(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView\n):\n model = Algorithm\n form_class = AlgorithmForm\n permission_required = (\n f\"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}\"\n )\n raise_exception = True\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n\nclass AlgorithmUserAutocomplete(\n LoginRequiredMixin, UserPassesTestMixin, autocomplete.Select2QuerySetView\n):\n def test_func(self):\n group_pks = (\n Algorithm.objects.all()\n .select_related(\"editors_group\")\n .values_list(\"editors_group__pk\", flat=True)\n )\n return (\n self.request.user.is_superuser\n or self.request.user.groups.filter(pk__in=group_pks).exists()\n )\n\n def get_queryset(self):\n qs = (\n get_user_model()\n .objects.all()\n .order_by(\"username\")\n 
.exclude(username=settings.ANONYMOUS_USER_NAME)\n )\n\n if self.q:\n qs = qs.filter(username__istartswith=self.q)\n\n return qs\n\n\nclass AlgorithmUserGroupUpdateMixin(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n SuccessMessageMixin,\n FormView,\n):\n template_name = \"algorithms/algorithm_user_groups_form.html\"\n permission_required = (\n f\"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}\"\n )\n raise_exception = True\n\n def get_permission_object(self):\n return self.algorithm\n\n @property\n def algorithm(self):\n return Algorithm.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\"object\": self.algorithm, \"role\": self.get_form().role}\n )\n return context\n\n def get_success_url(self):\n return self.algorithm.get_absolute_url()\n\n def form_valid(self, form):\n form.add_or_remove_user(algorithm=self.algorithm)\n return super().form_valid(form)\n\n\nclass EditorsUpdate(AlgorithmUserGroupUpdateMixin):\n form_class = EditorsForm\n success_message = \"Editors successfully updated\"\n\n\nclass UsersUpdate(AlgorithmUserGroupUpdateMixin):\n form_class = UsersForm\n success_message = \"Users successfully updated\"\n\n\nclass AlgorithmImageCreate(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, CreateView\n):\n model = AlgorithmImage\n form_class = AlgorithmImageForm\n permission_required = (\n f\"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}\"\n )\n raise_exception = True\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n @property\n def algorithm(self):\n return Algorithm.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_permission_object(self):\n return self.algorithm\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n form.instance.algorithm = self.algorithm\n\n uploaded_file = form.cleaned_data[\"chunked_upload\"][0]\n form.instance.staged_image_uuid = uploaded_file.uuid\n\n return super().form_valid(form)\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context.update({\"algorithm\": self.algorithm})\n return context\n\n\nclass AlgorithmImageDetail(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView\n):\n model = AlgorithmImage\n permission_required = f\"{AlgorithmImage._meta.app_label}.view_{AlgorithmImage._meta.model_name}\"\n raise_exception = True\n\n\nclass AlgorithmImageUpdate(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView\n):\n model = AlgorithmImage\n form_class = AlgorithmImageUpdateForm\n permission_required = f\"{AlgorithmImage._meta.app_label}.change_{AlgorithmImage._meta.model_name}\"\n raise_exception = True\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context.update({\"algorithm\": self.object.algorithm})\n return context\n\n\nclass AlgorithmExecutionSessionCreate(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n SuccessMessageMixin,\n CreateView,\n):\n model = RawImageUploadSession\n form_class = UploadRawImagesForm\n template_name = \"algorithms/algorithm_execution_session_create.html\"\n success_message = (\n \"Your images have been uploaded, \"\n \"please check back here to see the processing status.\"\n )\n permission_required = (\n f\"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}\"\n )\n raise_exception = True\n\n @property\n def 
algorithm(self) -> Algorithm:\n return Algorithm.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_permission_object(self):\n return self.algorithm\n\n def get_initial(self):\n if self.algorithm.latest_ready_image is None:\n raise Http404()\n return super().get_initial()\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n form.instance.algorithm_image = self.algorithm.latest_ready_image\n return super().form_valid(form)\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context.update({\"algorithm\": self.algorithm})\n return context\n\n def get_success_url(self):\n return reverse(\n \"algorithms:jobs-list\", kwargs={\"slug\": self.kwargs[\"slug\"]}\n )\n\n\nclass AlgorithmJobsList(LoginRequiredMixin, PermissionListMixin, ListView):\n model = Job\n permission_required = f\"{Job._meta.app_label}.view_{Job._meta.model_name}\"\n\n @property\n def algorithm(self) -> Algorithm:\n return Algorithm.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context.update({\"algorithm\": self.algorithm})\n return context\n\n def get_queryset(self, *args, **kwargs):\n \"\"\"Filter the jobs for this algorithm.\"\"\"\n qs = super().get_queryset(*args, **kwargs)\n return qs.filter(algorithm_image__algorithm=self.algorithm)\n\n\nclass AlgorithmViewSet(ReadOnlyModelViewSet):\n queryset = Algorithm.objects.all()\n serializer_class = AlgorithmSerializer\n permission_classes = [DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n\n\nclass AlgorithmImageViewSet(ReadOnlyModelViewSet):\n queryset = AlgorithmImage.objects.all()\n serializer_class = AlgorithmImageSerializer\n permission_classes = [DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n\n\nclass JobViewSet(ReadOnlyModelViewSet):\n queryset = Job.objects.all()\n serializer_class = JobSerializer\n permission_classes = [DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n\n\nclass ResultViewSet(ReadOnlyModelViewSet):\n queryset = Result.objects.all()\n serializer_class = ResultSerializer\n permission_classes = [DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n", "path": "app/grandchallenge/algorithms/views.py"}]} | 3,173 | 340 |
gh_patches_debug_4071 | rasdani/github-patches | git_diff | mlcommons__GaNDLF-173 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug during saving probabilities
**Describe the bug**
A small bug occurs while saving probabilities in classification tasks. This is due to the file existence check: it should check whether the file exists instead of checking whether the directory exists.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `GANDLF/inference_manager.py`
Content:
```
1 from GANDLF.inference_loop import inference_loop
2 import os
3 import numpy as np
4 import torch
5 import torch.nn.functional as F
6
7
8 def InferenceManager(dataframe, outputDir, parameters, device):
9 """
10 This function takes in a dataframe, with some other parameters and performs the inference
11 """
12 # get the indeces for kfold splitting
13 inferenceData_full = dataframe
14
15 # # initialize parameters for inference
16 if not ("weights" in parameters):
17 parameters["weights"] = None # no need for loss weights for inference
18 if not ("class_weights" in parameters):
19 parameters["class_weights"] = None # no need for class weights for inference
20
21 n_folds = parameters["nested_training"]["validation"]
22
23 fold_dirs = []
24 if n_folds > 1:
25 directories = sorted(os.listdir(outputDir))
26 for d in directories:
27 if d.isdigit():
28 fold_dirs.append(os.path.join(outputDir, d, ""))
29 else:
30 fold_dirs = [outputDir]
31
32 probs_list = []
33
34 is_classification = parameters["problem_type"] == "classification"
35
36 for fold_dir in fold_dirs:
37 parameters["current_fold_dir"] = fold_dir
38 inference_loop(
39 inferenceDataFromPickle=inferenceData_full,
40 outputDir=fold_dir,
41 device=device,
42 parameters=parameters,
43 )
44
45 logits_dir = os.path.join(fold_dir, "logits.csv")
46 is_logits_dir_exist = os.path.isdir(logits_dir)
47
48 if is_classification and is_logits_dir_exist:
49 fold_logits = np.genfromtxt(logits_dir, delimiter=",")
50 fold_logits = torch.from_numpy(fold_logits)
51 fold_probs = F.softmax(fold_logits, dim=1)
52 probs_list.append(fold_probs)
53
54 if probs_list and is_classification:
55 probs_list = torch.stack(probs_list)
56 averaged_probs = torch.mean(probs_list, 0).numpy()
57 np.savetxt(
58 os.path.join(outputDir, "averaged_probabilities.csv"),
59 averaged_probs,
60 delimiter=",",
61 )
62
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/GANDLF/inference_manager.py b/GANDLF/inference_manager.py
--- a/GANDLF/inference_manager.py
+++ b/GANDLF/inference_manager.py
@@ -43,7 +43,7 @@
)
logits_dir = os.path.join(fold_dir, "logits.csv")
- is_logits_dir_exist = os.path.isdir(logits_dir)
+ is_logits_dir_exist = os.path.isfile(logits_dir)
if is_classification and is_logits_dir_exist:
fold_logits = np.genfromtxt(logits_dir, delimiter=",")
| {"golden_diff": "diff --git a/GANDLF/inference_manager.py b/GANDLF/inference_manager.py\n--- a/GANDLF/inference_manager.py\n+++ b/GANDLF/inference_manager.py\n@@ -43,7 +43,7 @@\n )\n \n logits_dir = os.path.join(fold_dir, \"logits.csv\")\n- is_logits_dir_exist = os.path.isdir(logits_dir)\n+ is_logits_dir_exist = os.path.isfile(logits_dir)\n \n if is_classification and is_logits_dir_exist:\n fold_logits = np.genfromtxt(logits_dir, delimiter=\",\")\n", "issue": "Bug during saving probabilities\n**Describe the bug**\r\nSmall bug is occurring during saving probabilities in classification tasks. This is due to file existence check. It should check if the file exists instead of checking if the directory exists.\r\n\n", "before_files": [{"content": "from GANDLF.inference_loop import inference_loop\nimport os\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\n\ndef InferenceManager(dataframe, outputDir, parameters, device):\n \"\"\"\n This function takes in a dataframe, with some other parameters and performs the inference\n \"\"\"\n # get the indeces for kfold splitting\n inferenceData_full = dataframe\n\n # # initialize parameters for inference\n if not (\"weights\" in parameters):\n parameters[\"weights\"] = None # no need for loss weights for inference\n if not (\"class_weights\" in parameters):\n parameters[\"class_weights\"] = None # no need for class weights for inference\n\n n_folds = parameters[\"nested_training\"][\"validation\"]\n\n fold_dirs = []\n if n_folds > 1:\n directories = sorted(os.listdir(outputDir))\n for d in directories:\n if d.isdigit():\n fold_dirs.append(os.path.join(outputDir, d, \"\"))\n else:\n fold_dirs = [outputDir]\n\n probs_list = []\n\n is_classification = parameters[\"problem_type\"] == \"classification\"\n\n for fold_dir in fold_dirs:\n parameters[\"current_fold_dir\"] = fold_dir\n inference_loop(\n inferenceDataFromPickle=inferenceData_full,\n outputDir=fold_dir,\n device=device,\n parameters=parameters,\n )\n\n logits_dir = os.path.join(fold_dir, \"logits.csv\")\n is_logits_dir_exist = os.path.isdir(logits_dir)\n\n if is_classification and is_logits_dir_exist:\n fold_logits = np.genfromtxt(logits_dir, delimiter=\",\")\n fold_logits = torch.from_numpy(fold_logits)\n fold_probs = F.softmax(fold_logits, dim=1)\n probs_list.append(fold_probs)\n\n if probs_list and is_classification:\n probs_list = torch.stack(probs_list)\n averaged_probs = torch.mean(probs_list, 0).numpy()\n np.savetxt(\n os.path.join(outputDir, \"averaged_probabilities.csv\"),\n averaged_probs,\n delimiter=\",\",\n )\n\n", "path": "GANDLF/inference_manager.py"}], "after_files": [{"content": "from GANDLF.inference_loop import inference_loop\nimport os\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\n\ndef InferenceManager(dataframe, outputDir, parameters, device):\n \"\"\"\n This function takes in a dataframe, with some other parameters and performs the inference\n \"\"\"\n # get the indeces for kfold splitting\n inferenceData_full = dataframe\n\n # # initialize parameters for inference\n if not (\"weights\" in parameters):\n parameters[\"weights\"] = None # no need for loss weights for inference\n if not (\"class_weights\" in parameters):\n parameters[\"class_weights\"] = None # no need for class weights for inference\n\n n_folds = parameters[\"nested_training\"][\"validation\"]\n\n fold_dirs = []\n if n_folds > 1:\n directories = sorted(os.listdir(outputDir))\n for d in directories:\n if d.isdigit():\n fold_dirs.append(os.path.join(outputDir, d, 
\"\"))\n else:\n fold_dirs = [outputDir]\n\n probs_list = []\n\n is_classification = parameters[\"problem_type\"] == \"classification\"\n\n for fold_dir in fold_dirs:\n parameters[\"current_fold_dir\"] = fold_dir\n inference_loop(\n inferenceDataFromPickle=inferenceData_full,\n outputDir=fold_dir,\n device=device,\n parameters=parameters,\n )\n\n logits_dir = os.path.join(fold_dir, \"logits.csv\")\n is_logits_dir_exist = os.path.isfile(logits_dir)\n\n if is_classification and is_logits_dir_exist:\n fold_logits = np.genfromtxt(logits_dir, delimiter=\",\")\n fold_logits = torch.from_numpy(fold_logits)\n fold_probs = F.softmax(fold_logits, dim=1)\n probs_list.append(fold_probs)\n\n if probs_list and is_classification:\n probs_list = torch.stack(probs_list)\n averaged_probs = torch.mean(probs_list, 0).numpy()\n np.savetxt(\n os.path.join(outputDir, \"averaged_probabilities.csv\"),\n averaged_probs,\n delimiter=\",\",\n )\n\n", "path": "GANDLF/inference_manager.py"}]} | 866 | 122 |
gh_patches_debug_3037 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-4360 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Windows: Cannot bundle with debug if pkg_resources is a dependency
This issue happens when I try to bundle my project, in the Analysis.assemble phase, and only when I try to do it with debug enabled. PyInstaller tries to compile a module that is part of an executable (pyinstaller.exe in this case), which fails because it cannot read the module.
This is with Windows 10, Python 3.6.6 (official from python.org) and PyInstaller 3.5.dev0+51429f8fc (which should be the latest develop version as of today).
Here is the traceback:
```
Traceback (most recent call last):
File "c:\python36-32\Lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\python36-32\Lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Users\RMYROY~1\VIRTUA~1\CDDA-G~3\Scripts\pyinstaller.exe\__main__.py", line 9, in <module>
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\__main__.py", line 111, in run
run_build(pyi_config, spec_file, **vars(args))
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\__main__.py", line 63, in run_build
PyInstaller.building.build_main.main(pyi_config, spec_file, **kwargs)
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\building\build_main.py", line 846, in main
build(specfile, kw.get('distpath'), kw.get('workpath'), kw.get('clean_build'))
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\building\build_main.py", line 793, in build
exec(code, spec_namespace)
File "launcher.spec", line 17, in <module>
noarchive=True)
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\building\build_main.py", line 243, in __init__
self.__postinit__()
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\building\datastruct.py", line 158, in __postinit__
self.assemble()
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\building\build_main.py", line 599, in assemble
for name, path, typecode in compile_py_files(new_toc, CONF['workpath']):
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\utils\misc.py", line 150, in compile_py_files
with open(obj_fnm, 'rb') as fh:
FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\RMYROY~1\\VIRTUA~1\\CDDA-G~3\\Scripts\\pyinstaller.exe\\__main__.pyo'
```
For some reason, the following entry is added in Analysis.pure
```python
('__main__.pyc', 'C:\\Users\\RMYROY~1\\VIRTUA~1\\CDDA-G~3\\Scripts\\pyinstaller.exe\\__main__.py', 'PYMODULE')
```
**That entry is incorrect in that it shouldn't have been added to pure, or it shouldn't be compiled in assemble, which is the source of this issue.**
Here is my spec file:
```python
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['cddagl\\launcher.py'],
pathex=['C:\\Program Files (x86)\\Windows Kits\\10\\Redist\\ucrt\\DLLs\\x86\\', 'C:\\Users\\Rémy Roy\\Projects\\CDDA-Game-Launcher'],
binaries=[],
datas=[('alembic', 'alembic'), ('data', 'data'), ('cddagl/resources', 'cddagl/resources'), ('cddagl/VERSION', 'cddagl'), ('C:\\Users\\Rémy Roy\\VirtualEnvs\\CDDA-Game-Launcher\\Scripts\\UnRAR.exe', '.'), ('cddagl/locale/en/LC_MESSAGES/cddagl.mo', 'cddagl/locale/en/LC_MESSAGES'), ('cddagl/locale/fr/LC_MESSAGES/cddagl.mo', 'cddagl/locale/fr/LC_MESSAGES'), ('cddagl/locale/it/LC_MESSAGES/cddagl.mo', 'cddagl/locale/it/LC_MESSAGES'), ('cddagl/locale/ja/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ja/LC_MESSAGES'), ('cddagl/locale/ru/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ru/LC_MESSAGES')],
hiddenimports=['lxml.cssselect', 'babel.numbers'],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=True)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
[('v', None, 'OPTION')],
exclude_binaries=True,
name='launcher',
debug=True,
bootloader_ignore_signals=False,
strip=False,
upx=False,
console=True , icon='cddagl\\resources\\launcher.ico')
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=False,
upx_exclude=[],
name='launcher')
```
You can probably reproduce this issue easily by cloning [my project](https://github.com/remyroy/CDDA-Game-Launcher) and issuing the following command:
```
python setup.py freeze --debug=1
```
Here is the full pyinstaller log output: https://gist.github.com/remyroy/37f7f0a912d5d714a947cddfb78769d4
I'll investigate how that entry is added in Analysis to give more context to this issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-pkg_resources.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2019, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9 from PyInstaller.utils.hooks import collect_submodules
10
11 # pkg_resources keeps vendored modules in its _vendor subpackage, and does
12 # sys.meta_path based import magic to expose them as pkg_resources.extern.*
13 hiddenimports = collect_submodules('pkg_resources._vendor')
14
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py
--- a/PyInstaller/hooks/hook-pkg_resources.py
+++ b/PyInstaller/hooks/hook-pkg_resources.py
@@ -11,3 +11,5 @@
# pkg_resources keeps vendored modules in its _vendor subpackage, and does
# sys.meta_path based import magic to expose them as pkg_resources.extern.*
hiddenimports = collect_submodules('pkg_resources._vendor')
+
+excludedimports = ['__main__']
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py\n--- a/PyInstaller/hooks/hook-pkg_resources.py\n+++ b/PyInstaller/hooks/hook-pkg_resources.py\n@@ -11,3 +11,5 @@\n # pkg_resources keeps vendored modules in its _vendor subpackage, and does\n # sys.meta_path based import magic to expose them as pkg_resources.extern.*\n hiddenimports = collect_submodules('pkg_resources._vendor')\n+\n+excludedimports = ['__main__']\n", "issue": "Windows: Cannot bundle with debug if pkg_resources is a dependency\nThis issue happens when I try to bundle my project, in the Analysis.assemble phase and only when I try to do it with debug enabled. PyInstaller tries to compile a module that is part of an executable (pyinstaller.exe in this case) which fails because it cannot read the module.\r\n\r\nThis is with Windows 10, Python 3.6.6 (official from python.org) and PyInstaller 3.5.dev0+51429f8fc (which should be the latest develop version as of today).\r\n\r\nHere is the traceback:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"c:\\python36-32\\Lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"c:\\python36-32\\Lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\RMYROY~1\\VIRTUA~1\\CDDA-G~3\\Scripts\\pyinstaller.exe\\__main__.py\", line 9, in <module>\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\__main__.py\", line 111, in run\r\n run_build(pyi_config, spec_file, **vars(args))\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\__main__.py\", line 63, in run_build\r\n PyInstaller.building.build_main.main(pyi_config, spec_file, **kwargs)\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\building\\build_main.py\", line 846, in main\r\n build(specfile, kw.get('distpath'), kw.get('workpath'), kw.get('clean_build'))\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\building\\build_main.py\", line 793, in build\r\n exec(code, spec_namespace)\r\n File \"launcher.spec\", line 17, in <module>\r\n noarchive=True)\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\building\\build_main.py\", line 243, in __init__\r\n self.__postinit__()\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\building\\datastruct.py\", line 158, in __postinit__\r\n self.assemble()\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\building\\build_main.py\", line 599, in assemble\r\n for name, path, typecode in compile_py_files(new_toc, CONF['workpath']):\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\utils\\misc.py\", line 150, in compile_py_files\r\n with open(obj_fnm, 'rb') as fh:\r\nFileNotFoundError: [Errno 2] No such file or directory: 'C:\\\\Users\\\\RMYROY~1\\\\VIRTUA~1\\\\CDDA-G~3\\\\Scripts\\\\pyinstaller.exe\\\\__main__.pyo'\r\n```\r\n\r\nFor some reason, the following entry is added in Analysis.pure\r\n\r\n```python\r\n('__main__.pyc', 'C:\\\\Users\\\\RMYROY~1\\\\VIRTUA~1\\\\CDDA-G~3\\\\Scripts\\\\pyinstaller.exe\\\\__main__.py', 'PYMODULE')\r\n```\r\n\r\n**That entry is incorrect in that it shouldn't have been added in pure or it shouldn't be compiled in assemble which is the source of this issue.**\r\n\r\nHere is my spec file:\r\n\r\n```python\r\n# -*- mode: python ; coding: utf-8 -*-\r\n\r\nblock_cipher = 
None\r\n\r\n\r\na = Analysis(['cddagl\\\\launcher.py'],\r\n pathex=['C:\\\\Program Files (x86)\\\\Windows Kits\\\\10\\\\Redist\\\\ucrt\\\\DLLs\\\\x86\\\\', 'C:\\\\Users\\\\R\u00e9my Roy\\\\Projects\\\\CDDA-Game-Launcher'],\r\n binaries=[],\r\n datas=[('alembic', 'alembic'), ('data', 'data'), ('cddagl/resources', 'cddagl/resources'), ('cddagl/VERSION', 'cddagl'), ('C:\\\\Users\\\\R\u00e9my Roy\\\\VirtualEnvs\\\\CDDA-Game-Launcher\\\\Scripts\\\\UnRAR.exe', '.'), ('cddagl/locale/en/LC_MESSAGES/cddagl.mo', 'cddagl/locale/en/LC_MESSAGES'), ('cddagl/locale/fr/LC_MESSAGES/cddagl.mo', 'cddagl/locale/fr/LC_MESSAGES'), ('cddagl/locale/it/LC_MESSAGES/cddagl.mo', 'cddagl/locale/it/LC_MESSAGES'), ('cddagl/locale/ja/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ja/LC_MESSAGES'), ('cddagl/locale/ru/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ru/LC_MESSAGES')],\r\n hiddenimports=['lxml.cssselect', 'babel.numbers'],\r\n hookspath=[],\r\n runtime_hooks=[],\r\n excludes=[],\r\n win_no_prefer_redirects=False,\r\n win_private_assemblies=False,\r\n cipher=block_cipher,\r\n noarchive=True)\r\npyz = PYZ(a.pure, a.zipped_data,\r\n cipher=block_cipher)\r\nexe = EXE(pyz,\r\n a.scripts,\r\n [('v', None, 'OPTION')],\r\n exclude_binaries=True,\r\n name='launcher',\r\n debug=True,\r\n bootloader_ignore_signals=False,\r\n strip=False,\r\n upx=False,\r\n console=True , icon='cddagl\\\\resources\\\\launcher.ico')\r\ncoll = COLLECT(exe,\r\n a.binaries,\r\n a.zipfiles,\r\n a.datas,\r\n strip=False,\r\n upx=False,\r\n upx_exclude=[],\r\n name='launcher')\r\n```\r\n\r\nYou can probably reproduce this issue easily by cloning [my project](https://github.com/remyroy/CDDA-Game-Launcher) and issuing the following command:\r\n\r\n```\r\npython setup.py freeze --debug=1\r\n```\r\n\r\nHere is the full pyinstaller log output: https://gist.github.com/remyroy/37f7f0a912d5d714a947cddfb78769d4\r\n\r\nI'll investigate how that entry is added in Analysis to give more context to this issue.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2019, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nfrom PyInstaller.utils.hooks import collect_submodules\n\n# pkg_resources keeps vendored modules in its _vendor subpackage, and does\n# sys.meta_path based import magic to expose them as pkg_resources.extern.*\nhiddenimports = collect_submodules('pkg_resources._vendor')\n", "path": "PyInstaller/hooks/hook-pkg_resources.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2019, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nfrom PyInstaller.utils.hooks import collect_submodules\n\n# pkg_resources keeps vendored modules in its _vendor subpackage, and does\n# sys.meta_path based import magic to expose them as pkg_resources.extern.*\nhiddenimports = collect_submodules('pkg_resources._vendor')\n\nexcludedimports = ['__main__']\n", "path": "PyInstaller/hooks/hook-pkg_resources.py"}]} | 1,920 | 
119 |
gh_patches_debug_878 | rasdani/github-patches | git_diff | privacyidea__privacyidea-1746 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix typo in registration token
The example of the registration token contains a typo.
The tokentype of course is a "registration" token, not a "register".
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `privacyidea/lib/tokens/registrationtoken.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # privacyIDEA
4 # Aug 12, 2014 Cornelius Kölbel
5 # License: AGPLv3
6 # contact: http://www.privacyidea.org
7 #
8 # 2015-01-29 Adapt during migration to flask
9 # Cornelius Kölbel <[email protected]>
10 #
11 # This code is free software; you can redistribute it and/or
12 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
13 # License as published by the Free Software Foundation; either
14 # version 3 of the License, or any later version.
15 #
16 # This code is distributed in the hope that it will be useful,
17 # but WITHOUT ANY WARRANTY; without even the implied warranty of
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
20 #
21 # You should have received a copy of the GNU Affero General Public
22 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
23 #
24 """
25 This file contains the definition of the RegisterToken class.
26
27 The code is tested in test_lib_tokens_registration.py.
28 """
29
30 import logging
31
32 from privacyidea.lib.utils import to_unicode
33 from privacyidea.lib.tokens.passwordtoken import PasswordTokenClass
34 from privacyidea.lib.log import log_with
35 from privacyidea.lib.crypto import generate_password
36 from privacyidea.lib.decorators import check_token_locked
37 from privacyidea.lib import _
38
39 optional = True
40 required = False
41
42 log = logging.getLogger(__name__)
43
44
45 class RegistrationTokenClass(PasswordTokenClass):
46 """
47 Token to implement a registration code.
48 It can be used to create a registration code or a "TAN" which can be used
49 once by a user to authenticate somewhere. After this registration code is
50 used, the token is automatically deleted.
51
52 The idea is to provide a workflow, where the user can get a registration code
53 by e.g. postal mail and then use this code as the initial first factor to
54 authenticate to the UI to enroll real tokens.
55
56 A registration code can be created by an administrative task with the
57 token/init api like this:
58
59 **Example Authentication Request**:
60
61 .. sourcecode:: http
62
63 POST /token/init HTTP/1.1
64 Host: example.com
65 Accept: application/json
66
67 type=register
68 user=cornelius
69 realm=realm1
70
71 **Example response**:
72
73 .. sourcecode:: http
74
75 HTTP/1.1 200 OK
76 Content-Type: application/json
77
78 {
79 "detail": {
80 "registrationcode": "12345808124095097608"
81 },
82 "id": 1,
83 "jsonrpc": "2.0",
84 "result": {
85 "status": true,
86 "value": true
87 },
88 "version": "privacyIDEA unknown"
89 }
90
91 """
92
93 def __init__(self, aToken):
94 PasswordTokenClass.__init__(self, aToken)
95 self.hKeyRequired = False
96 self.set_type(u"registration")
97 self.otp_len = 24
98
99 @staticmethod
100 def get_class_type():
101 return "registration"
102
103 @staticmethod
104 def get_class_prefix():
105 return "REG"
106
107 @staticmethod
108 @log_with(log)
109 def get_class_info(key=None, ret='all'):
110 """
111 returns a subtree of the token definition
112
113 :param key: subsection identifier
114 :type key: string
115 :param ret: default return value, if nothing is found
116 :type ret: user defined
117 :return: subsection if key exists or user defined
118 :rtype: dict or scalar
119 """
120 res = {'type': 'registration',
121 'title': 'Registration Code Token',
122 'description': _('Registration: A token that creates a '
123 'registration code that '
124 'can be used as a second factor once.'),
125 'init': {},
126 'config': {},
127 'user': [],
128 # This tokentype is enrollable in the UI for...
129 'ui_enroll': ["admin"],
130 'policy': {},
131 }
132
133 if key:
134 ret = res.get(key)
135 else:
136 if ret == 'all':
137 ret = res
138 return ret
139
140 def update(self, param):
141 """
142 This method is called during the initialization process.
143 :param param: parameters from the token init
144 :type param: dict
145 :return: None
146 """
147 if "genkey" in param:
148 # We do not need the genkey! We generate anyway.
149 # Otherwise genkey and otpkey will raise an exception in
150 # PasswordTokenClass
151 del param["genkey"]
152 param["otpkey"] = generate_password(size=self.otp_len)
153 PasswordTokenClass.update(self, param)
154
155 @log_with(log, log_entry=False)
156 @check_token_locked
157 def inc_count_auth_success(self):
158 """
159 Increase the counter, that counts successful authentications
160 In case of successful authentication the token does needs to be deleted.
161 """
162 self.delete_token()
163 return 1
164
165 @log_with(log)
166 def get_init_detail(self, params=None, user=None):
167 """
168 At the end of the initialization we return the registration code.
169 """
170 response_detail = PasswordTokenClass.get_init_detail(self, params, user)
171 params = params or {}
172 secretHOtp = self.token.get_otpkey()
173 registrationcode = secretHOtp.getKey()
174 response_detail["registrationcode"] = to_unicode(registrationcode)
175 return response_detail
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/privacyidea/lib/tokens/registrationtoken.py b/privacyidea/lib/tokens/registrationtoken.py
--- a/privacyidea/lib/tokens/registrationtoken.py
+++ b/privacyidea/lib/tokens/registrationtoken.py
@@ -64,7 +64,7 @@
Host: example.com
Accept: application/json
- type=register
+ type=registration
user=cornelius
realm=realm1
| {"golden_diff": "diff --git a/privacyidea/lib/tokens/registrationtoken.py b/privacyidea/lib/tokens/registrationtoken.py\n--- a/privacyidea/lib/tokens/registrationtoken.py\n+++ b/privacyidea/lib/tokens/registrationtoken.py\n@@ -64,7 +64,7 @@\n Host: example.com\n Accept: application/json\n \n- type=register\n+ type=registration\n user=cornelius\n realm=realm1\n", "issue": "Fix typo in registration token\nThe example of the registration token contains a typo.\r\nThe toketype of course is a \"registration\" token, not a \"register\".\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# privacyIDEA\n# Aug 12, 2014 Cornelius K\u00f6lbel\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# 2015-01-29 Adapt during migration to flask\n# Cornelius K\u00f6lbel <[email protected]>\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nThis file contains the definition of the RegisterToken class.\n\nThe code is tested in test_lib_tokens_registration.py.\n\"\"\"\n\nimport logging\n\nfrom privacyidea.lib.utils import to_unicode\nfrom privacyidea.lib.tokens.passwordtoken import PasswordTokenClass\nfrom privacyidea.lib.log import log_with\nfrom privacyidea.lib.crypto import generate_password\nfrom privacyidea.lib.decorators import check_token_locked\nfrom privacyidea.lib import _\n\noptional = True\nrequired = False\n\nlog = logging.getLogger(__name__)\n\n\nclass RegistrationTokenClass(PasswordTokenClass):\n \"\"\"\n Token to implement a registration code.\n It can be used to create a registration code or a \"TAN\" which can be used\n once by a user to authenticate somewhere. After this registration code is\n used, the token is automatically deleted.\n\n The idea is to provide a workflow, where the user can get a registration code\n by e.g. postal mail and then use this code as the initial first factor to\n authenticate to the UI to enroll real tokens.\n\n A registration code can be created by an administrative task with the\n token/init api like this:\n\n **Example Authentication Request**:\n\n .. sourcecode:: http\n\n POST /token/init HTTP/1.1\n Host: example.com\n Accept: application/json\n\n type=register\n user=cornelius\n realm=realm1\n\n **Example response**:\n\n .. 
sourcecode:: http\n\n HTTP/1.1 200 OK\n Content-Type: application/json\n\n {\n \"detail\": {\n \"registrationcode\": \"12345808124095097608\"\n },\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"result\": {\n \"status\": true,\n \"value\": true\n },\n \"version\": \"privacyIDEA unknown\"\n }\n\n \"\"\"\n\n def __init__(self, aToken):\n PasswordTokenClass.__init__(self, aToken)\n self.hKeyRequired = False\n self.set_type(u\"registration\")\n self.otp_len = 24\n\n @staticmethod\n def get_class_type():\n return \"registration\"\n\n @staticmethod\n def get_class_prefix():\n return \"REG\"\n\n @staticmethod\n @log_with(log)\n def get_class_info(key=None, ret='all'):\n \"\"\"\n returns a subtree of the token definition\n\n :param key: subsection identifier\n :type key: string\n :param ret: default return value, if nothing is found\n :type ret: user defined\n :return: subsection if key exists or user defined\n :rtype: dict or scalar\n \"\"\"\n res = {'type': 'registration',\n 'title': 'Registration Code Token',\n 'description': _('Registration: A token that creates a '\n 'registration code that '\n 'can be used as a second factor once.'),\n 'init': {},\n 'config': {},\n 'user': [],\n # This tokentype is enrollable in the UI for...\n 'ui_enroll': [\"admin\"],\n 'policy': {},\n }\n\n if key:\n ret = res.get(key)\n else:\n if ret == 'all':\n ret = res\n return ret\n\n def update(self, param):\n \"\"\"\n This method is called during the initialization process.\n :param param: parameters from the token init\n :type param: dict\n :return: None\n \"\"\"\n if \"genkey\" in param:\n # We do not need the genkey! We generate anyway.\n # Otherwise genkey and otpkey will raise an exception in\n # PasswordTokenClass\n del param[\"genkey\"]\n param[\"otpkey\"] = generate_password(size=self.otp_len)\n PasswordTokenClass.update(self, param)\n\n @log_with(log, log_entry=False)\n @check_token_locked\n def inc_count_auth_success(self):\n \"\"\"\n Increase the counter, that counts successful authentications\n In case of successful authentication the token does needs to be deleted.\n \"\"\"\n self.delete_token()\n return 1\n\n @log_with(log)\n def get_init_detail(self, params=None, user=None):\n \"\"\"\n At the end of the initialization we return the registration code.\n \"\"\"\n response_detail = PasswordTokenClass.get_init_detail(self, params, user)\n params = params or {}\n secretHOtp = self.token.get_otpkey()\n registrationcode = secretHOtp.getKey()\n response_detail[\"registrationcode\"] = to_unicode(registrationcode)\n return response_detail\n", "path": "privacyidea/lib/tokens/registrationtoken.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# privacyIDEA\n# Aug 12, 2014 Cornelius K\u00f6lbel\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# 2015-01-29 Adapt during migration to flask\n# Cornelius K\u00f6lbel <[email protected]>\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nThis file contains the definition of the RegisterToken class.\n\nThe code is tested in test_lib_tokens_registration.py.\n\"\"\"\n\nimport logging\n\nfrom privacyidea.lib.utils import to_unicode\nfrom privacyidea.lib.tokens.passwordtoken import PasswordTokenClass\nfrom privacyidea.lib.log import log_with\nfrom privacyidea.lib.crypto import generate_password\nfrom privacyidea.lib.decorators import check_token_locked\nfrom privacyidea.lib import _\n\noptional = True\nrequired = False\n\nlog = logging.getLogger(__name__)\n\n\nclass RegistrationTokenClass(PasswordTokenClass):\n \"\"\"\n Token to implement a registration code.\n It can be used to create a registration code or a \"TAN\" which can be used\n once by a user to authenticate somewhere. After this registration code is\n used, the token is automatically deleted.\n\n The idea is to provide a workflow, where the user can get a registration code\n by e.g. postal mail and then use this code as the initial first factor to\n authenticate to the UI to enroll real tokens.\n\n A registration code can be created by an administrative task with the\n token/init api like this:\n\n **Example Authentication Request**:\n\n .. sourcecode:: http\n\n POST /token/init HTTP/1.1\n Host: example.com\n Accept: application/json\n\n type=registration\n user=cornelius\n realm=realm1\n\n **Example response**:\n\n .. sourcecode:: http\n\n HTTP/1.1 200 OK\n Content-Type: application/json\n\n {\n \"detail\": {\n \"registrationcode\": \"12345808124095097608\"\n },\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"result\": {\n \"status\": true,\n \"value\": true\n },\n \"version\": \"privacyIDEA unknown\"\n }\n\n \"\"\"\n\n def __init__(self, aToken):\n PasswordTokenClass.__init__(self, aToken)\n self.hKeyRequired = False\n self.set_type(u\"registration\")\n self.otp_len = 24\n\n @staticmethod\n def get_class_type():\n return \"registration\"\n\n @staticmethod\n def get_class_prefix():\n return \"REG\"\n\n @staticmethod\n @log_with(log)\n def get_class_info(key=None, ret='all'):\n \"\"\"\n returns a subtree of the token definition\n\n :param key: subsection identifier\n :type key: string\n :param ret: default return value, if nothing is found\n :type ret: user defined\n :return: subsection if key exists or user defined\n :rtype: dict or scalar\n \"\"\"\n res = {'type': 'registration',\n 'title': 'Registration Code Token',\n 'description': _('Registration: A token that creates a '\n 'registration code that '\n 'can be used as a second factor once.'),\n 'init': {},\n 'config': {},\n 'user': [],\n # This tokentype is enrollable in the UI for...\n 'ui_enroll': [\"admin\"],\n 'policy': {},\n }\n\n if key:\n ret = res.get(key)\n else:\n if ret == 'all':\n ret = res\n return ret\n\n def update(self, param):\n \"\"\"\n This method is called during the initialization process.\n :param param: parameters from the token init\n :type param: dict\n :return: None\n \"\"\"\n if \"genkey\" in param:\n # We do not need the genkey! 
We generate anyway.\n # Otherwise genkey and otpkey will raise an exception in\n # PasswordTokenClass\n del param[\"genkey\"]\n param[\"otpkey\"] = generate_password(size=self.otp_len)\n PasswordTokenClass.update(self, param)\n\n @log_with(log, log_entry=False)\n @check_token_locked\n def inc_count_auth_success(self):\n \"\"\"\n Increase the counter, that counts successful authentications\n In case of successful authentication the token does needs to be deleted.\n \"\"\"\n self.delete_token()\n return 1\n\n @log_with(log)\n def get_init_detail(self, params=None, user=None):\n \"\"\"\n At the end of the initialization we return the registration code.\n \"\"\"\n response_detail = PasswordTokenClass.get_init_detail(self, params, user)\n params = params or {}\n secretHOtp = self.token.get_otpkey()\n registrationcode = secretHOtp.getKey()\n response_detail[\"registrationcode\"] = to_unicode(registrationcode)\n return response_detail\n", "path": "privacyidea/lib/tokens/registrationtoken.py"}]} | 1,954 | 101 |
gh_patches_debug_5499 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-3438 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cometml Logger epoch is not set.
<!--
### Common bugs:
1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79).
2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq)
-->
## 🐛 Bug
While logging using comet ml, there is an argument to set the epoch: https://www.comet.ml/docs/python-sdk/Experiment/#experimentlog_metrics
The info is available in the metrics dict, but instead of passing it as an arg, it is passed as a metrics value. I will supply a PR in a moment
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/loggers/comet.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Comet
17 -----
18 """
19
20 from argparse import Namespace
21 from typing import Optional, Dict, Union, Any
22
23 try:
24 from comet_ml import Experiment as CometExperiment
25 from comet_ml import ExistingExperiment as CometExistingExperiment
26 from comet_ml import OfflineExperiment as CometOfflineExperiment
27 from comet_ml import BaseExperiment as CometBaseExperiment
28 try:
29 from comet_ml.api import API
30 except ImportError: # pragma: no-cover
31 # For more information, see: https://www.comet.ml/docs/python-sdk/releases/#release-300
32 from comet_ml.papi import API # pragma: no-cover
33 from comet_ml.config import get_config, get_api_key
34 except ImportError: # pragma: no-cover
35 CometExperiment = None
36 CometExistingExperiment = None
37 CometOfflineExperiment = None
38 CometBaseExperiment = None
39 API = None
40 _COMET_AVAILABLE = False
41 else:
42 _COMET_AVAILABLE = True
43
44
45 import torch
46 from torch import is_tensor
47
48 from pytorch_lightning import _logger as log
49 from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment
50 from pytorch_lightning.utilities.exceptions import MisconfigurationException
51 from pytorch_lightning.utilities import rank_zero_only
52
53
54 class CometLogger(LightningLoggerBase):
55 r"""
56 Log using `Comet.ml <https://www.comet.ml>`_. Install it with pip:
57
58 .. code-block:: bash
59
60 pip install comet-ml
61
62 Comet requires either an API Key (online mode) or a local directory path (offline mode).
63
64 **ONLINE MODE**
65
66 Example:
67 >>> import os
68 >>> from pytorch_lightning import Trainer
69 >>> from pytorch_lightning.loggers import CometLogger
70 >>> # arguments made to CometLogger are passed on to the comet_ml.Experiment class
71 >>> comet_logger = CometLogger(
72 ... api_key=os.environ.get('COMET_API_KEY'),
73 ... workspace=os.environ.get('COMET_WORKSPACE'), # Optional
74 ... save_dir='.', # Optional
75 ... project_name='default_project', # Optional
76 ... rest_api_key=os.environ.get('COMET_REST_API_KEY'), # Optional
77 ... experiment_name='default' # Optional
78 ... )
79 >>> trainer = Trainer(logger=comet_logger)
80
81 **OFFLINE MODE**
82
83 Example:
84 >>> from pytorch_lightning.loggers import CometLogger
85 >>> # arguments made to CometLogger are passed on to the comet_ml.Experiment class
86 >>> comet_logger = CometLogger(
87 ... save_dir='.',
88 ... workspace=os.environ.get('COMET_WORKSPACE'), # Optional
89 ... project_name='default_project', # Optional
90 ... rest_api_key=os.environ.get('COMET_REST_API_KEY'), # Optional
91 ... experiment_name='default' # Optional
92 ... )
93 >>> trainer = Trainer(logger=comet_logger)
94
95 Args:
96 api_key: Required in online mode. API key, found on Comet.ml. If not given, this
97 will be loaded from the environment variable COMET_API_KEY or ~/.comet.config
98 if either exists.
99 save_dir: Required in offline mode. The path for the directory to save local
100 comet logs. If given, this also sets the directory for saving checkpoints.
101 workspace: Optional. Name of workspace for this user
102 project_name: Optional. Send your experiment to a specific project.
103 Otherwise will be sent to Uncategorized Experiments.
104 If the project name does not already exist, Comet.ml will create a new project.
105 rest_api_key: Optional. Rest API key found in Comet.ml settings.
106 This is used to determine version number
107 experiment_name: Optional. String representing the name for this particular experiment on Comet.ml.
108 experiment_key: Optional. If set, restores from existing experiment.
109 offline: If api_key and save_dir are both given, this determines whether
110 the experiment will be in online or offline mode. This is useful if you use
111 save_dir to control the checkpoints directory and have a ~/.comet.config
112 file but still want to run offline experiments.
113 """
114
115 def __init__(self,
116 api_key: Optional[str] = None,
117 save_dir: Optional[str] = None,
118 workspace: Optional[str] = None,
119 project_name: Optional[str] = None,
120 rest_api_key: Optional[str] = None,
121 experiment_name: Optional[str] = None,
122 experiment_key: Optional[str] = None,
123 offline: bool = False,
124 **kwargs):
125
126 if not _COMET_AVAILABLE:
127 raise ImportError('You want to use `comet_ml` logger which is not installed yet,'
128 ' install it with `pip install comet-ml`.')
129 super().__init__()
130 self._experiment = None
131
132 # Determine online or offline mode based on which arguments were passed to CometLogger
133 api_key = api_key or get_api_key(None, get_config())
134
135 if api_key is not None and save_dir is not None:
136 self.mode = "offline" if offline else "online"
137 self.api_key = api_key
138 self._save_dir = save_dir
139 elif api_key is not None:
140 self.mode = "online"
141 self.api_key = api_key
142 self._save_dir = None
143 elif save_dir is not None:
144 self.mode = "offline"
145 self._save_dir = save_dir
146 else:
147 # If neither api_key nor save_dir are passed as arguments, raise an exception
148 raise MisconfigurationException(
149 "CometLogger requires either api_key or save_dir during initialization."
150 )
151
152 log.info(f"CometLogger will be initialized in {self.mode} mode")
153
154 self.workspace = workspace
155 self.project_name = project_name
156 self.experiment_key = experiment_key
157 self._kwargs = kwargs
158
159 if rest_api_key is not None:
160 # Comet.ml rest API, used to determine version number
161 self.rest_api_key = rest_api_key
162 self.comet_api = API(self.rest_api_key)
163 else:
164 self.rest_api_key = None
165 self.comet_api = None
166
167 if experiment_name:
168 self.experiment.set_name(experiment_name)
169 self._kwargs = kwargs
170
171 @property
172 @rank_zero_experiment
173 def experiment(self) -> CometBaseExperiment:
174 r"""
175 Actual Comet object. To use Comet features in your
176 :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.
177
178 Example::
179
180 self.logger.experiment.some_comet_function()
181
182 """
183 if self._experiment is not None:
184 return self._experiment
185
186 if self.mode == "online":
187 if self.experiment_key is None:
188 self._experiment = CometExperiment(
189 api_key=self.api_key,
190 workspace=self.workspace,
191 project_name=self.project_name,
192 **self._kwargs
193 )
194 self.experiment_key = self._experiment.get_key()
195 else:
196 self._experiment = CometExistingExperiment(
197 api_key=self.api_key,
198 workspace=self.workspace,
199 project_name=self.project_name,
200 previous_experiment=self.experiment_key,
201 **self._kwargs
202 )
203 else:
204 self._experiment = CometOfflineExperiment(
205 offline_directory=self.save_dir,
206 workspace=self.workspace,
207 project_name=self.project_name,
208 **self._kwargs
209 )
210
211 return self._experiment
212
213 @rank_zero_only
214 def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
215 params = self._convert_params(params)
216 params = self._flatten_dict(params)
217 self.experiment.log_parameters(params)
218
219 @rank_zero_only
220 def log_metrics(
221 self,
222 metrics: Dict[str, Union[torch.Tensor, float]],
223 step: Optional[int] = None
224 ) -> None:
225 assert rank_zero_only.rank == 0, 'experiment tried to log from global_rank != 0'
226
227 # Comet.ml expects metrics to be a dictionary of detached tensors on CPU
228 for key, val in metrics.items():
229 if is_tensor(val):
230 metrics[key] = val.cpu().detach()
231
232 self.experiment.log_metrics(metrics, step=step)
233
234 def reset_experiment(self):
235 self._experiment = None
236
237 @rank_zero_only
238 def finalize(self, status: str) -> None:
239 r"""
240 When calling ``self.experiment.end()``, that experiment won't log any more data to Comet.
241 That's why, if you need to log any more data, you need to create an ExistingCometExperiment.
242 For example, to log data when testing your model after training, because when training is
243 finalized :meth:`CometLogger.finalize` is called.
244
245 This happens automatically in the :meth:`~CometLogger.experiment` property, when
246 ``self._experiment`` is set to ``None``, i.e. ``self.reset_experiment()``.
247 """
248 self.experiment.end()
249 self.reset_experiment()
250
251 @property
252 def save_dir(self) -> Optional[str]:
253 return self._save_dir
254
255 @property
256 def name(self) -> str:
257 return str(self.experiment.project_name)
258
259 @property
260 def version(self) -> str:
261 return self.experiment.id
262
263 def __getstate__(self):
264 state = self.__dict__.copy()
265 state["_experiment"] = None
266 return state
267
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytorch_lightning/loggers/comet.py b/pytorch_lightning/loggers/comet.py
--- a/pytorch_lightning/loggers/comet.py
+++ b/pytorch_lightning/loggers/comet.py
@@ -229,7 +229,10 @@
if is_tensor(val):
metrics[key] = val.cpu().detach()
- self.experiment.log_metrics(metrics, step=step)
+ metrics_without_epoch = metrics.copy()
+ epoch = metrics_without_epoch.pop('epoch', None)
+
+ self.experiment.log_metrics(metrics_without_epoch, step=step, epoch=epoch)
def reset_experiment(self):
self._experiment = None
| {"golden_diff": "diff --git a/pytorch_lightning/loggers/comet.py b/pytorch_lightning/loggers/comet.py\n--- a/pytorch_lightning/loggers/comet.py\n+++ b/pytorch_lightning/loggers/comet.py\n@@ -229,7 +229,10 @@\n if is_tensor(val):\n metrics[key] = val.cpu().detach()\n \n- self.experiment.log_metrics(metrics, step=step)\n+ metrics_without_epoch = metrics.copy()\n+ epoch = metrics_without_epoch.pop('epoch', None)\n+\n+ self.experiment.log_metrics(metrics_without_epoch, step=step, epoch=epoch)\n \n def reset_experiment(self):\n self._experiment = None\n", "issue": "Cometml Logger epoch is not set.\n<!-- \r\n### Common bugs:\r\n1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). \r\n2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) \r\n-->\r\n\r\n## \ud83d\udc1b Bug\r\n\r\nWhile logging using comet ml there is an argument to set epoch https://www.comet.ml/docs/python-sdk/Experiment/#experimentlog_metrics\r\nThe info is available in metrics dict, but instead of passing it as an arg, it is passed as metrics value. I will suply a PR in a moment\r\n\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nComet\n-----\n\"\"\"\n\nfrom argparse import Namespace\nfrom typing import Optional, Dict, Union, Any\n\ntry:\n from comet_ml import Experiment as CometExperiment\n from comet_ml import ExistingExperiment as CometExistingExperiment\n from comet_ml import OfflineExperiment as CometOfflineExperiment\n from comet_ml import BaseExperiment as CometBaseExperiment\n try:\n from comet_ml.api import API\n except ImportError: # pragma: no-cover\n # For more information, see: https://www.comet.ml/docs/python-sdk/releases/#release-300\n from comet_ml.papi import API # pragma: no-cover\n from comet_ml.config import get_config, get_api_key\nexcept ImportError: # pragma: no-cover\n CometExperiment = None\n CometExistingExperiment = None\n CometOfflineExperiment = None\n CometBaseExperiment = None\n API = None\n _COMET_AVAILABLE = False\nelse:\n _COMET_AVAILABLE = True\n\n\nimport torch\nfrom torch import is_tensor\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities import rank_zero_only\n\n\nclass CometLogger(LightningLoggerBase):\n r\"\"\"\n Log using `Comet.ml <https://www.comet.ml>`_. Install it with pip:\n\n .. 
code-block:: bash\n\n pip install comet-ml\n\n Comet requires either an API Key (online mode) or a local directory path (offline mode).\n\n **ONLINE MODE**\n\n Example:\n >>> import os\n >>> from pytorch_lightning import Trainer\n >>> from pytorch_lightning.loggers import CometLogger\n >>> # arguments made to CometLogger are passed on to the comet_ml.Experiment class\n >>> comet_logger = CometLogger(\n ... api_key=os.environ.get('COMET_API_KEY'),\n ... workspace=os.environ.get('COMET_WORKSPACE'), # Optional\n ... save_dir='.', # Optional\n ... project_name='default_project', # Optional\n ... rest_api_key=os.environ.get('COMET_REST_API_KEY'), # Optional\n ... experiment_name='default' # Optional\n ... )\n >>> trainer = Trainer(logger=comet_logger)\n\n **OFFLINE MODE**\n\n Example:\n >>> from pytorch_lightning.loggers import CometLogger\n >>> # arguments made to CometLogger are passed on to the comet_ml.Experiment class\n >>> comet_logger = CometLogger(\n ... save_dir='.',\n ... workspace=os.environ.get('COMET_WORKSPACE'), # Optional\n ... project_name='default_project', # Optional\n ... rest_api_key=os.environ.get('COMET_REST_API_KEY'), # Optional\n ... experiment_name='default' # Optional\n ... )\n >>> trainer = Trainer(logger=comet_logger)\n\n Args:\n api_key: Required in online mode. API key, found on Comet.ml. If not given, this\n will be loaded from the environment variable COMET_API_KEY or ~/.comet.config\n if either exists.\n save_dir: Required in offline mode. The path for the directory to save local\n comet logs. If given, this also sets the directory for saving checkpoints.\n workspace: Optional. Name of workspace for this user\n project_name: Optional. Send your experiment to a specific project.\n Otherwise will be sent to Uncategorized Experiments.\n If the project name does not already exist, Comet.ml will create a new project.\n rest_api_key: Optional. Rest API key found in Comet.ml settings.\n This is used to determine version number\n experiment_name: Optional. String representing the name for this particular experiment on Comet.ml.\n experiment_key: Optional. If set, restores from existing experiment.\n offline: If api_key and save_dir are both given, this determines whether\n the experiment will be in online or offline mode. 
This is useful if you use\n save_dir to control the checkpoints directory and have a ~/.comet.config\n file but still want to run offline experiments.\n \"\"\"\n\n def __init__(self,\n api_key: Optional[str] = None,\n save_dir: Optional[str] = None,\n workspace: Optional[str] = None,\n project_name: Optional[str] = None,\n rest_api_key: Optional[str] = None,\n experiment_name: Optional[str] = None,\n experiment_key: Optional[str] = None,\n offline: bool = False,\n **kwargs):\n\n if not _COMET_AVAILABLE:\n raise ImportError('You want to use `comet_ml` logger which is not installed yet,'\n ' install it with `pip install comet-ml`.')\n super().__init__()\n self._experiment = None\n\n # Determine online or offline mode based on which arguments were passed to CometLogger\n api_key = api_key or get_api_key(None, get_config())\n\n if api_key is not None and save_dir is not None:\n self.mode = \"offline\" if offline else \"online\"\n self.api_key = api_key\n self._save_dir = save_dir\n elif api_key is not None:\n self.mode = \"online\"\n self.api_key = api_key\n self._save_dir = None\n elif save_dir is not None:\n self.mode = \"offline\"\n self._save_dir = save_dir\n else:\n # If neither api_key nor save_dir are passed as arguments, raise an exception\n raise MisconfigurationException(\n \"CometLogger requires either api_key or save_dir during initialization.\"\n )\n\n log.info(f\"CometLogger will be initialized in {self.mode} mode\")\n\n self.workspace = workspace\n self.project_name = project_name\n self.experiment_key = experiment_key\n self._kwargs = kwargs\n\n if rest_api_key is not None:\n # Comet.ml rest API, used to determine version number\n self.rest_api_key = rest_api_key\n self.comet_api = API(self.rest_api_key)\n else:\n self.rest_api_key = None\n self.comet_api = None\n\n if experiment_name:\n self.experiment.set_name(experiment_name)\n self._kwargs = kwargs\n\n @property\n @rank_zero_experiment\n def experiment(self) -> CometBaseExperiment:\n r\"\"\"\n Actual Comet object. 
To use Comet features in your\n :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.\n\n Example::\n\n self.logger.experiment.some_comet_function()\n\n \"\"\"\n if self._experiment is not None:\n return self._experiment\n\n if self.mode == \"online\":\n if self.experiment_key is None:\n self._experiment = CometExperiment(\n api_key=self.api_key,\n workspace=self.workspace,\n project_name=self.project_name,\n **self._kwargs\n )\n self.experiment_key = self._experiment.get_key()\n else:\n self._experiment = CometExistingExperiment(\n api_key=self.api_key,\n workspace=self.workspace,\n project_name=self.project_name,\n previous_experiment=self.experiment_key,\n **self._kwargs\n )\n else:\n self._experiment = CometOfflineExperiment(\n offline_directory=self.save_dir,\n workspace=self.workspace,\n project_name=self.project_name,\n **self._kwargs\n )\n\n return self._experiment\n\n @rank_zero_only\n def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:\n params = self._convert_params(params)\n params = self._flatten_dict(params)\n self.experiment.log_parameters(params)\n\n @rank_zero_only\n def log_metrics(\n self,\n metrics: Dict[str, Union[torch.Tensor, float]],\n step: Optional[int] = None\n ) -> None:\n assert rank_zero_only.rank == 0, 'experiment tried to log from global_rank != 0'\n\n # Comet.ml expects metrics to be a dictionary of detached tensors on CPU\n for key, val in metrics.items():\n if is_tensor(val):\n metrics[key] = val.cpu().detach()\n\n self.experiment.log_metrics(metrics, step=step)\n\n def reset_experiment(self):\n self._experiment = None\n\n @rank_zero_only\n def finalize(self, status: str) -> None:\n r\"\"\"\n When calling ``self.experiment.end()``, that experiment won't log any more data to Comet.\n That's why, if you need to log any more data, you need to create an ExistingCometExperiment.\n For example, to log data when testing your model after training, because when training is\n finalized :meth:`CometLogger.finalize` is called.\n\n This happens automatically in the :meth:`~CometLogger.experiment` property, when\n ``self._experiment`` is set to ``None``, i.e. 
``self.reset_experiment()``.\n \"\"\"\n self.experiment.end()\n self.reset_experiment()\n\n @property\n def save_dir(self) -> Optional[str]:\n return self._save_dir\n\n @property\n def name(self) -> str:\n return str(self.experiment.project_name)\n\n @property\n def version(self) -> str:\n return self.experiment.id\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"_experiment\"] = None\n return state\n", "path": "pytorch_lightning/loggers/comet.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nComet\n-----\n\"\"\"\n\nfrom argparse import Namespace\nfrom typing import Optional, Dict, Union, Any\n\ntry:\n from comet_ml import Experiment as CometExperiment\n from comet_ml import ExistingExperiment as CometExistingExperiment\n from comet_ml import OfflineExperiment as CometOfflineExperiment\n from comet_ml import BaseExperiment as CometBaseExperiment\n try:\n from comet_ml.api import API\n except ImportError: # pragma: no-cover\n # For more information, see: https://www.comet.ml/docs/python-sdk/releases/#release-300\n from comet_ml.papi import API # pragma: no-cover\n from comet_ml.config import get_config, get_api_key\nexcept ImportError: # pragma: no-cover\n CometExperiment = None\n CometExistingExperiment = None\n CometOfflineExperiment = None\n CometBaseExperiment = None\n API = None\n _COMET_AVAILABLE = False\nelse:\n _COMET_AVAILABLE = True\n\n\nimport torch\nfrom torch import is_tensor\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities import rank_zero_only\n\n\nclass CometLogger(LightningLoggerBase):\n r\"\"\"\n Log using `Comet.ml <https://www.comet.ml>`_. Install it with pip:\n\n .. code-block:: bash\n\n pip install comet-ml\n\n Comet requires either an API Key (online mode) or a local directory path (offline mode).\n\n **ONLINE MODE**\n\n Example:\n >>> import os\n >>> from pytorch_lightning import Trainer\n >>> from pytorch_lightning.loggers import CometLogger\n >>> # arguments made to CometLogger are passed on to the comet_ml.Experiment class\n >>> comet_logger = CometLogger(\n ... api_key=os.environ.get('COMET_API_KEY'),\n ... workspace=os.environ.get('COMET_WORKSPACE'), # Optional\n ... save_dir='.', # Optional\n ... project_name='default_project', # Optional\n ... rest_api_key=os.environ.get('COMET_REST_API_KEY'), # Optional\n ... experiment_name='default' # Optional\n ... )\n >>> trainer = Trainer(logger=comet_logger)\n\n **OFFLINE MODE**\n\n Example:\n >>> from pytorch_lightning.loggers import CometLogger\n >>> # arguments made to CometLogger are passed on to the comet_ml.Experiment class\n >>> comet_logger = CometLogger(\n ... save_dir='.',\n ... workspace=os.environ.get('COMET_WORKSPACE'), # Optional\n ... project_name='default_project', # Optional\n ... 
rest_api_key=os.environ.get('COMET_REST_API_KEY'), # Optional\n ... experiment_name='default' # Optional\n ... )\n >>> trainer = Trainer(logger=comet_logger)\n\n Args:\n api_key: Required in online mode. API key, found on Comet.ml. If not given, this\n will be loaded from the environment variable COMET_API_KEY or ~/.comet.config\n if either exists.\n save_dir: Required in offline mode. The path for the directory to save local\n comet logs. If given, this also sets the directory for saving checkpoints.\n workspace: Optional. Name of workspace for this user\n project_name: Optional. Send your experiment to a specific project.\n Otherwise will be sent to Uncategorized Experiments.\n If the project name does not already exist, Comet.ml will create a new project.\n rest_api_key: Optional. Rest API key found in Comet.ml settings.\n This is used to determine version number\n experiment_name: Optional. String representing the name for this particular experiment on Comet.ml.\n experiment_key: Optional. If set, restores from existing experiment.\n offline: If api_key and save_dir are both given, this determines whether\n the experiment will be in online or offline mode. This is useful if you use\n save_dir to control the checkpoints directory and have a ~/.comet.config\n file but still want to run offline experiments.\n \"\"\"\n\n def __init__(self,\n api_key: Optional[str] = None,\n save_dir: Optional[str] = None,\n workspace: Optional[str] = None,\n project_name: Optional[str] = None,\n rest_api_key: Optional[str] = None,\n experiment_name: Optional[str] = None,\n experiment_key: Optional[str] = None,\n offline: bool = False,\n **kwargs):\n\n if not _COMET_AVAILABLE:\n raise ImportError('You want to use `comet_ml` logger which is not installed yet,'\n ' install it with `pip install comet-ml`.')\n super().__init__()\n self._experiment = None\n\n # Determine online or offline mode based on which arguments were passed to CometLogger\n api_key = api_key or get_api_key(None, get_config())\n\n if api_key is not None and save_dir is not None:\n self.mode = \"offline\" if offline else \"online\"\n self.api_key = api_key\n self._save_dir = save_dir\n elif api_key is not None:\n self.mode = \"online\"\n self.api_key = api_key\n self._save_dir = None\n elif save_dir is not None:\n self.mode = \"offline\"\n self._save_dir = save_dir\n else:\n # If neither api_key nor save_dir are passed as arguments, raise an exception\n raise MisconfigurationException(\n \"CometLogger requires either api_key or save_dir during initialization.\"\n )\n\n log.info(f\"CometLogger will be initialized in {self.mode} mode\")\n\n self.workspace = workspace\n self.project_name = project_name\n self.experiment_key = experiment_key\n self._kwargs = kwargs\n\n if rest_api_key is not None:\n # Comet.ml rest API, used to determine version number\n self.rest_api_key = rest_api_key\n self.comet_api = API(self.rest_api_key)\n else:\n self.rest_api_key = None\n self.comet_api = None\n\n if experiment_name:\n self.experiment.set_name(experiment_name)\n self._kwargs = kwargs\n\n @property\n @rank_zero_experiment\n def experiment(self) -> CometBaseExperiment:\n r\"\"\"\n Actual Comet object. 
To use Comet features in your\n :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.\n\n Example::\n\n self.logger.experiment.some_comet_function()\n\n \"\"\"\n if self._experiment is not None:\n return self._experiment\n\n if self.mode == \"online\":\n if self.experiment_key is None:\n self._experiment = CometExperiment(\n api_key=self.api_key,\n workspace=self.workspace,\n project_name=self.project_name,\n **self._kwargs\n )\n self.experiment_key = self._experiment.get_key()\n else:\n self._experiment = CometExistingExperiment(\n api_key=self.api_key,\n workspace=self.workspace,\n project_name=self.project_name,\n previous_experiment=self.experiment_key,\n **self._kwargs\n )\n else:\n self._experiment = CometOfflineExperiment(\n offline_directory=self.save_dir,\n workspace=self.workspace,\n project_name=self.project_name,\n **self._kwargs\n )\n\n return self._experiment\n\n @rank_zero_only\n def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:\n params = self._convert_params(params)\n params = self._flatten_dict(params)\n self.experiment.log_parameters(params)\n\n @rank_zero_only\n def log_metrics(\n self,\n metrics: Dict[str, Union[torch.Tensor, float]],\n step: Optional[int] = None\n ) -> None:\n assert rank_zero_only.rank == 0, 'experiment tried to log from global_rank != 0'\n\n # Comet.ml expects metrics to be a dictionary of detached tensors on CPU\n for key, val in metrics.items():\n if is_tensor(val):\n metrics[key] = val.cpu().detach()\n\n metrics_without_epoch = metrics.copy()\n epoch = metrics_without_epoch.pop('epoch', None)\n\n self.experiment.log_metrics(metrics_without_epoch, step=step, epoch=epoch)\n\n def reset_experiment(self):\n self._experiment = None\n\n @rank_zero_only\n def finalize(self, status: str) -> None:\n r\"\"\"\n When calling ``self.experiment.end()``, that experiment won't log any more data to Comet.\n That's why, if you need to log any more data, you need to create an ExistingCometExperiment.\n For example, to log data when testing your model after training, because when training is\n finalized :meth:`CometLogger.finalize` is called.\n\n This happens automatically in the :meth:`~CometLogger.experiment` property, when\n ``self._experiment`` is set to ``None``, i.e. ``self.reset_experiment()``.\n \"\"\"\n self.experiment.end()\n self.reset_experiment()\n\n @property\n def save_dir(self) -> Optional[str]:\n return self._save_dir\n\n @property\n def name(self) -> str:\n return str(self.experiment.project_name)\n\n @property\n def version(self) -> str:\n return self.experiment.id\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"_experiment\"] = None\n return state\n", "path": "pytorch_lightning/loggers/comet.py"}]} | 3,290 | 149 |
gh_patches_debug_37467 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1293 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Parameter model_zoo cannot take a path ending with `/`
If the parameter `model_zoo` is a path ending with a slash, the following exception shows up:
```
[2019-10-10 13:15:47,574] [INFO] [image_builder.py:227:_print_docker_progress]
Traceback (most recent call last):
File "/miniconda2/envs/elasticdl-rc0/bin/elasticdl", line 10, in <module>
sys.exit(main())
File "/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/client.py", line 46, in main
args.func(args)
File "/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/api.py", line 24, in train
docker_tlskey=args.docker_tlskey,
File "/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/image_builder.py", line 74, in build_and_push_docker_image
_build_docker_image(client, ctx_dir, df.name, image_name)
File "/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/image_builder.py", line 241, in _build_docker_image
_print_docker_progress(line)
File "/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/image_builder.py", line 224, in _print_docker_progress
raise RuntimeError("Docker image build: " + error)
RuntimeError: Docker image build: COPY failed: stat /var/lib/docker/tmp/docker-builder531748767/model_zoo: no such file or directory
```
--- END ISSUE ---
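The failure mode can be reproduced with the standard library alone: `os.path.basename` returns an empty string for a path that ends in a slash, so the model zoo directory name silently disappears from the paths built from it. Normalizing the path first, as the accepted fix further down does, restores the expected behaviour. A minimal sketch (the path is hypothetical; outputs shown for POSIX-style paths):

```python
import os

model_zoo = "/home/user/models/my_model_zoo/"   # trailing slash, as in the report

# basename drops everything after the last separator, so a trailing slash
# leaves nothing -- the directory name is lost.
print(os.path.basename(model_zoo))        # ''

# normpath strips the trailing slash, after which basename works as intended.
normalized = os.path.normpath(model_zoo)
print(normalized)                         # '/home/user/models/my_model_zoo'
print(os.path.basename(normalized))       # 'my_model_zoo'
```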
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl/python/elasticdl/api.py`
Content:
```
1 import os
2
3 from elasticdl.python.common import k8s_client as k8s
4 from elasticdl.python.common.args import (
5 build_arguments_from_parsed_result,
6 parse_envs,
7 )
8 from elasticdl.python.common.log_utils import default_logger as logger
9 from elasticdl.python.elasticdl.image_builder import (
10 build_and_push_docker_image,
11 remove_images,
12 )
13
14
15 def train(args):
16 image_name = build_and_push_docker_image(
17 model_zoo=args.model_zoo,
18 base_image=args.image_base,
19 docker_image_repository=args.docker_image_repository,
20 extra_pypi=args.extra_pypi_index,
21 cluster_spec=args.cluster_spec,
22 docker_base_url=args.docker_base_url,
23 docker_tlscert=args.docker_tlscert,
24 docker_tlskey=args.docker_tlskey,
25 )
26
27 container_args = [
28 "-m",
29 "elasticdl.python.master.main",
30 "--worker_image",
31 image_name,
32 "--model_zoo",
33 _model_zoo_in_docker(args.model_zoo),
34 "--cluster_spec",
35 _cluster_spec_def_in_docker(args.cluster_spec),
36 ]
37 container_args.extend(
38 build_arguments_from_parsed_result(
39 args, filter_args=["model_zoo", "cluster_spec", "worker_image"]
40 )
41 )
42
43 _submit_job(image_name, args, container_args)
44 # TODO: print dashboard url after launching the master pod
45
46
47 def evaluate(args):
48 image_name = build_and_push_docker_image(
49 model_zoo=args.model_zoo,
50 base_image=args.image_base,
51 docker_image_repository=args.docker_image_repository,
52 extra_pypi=args.extra_pypi_index,
53 cluster_spec=args.cluster_spec,
54 docker_base_url=args.docker_base_url,
55 docker_tlscert=args.docker_tlscert,
56 docker_tlskey=args.docker_tlskey,
57 )
58 container_args = [
59 "-m",
60 "elasticdl.python.master.main",
61 "--worker_image",
62 image_name,
63 "--model_zoo",
64 _model_zoo_in_docker(args.model_zoo),
65 "--cluster_spec",
66 _cluster_spec_def_in_docker(args.cluster_spec),
67 ]
68 container_args.extend(
69 build_arguments_from_parsed_result(
70 args, filter_args=["model_zoo", "cluster_spec", "worker_image"]
71 )
72 )
73
74 _submit_job(image_name, args, container_args)
75
76
77 def predict(args):
78 image_name = build_and_push_docker_image(
79 model_zoo=args.model_zoo,
80 base_image=args.image_base,
81 docker_image_repository=args.docker_image_repository,
82 extra_pypi=args.extra_pypi_index,
83 cluster_spec=args.cluster_spec,
84 docker_base_url=args.docker_base_url,
85 docker_tlscert=args.docker_tlscert,
86 docker_tlskey=args.docker_tlskey,
87 )
88 container_args = [
89 "-m",
90 "elasticdl.python.master.main",
91 "--worker_image",
92 image_name,
93 "--model_zoo",
94 _model_zoo_in_docker(args.model_zoo),
95 "--cluster_spec",
96 _cluster_spec_def_in_docker(args.cluster_spec),
97 ]
98 container_args.extend(
99 build_arguments_from_parsed_result(
100 args, filter_args=["model_zoo", "cluster_spec", "worker_image"]
101 )
102 )
103
104 _submit_job(image_name, args, container_args)
105
106
107 def clean(args):
108 if args.docker_image_repository and args.all:
109 raise ValueError(
110 "--docker_image_repository and --all cannot "
111 "be specified at the same time"
112 )
113 if not (args.docker_image_repository or args.all):
114 raise ValueError(
115 "Either --docker_image_repository or --all "
116 "needs to be configured"
117 )
118 remove_images(
119 docker_image_repository=args.docker_image_repository,
120 docker_base_url=args.docker_base_url,
121 docker_tlscert=args.docker_tlscert,
122 docker_tlskey=args.docker_tlskey,
123 )
124
125
126 def _submit_job(image_name, client_args, container_args):
127 client = k8s.Client(
128 image_name=image_name,
129 namespace=client_args.namespace,
130 job_name=client_args.job_name,
131 event_callback=None,
132 cluster_spec=client_args.cluster_spec,
133 )
134
135 client.create_master(
136 resource_requests=client_args.master_resource_request,
137 resource_limits=client_args.master_resource_limit,
138 args=container_args,
139 pod_priority=client_args.master_pod_priority,
140 image_pull_policy=client_args.image_pull_policy,
141 restart_policy=client_args.restart_policy,
142 volume=client_args.volume,
143 envs=parse_envs(client_args.envs),
144 )
145 logger.info(
146 "ElasticDL job %s was successfully submitted. The master pod is: %s."
147 % (client_args.job_name, client.get_master_pod_name())
148 )
149
150
151 def _model_zoo_in_docker(model_zoo):
152 MODEL_ROOT_PATH = "/model_zoo"
153 return os.path.join(MODEL_ROOT_PATH, os.path.basename(model_zoo))
154
155
156 def _cluster_spec_def_in_docker(cluster_spec):
157 CLUSTER_SPEC_ROOT_PATH = "/cluster_spec"
158 return (
159 os.path.join(CLUSTER_SPEC_ROOT_PATH, os.path.basename(cluster_spec))
160 if cluster_spec
161 else ""
162 )
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticdl/python/elasticdl/api.py b/elasticdl/python/elasticdl/api.py
--- a/elasticdl/python/elasticdl/api.py
+++ b/elasticdl/python/elasticdl/api.py
@@ -13,8 +13,10 @@
def train(args):
+ model_zoo = os.path.normpath(args.model_zoo)
+
image_name = build_and_push_docker_image(
- model_zoo=args.model_zoo,
+ model_zoo=model_zoo,
base_image=args.image_base,
docker_image_repository=args.docker_image_repository,
extra_pypi=args.extra_pypi_index,
@@ -30,7 +32,7 @@
"--worker_image",
image_name,
"--model_zoo",
- _model_zoo_in_docker(args.model_zoo),
+ _model_zoo_in_docker(model_zoo),
"--cluster_spec",
_cluster_spec_def_in_docker(args.cluster_spec),
]
@@ -45,8 +47,10 @@
def evaluate(args):
+ model_zoo = os.path.normpath(args.model_zoo)
+
image_name = build_and_push_docker_image(
- model_zoo=args.model_zoo,
+ model_zoo=model_zoo,
base_image=args.image_base,
docker_image_repository=args.docker_image_repository,
extra_pypi=args.extra_pypi_index,
@@ -61,7 +65,7 @@
"--worker_image",
image_name,
"--model_zoo",
- _model_zoo_in_docker(args.model_zoo),
+ _model_zoo_in_docker(model_zoo),
"--cluster_spec",
_cluster_spec_def_in_docker(args.cluster_spec),
]
@@ -75,8 +79,10 @@
def predict(args):
+ model_zoo = os.path.normpath(args.model_zoo)
+
image_name = build_and_push_docker_image(
- model_zoo=args.model_zoo,
+ model_zoo=model_zoo,
base_image=args.image_base,
docker_image_repository=args.docker_image_repository,
extra_pypi=args.extra_pypi_index,
@@ -91,7 +97,7 @@
"--worker_image",
image_name,
"--model_zoo",
- _model_zoo_in_docker(args.model_zoo),
+ _model_zoo_in_docker(model_zoo),
"--cluster_spec",
_cluster_spec_def_in_docker(args.cluster_spec),
]
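Taken together, the patched commands normalize the user-supplied path once and reuse it both for the image build and for the `--model_zoo` container argument. A rough illustration of the resulting path handling, reusing the `_model_zoo_in_docker` helper from the file above (input paths are hypothetical):

```python
import os

MODEL_ROOT_PATH = "/model_zoo"

def _model_zoo_in_docker(model_zoo: str) -> str:
    # Same helper as in elasticdl/python/elasticdl/api.py.
    return os.path.join(MODEL_ROOT_PATH, os.path.basename(model_zoo))

for raw in ("/home/user/my_zoo", "/home/user/my_zoo/"):
    model_zoo = os.path.normpath(raw)   # what the patched train/evaluate/predict now do first
    print(raw, "->", _model_zoo_in_docker(model_zoo))

# Both inputs map to '/model_zoo/my_zoo'. Without normpath, the trailing-slash
# input would have produced '/model_zoo/', because basename('/.../my_zoo/') is ''.
```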
| {"golden_diff": "diff --git a/elasticdl/python/elasticdl/api.py b/elasticdl/python/elasticdl/api.py\n--- a/elasticdl/python/elasticdl/api.py\n+++ b/elasticdl/python/elasticdl/api.py\n@@ -13,8 +13,10 @@\n \n \n def train(args):\n+ model_zoo = os.path.normpath(args.model_zoo)\n+\n image_name = build_and_push_docker_image(\n- model_zoo=args.model_zoo,\n+ model_zoo=model_zoo,\n base_image=args.image_base,\n docker_image_repository=args.docker_image_repository,\n extra_pypi=args.extra_pypi_index,\n@@ -30,7 +32,7 @@\n \"--worker_image\",\n image_name,\n \"--model_zoo\",\n- _model_zoo_in_docker(args.model_zoo),\n+ _model_zoo_in_docker(model_zoo),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n ]\n@@ -45,8 +47,10 @@\n \n \n def evaluate(args):\n+ model_zoo = os.path.normpath(args.model_zoo)\n+\n image_name = build_and_push_docker_image(\n- model_zoo=args.model_zoo,\n+ model_zoo=model_zoo,\n base_image=args.image_base,\n docker_image_repository=args.docker_image_repository,\n extra_pypi=args.extra_pypi_index,\n@@ -61,7 +65,7 @@\n \"--worker_image\",\n image_name,\n \"--model_zoo\",\n- _model_zoo_in_docker(args.model_zoo),\n+ _model_zoo_in_docker(model_zoo),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n ]\n@@ -75,8 +79,10 @@\n \n \n def predict(args):\n+ model_zoo = os.path.normpath(args.model_zoo)\n+\n image_name = build_and_push_docker_image(\n- model_zoo=args.model_zoo,\n+ model_zoo=model_zoo,\n base_image=args.image_base,\n docker_image_repository=args.docker_image_repository,\n extra_pypi=args.extra_pypi_index,\n@@ -91,7 +97,7 @@\n \"--worker_image\",\n image_name,\n \"--model_zoo\",\n- _model_zoo_in_docker(args.model_zoo),\n+ _model_zoo_in_docker(model_zoo),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n ]\n", "issue": "Parameter model_zoo cannot take a path end with `/`\nIf the parameter `model_zoo` is a path end with slash, the following exception will show up:\r\n```\r\n[2019-10-10 13:15:47,574] [INFO] [image_builder.py:227:_print_docker_progress]\r\n\r\nTraceback (most recent call last):\r\n File \"/miniconda2/envs/elasticdl-rc0/bin/elasticdl\", line 10, in <module>\r\n sys.exit(main())\r\n File \"/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/client.py\", line 46, in main\r\n args.func(args)\r\n File \"/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/api.py\", line 24, in train\r\n docker_tlskey=args.docker_tlskey,\r\n File \"/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/image_builder.py\", line 74, in build_and_push_docker_image\r\n _build_docker_image(client, ctx_dir, df.name, image_name)\r\n File \"/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/image_builder.py\", line 241, in _build_docker_image\r\n _print_docker_progress(line)\r\n File \"/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/image_builder.py\", line 224, in _print_docker_progress\r\n raise RuntimeError(\"Docker image build: \" + error)\r\nRuntimeError: Docker image build: COPY failed: stat /var/lib/docker/tmp/docker-builder531748767/model_zoo: no such file or directory\r\n```\n", "before_files": [{"content": "import os\n\nfrom elasticdl.python.common import k8s_client as k8s\nfrom elasticdl.python.common.args import (\n build_arguments_from_parsed_result,\n parse_envs,\n)\nfrom elasticdl.python.common.log_utils import default_logger as logger\nfrom 
elasticdl.python.elasticdl.image_builder import (\n build_and_push_docker_image,\n remove_images,\n)\n\n\ndef train(args):\n image_name = build_and_push_docker_image(\n model_zoo=args.model_zoo,\n base_image=args.image_base,\n docker_image_repository=args.docker_image_repository,\n extra_pypi=args.extra_pypi_index,\n cluster_spec=args.cluster_spec,\n docker_base_url=args.docker_base_url,\n docker_tlscert=args.docker_tlscert,\n docker_tlskey=args.docker_tlskey,\n )\n\n container_args = [\n \"-m\",\n \"elasticdl.python.master.main\",\n \"--worker_image\",\n image_name,\n \"--model_zoo\",\n _model_zoo_in_docker(args.model_zoo),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n ]\n container_args.extend(\n build_arguments_from_parsed_result(\n args, filter_args=[\"model_zoo\", \"cluster_spec\", \"worker_image\"]\n )\n )\n\n _submit_job(image_name, args, container_args)\n # TODO: print dashboard url after launching the master pod\n\n\ndef evaluate(args):\n image_name = build_and_push_docker_image(\n model_zoo=args.model_zoo,\n base_image=args.image_base,\n docker_image_repository=args.docker_image_repository,\n extra_pypi=args.extra_pypi_index,\n cluster_spec=args.cluster_spec,\n docker_base_url=args.docker_base_url,\n docker_tlscert=args.docker_tlscert,\n docker_tlskey=args.docker_tlskey,\n )\n container_args = [\n \"-m\",\n \"elasticdl.python.master.main\",\n \"--worker_image\",\n image_name,\n \"--model_zoo\",\n _model_zoo_in_docker(args.model_zoo),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n ]\n container_args.extend(\n build_arguments_from_parsed_result(\n args, filter_args=[\"model_zoo\", \"cluster_spec\", \"worker_image\"]\n )\n )\n\n _submit_job(image_name, args, container_args)\n\n\ndef predict(args):\n image_name = build_and_push_docker_image(\n model_zoo=args.model_zoo,\n base_image=args.image_base,\n docker_image_repository=args.docker_image_repository,\n extra_pypi=args.extra_pypi_index,\n cluster_spec=args.cluster_spec,\n docker_base_url=args.docker_base_url,\n docker_tlscert=args.docker_tlscert,\n docker_tlskey=args.docker_tlskey,\n )\n container_args = [\n \"-m\",\n \"elasticdl.python.master.main\",\n \"--worker_image\",\n image_name,\n \"--model_zoo\",\n _model_zoo_in_docker(args.model_zoo),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n ]\n container_args.extend(\n build_arguments_from_parsed_result(\n args, filter_args=[\"model_zoo\", \"cluster_spec\", \"worker_image\"]\n )\n )\n\n _submit_job(image_name, args, container_args)\n\n\ndef clean(args):\n if args.docker_image_repository and args.all:\n raise ValueError(\n \"--docker_image_repository and --all cannot \"\n \"be specified at the same time\"\n )\n if not (args.docker_image_repository or args.all):\n raise ValueError(\n \"Either --docker_image_repository or --all \"\n \"needs to be configured\"\n )\n remove_images(\n docker_image_repository=args.docker_image_repository,\n docker_base_url=args.docker_base_url,\n docker_tlscert=args.docker_tlscert,\n docker_tlskey=args.docker_tlskey,\n )\n\n\ndef _submit_job(image_name, client_args, container_args):\n client = k8s.Client(\n image_name=image_name,\n namespace=client_args.namespace,\n job_name=client_args.job_name,\n event_callback=None,\n cluster_spec=client_args.cluster_spec,\n )\n\n client.create_master(\n resource_requests=client_args.master_resource_request,\n resource_limits=client_args.master_resource_limit,\n args=container_args,\n pod_priority=client_args.master_pod_priority,\n 
image_pull_policy=client_args.image_pull_policy,\n restart_policy=client_args.restart_policy,\n volume=client_args.volume,\n envs=parse_envs(client_args.envs),\n )\n logger.info(\n \"ElasticDL job %s was successfully submitted. The master pod is: %s.\"\n % (client_args.job_name, client.get_master_pod_name())\n )\n\n\ndef _model_zoo_in_docker(model_zoo):\n MODEL_ROOT_PATH = \"/model_zoo\"\n return os.path.join(MODEL_ROOT_PATH, os.path.basename(model_zoo))\n\n\ndef _cluster_spec_def_in_docker(cluster_spec):\n CLUSTER_SPEC_ROOT_PATH = \"/cluster_spec\"\n return (\n os.path.join(CLUSTER_SPEC_ROOT_PATH, os.path.basename(cluster_spec))\n if cluster_spec\n else \"\"\n )\n", "path": "elasticdl/python/elasticdl/api.py"}], "after_files": [{"content": "import os\n\nfrom elasticdl.python.common import k8s_client as k8s\nfrom elasticdl.python.common.args import (\n build_arguments_from_parsed_result,\n parse_envs,\n)\nfrom elasticdl.python.common.log_utils import default_logger as logger\nfrom elasticdl.python.elasticdl.image_builder import (\n build_and_push_docker_image,\n remove_images,\n)\n\n\ndef train(args):\n model_zoo = os.path.normpath(args.model_zoo)\n\n image_name = build_and_push_docker_image(\n model_zoo=model_zoo,\n base_image=args.image_base,\n docker_image_repository=args.docker_image_repository,\n extra_pypi=args.extra_pypi_index,\n cluster_spec=args.cluster_spec,\n docker_base_url=args.docker_base_url,\n docker_tlscert=args.docker_tlscert,\n docker_tlskey=args.docker_tlskey,\n )\n\n container_args = [\n \"-m\",\n \"elasticdl.python.master.main\",\n \"--worker_image\",\n image_name,\n \"--model_zoo\",\n _model_zoo_in_docker(model_zoo),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n ]\n container_args.extend(\n build_arguments_from_parsed_result(\n args, filter_args=[\"model_zoo\", \"cluster_spec\", \"worker_image\"]\n )\n )\n\n _submit_job(image_name, args, container_args)\n # TODO: print dashboard url after launching the master pod\n\n\ndef evaluate(args):\n model_zoo = os.path.normpath(args.model_zoo)\n\n image_name = build_and_push_docker_image(\n model_zoo=model_zoo,\n base_image=args.image_base,\n docker_image_repository=args.docker_image_repository,\n extra_pypi=args.extra_pypi_index,\n cluster_spec=args.cluster_spec,\n docker_base_url=args.docker_base_url,\n docker_tlscert=args.docker_tlscert,\n docker_tlskey=args.docker_tlskey,\n )\n container_args = [\n \"-m\",\n \"elasticdl.python.master.main\",\n \"--worker_image\",\n image_name,\n \"--model_zoo\",\n _model_zoo_in_docker(model_zoo),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n ]\n container_args.extend(\n build_arguments_from_parsed_result(\n args, filter_args=[\"model_zoo\", \"cluster_spec\", \"worker_image\"]\n )\n )\n\n _submit_job(image_name, args, container_args)\n\n\ndef predict(args):\n model_zoo = os.path.normpath(args.model_zoo)\n\n image_name = build_and_push_docker_image(\n model_zoo=model_zoo,\n base_image=args.image_base,\n docker_image_repository=args.docker_image_repository,\n extra_pypi=args.extra_pypi_index,\n cluster_spec=args.cluster_spec,\n docker_base_url=args.docker_base_url,\n docker_tlscert=args.docker_tlscert,\n docker_tlskey=args.docker_tlskey,\n )\n container_args = [\n \"-m\",\n \"elasticdl.python.master.main\",\n \"--worker_image\",\n image_name,\n \"--model_zoo\",\n _model_zoo_in_docker(model_zoo),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n ]\n container_args.extend(\n build_arguments_from_parsed_result(\n 
args, filter_args=[\"model_zoo\", \"cluster_spec\", \"worker_image\"]\n )\n )\n\n _submit_job(image_name, args, container_args)\n\n\ndef clean(args):\n if args.docker_image_repository and args.all:\n raise ValueError(\n \"--docker_image_repository and --all cannot \"\n \"be specified at the same time\"\n )\n if not (args.docker_image_repository or args.all):\n raise ValueError(\n \"Either --docker_image_repository or --all \"\n \"needs to be configured\"\n )\n remove_images(\n docker_image_repository=args.docker_image_repository,\n docker_base_url=args.docker_base_url,\n docker_tlscert=args.docker_tlscert,\n docker_tlskey=args.docker_tlskey,\n )\n\n\ndef _submit_job(image_name, client_args, container_args):\n client = k8s.Client(\n image_name=image_name,\n namespace=client_args.namespace,\n job_name=client_args.job_name,\n event_callback=None,\n cluster_spec=client_args.cluster_spec,\n )\n\n client.create_master(\n resource_requests=client_args.master_resource_request,\n resource_limits=client_args.master_resource_limit,\n args=container_args,\n pod_priority=client_args.master_pod_priority,\n image_pull_policy=client_args.image_pull_policy,\n restart_policy=client_args.restart_policy,\n volume=client_args.volume,\n envs=parse_envs(client_args.envs),\n )\n logger.info(\n \"ElasticDL job %s was successfully submitted. The master pod is: %s.\"\n % (client_args.job_name, client.get_master_pod_name())\n )\n\n\ndef _model_zoo_in_docker(model_zoo):\n MODEL_ROOT_PATH = \"/model_zoo\"\n return os.path.join(MODEL_ROOT_PATH, os.path.basename(model_zoo))\n\n\ndef _cluster_spec_def_in_docker(cluster_spec):\n CLUSTER_SPEC_ROOT_PATH = \"/cluster_spec\"\n return (\n os.path.join(CLUSTER_SPEC_ROOT_PATH, os.path.basename(cluster_spec))\n if cluster_spec\n else \"\"\n )\n", "path": "elasticdl/python/elasticdl/api.py"}]} | 2,166 | 537 |
gh_patches_debug_11115 | rasdani/github-patches | git_diff | optuna__optuna-2686 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Waiting for Heartbeat
## Expected behavior
After `study.optimize` with a high heartbeat interval (e.g. 60s) finishes running trials, it should exit almost immediately.
## Environment
- Optuna version: 2.7.0
- Python version: 3.9
- OS: Fedora 33
- (Optional) Other libraries and their versions: PostgreSQL
## Observed Behavior
Instead of exiting immediately, the main thread tries to join the heartbeat thread, which only completes after the next scheduled heartbeat. This can lead to long delays during which the program does basically nothing.
## Steps to reproduce
1. Create a RDB storage with a heartbeat interval of 60s.
2. Create a fast objective, a study and call `study.optimize(n_trials=1)`.
3. Measure runtime.
## Additional context (optional)
It might be better to use the [`wait(timeout=...)`](https://docs.python.org/3/library/threading.html#threading.Event.wait) method [here](https://github.com/optuna/optuna/blob/2fd68d0102a22e76b5a4a142cc87722d92dcf667/optuna/_optimize.py#L340).
--- END ISSUE ---
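The suggestion at the end of the issue hinges on a property of `threading.Event.wait`: it blocks for at most `timeout` seconds but returns `True` as soon as the event is set, whereas `time.sleep` always burns the full interval before the stop flag can even be checked. A small self-contained sketch (interval shortened so it runs quickly):

```python
import threading
import time

stop_event = threading.Event()
interval = 5.0   # stand-in for a long heartbeat interval

def waiter() -> None:
    start = time.monotonic()
    # Returns True immediately once the event is set, False only after a full timeout.
    was_set = stop_event.wait(timeout=interval)
    print(f"woke after {time.monotonic() - start:.2f}s, event set: {was_set}")

t = threading.Thread(target=waiter)
t.start()
time.sleep(0.1)    # pretend the trial finished quickly
stop_event.set()   # the waiter wakes up right away instead of after 5 s
t.join()
```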
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optuna/_optimize.py`
Content:
```
1 from concurrent.futures import FIRST_COMPLETED
2 from concurrent.futures import Future
3 from concurrent.futures import ThreadPoolExecutor
4 from concurrent.futures import wait
5 import copy
6 import datetime
7 import gc
8 import itertools
9 import math
10 import os
11 import sys
12 from threading import Event
13 from threading import Thread
14 import time
15 from typing import Any
16 from typing import Callable
17 from typing import cast
18 from typing import List
19 from typing import Optional
20 from typing import Sequence
21 from typing import Set
22 from typing import Tuple
23 from typing import Type
24 from typing import Union
25 import warnings
26
27 import optuna
28 from optuna import exceptions
29 from optuna import logging
30 from optuna import progress_bar as pbar_module
31 from optuna import storages
32 from optuna import trial as trial_module
33 from optuna.trial import FrozenTrial
34 from optuna.trial import TrialState
35
36
37 _logger = logging.get_logger(__name__)
38
39
40 def _optimize(
41 study: "optuna.Study",
42 func: "optuna.study.ObjectiveFuncType",
43 n_trials: Optional[int] = None,
44 timeout: Optional[float] = None,
45 n_jobs: int = 1,
46 catch: Tuple[Type[Exception], ...] = (),
47 callbacks: Optional[List[Callable[["optuna.Study", FrozenTrial], None]]] = None,
48 gc_after_trial: bool = False,
49 show_progress_bar: bool = False,
50 ) -> None:
51 if not isinstance(catch, tuple):
52 raise TypeError(
53 "The catch argument is of type '{}' but must be a tuple.".format(type(catch).__name__)
54 )
55
56 if not study._optimize_lock.acquire(False):
57 raise RuntimeError("Nested invocation of `Study.optimize` method isn't allowed.")
58
59 # TODO(crcrpar): Make progress bar work when n_jobs != 1.
60 progress_bar = pbar_module._ProgressBar(show_progress_bar and n_jobs == 1, n_trials, timeout)
61
62 study._stop_flag = False
63
64 try:
65 if n_jobs == 1:
66 _optimize_sequential(
67 study,
68 func,
69 n_trials,
70 timeout,
71 catch,
72 callbacks,
73 gc_after_trial,
74 reseed_sampler_rng=False,
75 time_start=None,
76 progress_bar=progress_bar,
77 )
78 else:
79 if show_progress_bar:
80 warnings.warn("Progress bar only supports serial execution (`n_jobs=1`).")
81
82 if n_jobs == -1:
83 n_jobs = os.cpu_count() or 1
84
85 time_start = datetime.datetime.now()
86 futures: Set[Future] = set()
87
88 with ThreadPoolExecutor(max_workers=n_jobs) as executor:
89 for n_submitted_trials in itertools.count():
90 if study._stop_flag:
91 break
92
93 if (
94 timeout is not None
95 and (datetime.datetime.now() - time_start).total_seconds() > timeout
96 ):
97 break
98
99 if n_trials is not None and n_submitted_trials >= n_trials:
100 break
101
102 if len(futures) >= n_jobs:
103 completed, futures = wait(futures, return_when=FIRST_COMPLETED)
104 # Raise if exception occurred in executing the completed futures.
105 for f in completed:
106 f.result()
107
108 futures.add(
109 executor.submit(
110 _optimize_sequential,
111 study,
112 func,
113 1,
114 timeout,
115 catch,
116 callbacks,
117 gc_after_trial,
118 True,
119 time_start,
120 None,
121 )
122 )
123 finally:
124 study._optimize_lock.release()
125 progress_bar.close()
126
127
128 def _optimize_sequential(
129 study: "optuna.Study",
130 func: "optuna.study.ObjectiveFuncType",
131 n_trials: Optional[int],
132 timeout: Optional[float],
133 catch: Tuple[Type[Exception], ...],
134 callbacks: Optional[List[Callable[["optuna.Study", FrozenTrial], None]]],
135 gc_after_trial: bool,
136 reseed_sampler_rng: bool,
137 time_start: Optional[datetime.datetime],
138 progress_bar: Optional[pbar_module._ProgressBar],
139 ) -> None:
140 if reseed_sampler_rng:
141 study.sampler.reseed_rng()
142
143 i_trial = 0
144
145 if time_start is None:
146 time_start = datetime.datetime.now()
147
148 while True:
149 if study._stop_flag:
150 break
151
152 if n_trials is not None:
153 if i_trial >= n_trials:
154 break
155 i_trial += 1
156
157 if timeout is not None:
158 elapsed_seconds = (datetime.datetime.now() - time_start).total_seconds()
159 if elapsed_seconds >= timeout:
160 break
161
162 try:
163 trial = _run_trial(study, func, catch)
164 except Exception:
165 raise
166 finally:
167 # The following line mitigates memory problems that can be occurred in some
168 # environments (e.g., services that use computing containers such as CircleCI).
169 # Please refer to the following PR for further details:
170 # https://github.com/optuna/optuna/pull/325.
171 if gc_after_trial:
172 gc.collect()
173
174 if callbacks is not None:
175 frozen_trial = copy.deepcopy(study._storage.get_trial(trial._trial_id))
176 for callback in callbacks:
177 callback(study, frozen_trial)
178
179 if progress_bar is not None:
180 progress_bar.update((datetime.datetime.now() - time_start).total_seconds())
181
182 study._storage.remove_session()
183
184
185 def _run_trial(
186 study: "optuna.Study",
187 func: "optuna.study.ObjectiveFuncType",
188 catch: Tuple[Type[Exception], ...],
189 ) -> trial_module.Trial:
190 if study._storage.is_heartbeat_enabled():
191 failed_trial_ids = study._storage.fail_stale_trials(study._study_id)
192 failed_trial_callback = study._storage.get_failed_trial_callback()
193 if failed_trial_callback is not None:
194 for trial_id in failed_trial_ids:
195 failed_trial = copy.deepcopy(study._storage.get_trial(trial_id))
196 failed_trial_callback(study, failed_trial)
197
198 trial = study.ask()
199
200 state: Optional[TrialState] = None
201 values: Optional[List[float]] = None
202 func_err: Optional[Exception] = None
203 func_err_fail_exc_info: Optional[Any] = None
204 # Set to a string if `func` returns correctly but the return value violates assumptions.
205 values_conversion_failure_message: Optional[str] = None
206 stop_event: Optional[Event] = None
207 thread: Optional[Thread] = None
208
209 if study._storage.is_heartbeat_enabled():
210 stop_event = Event()
211 thread = Thread(
212 target=_record_heartbeat, args=(trial._trial_id, study._storage, stop_event)
213 )
214 thread.start()
215
216 try:
217 value_or_values = func(trial)
218 except exceptions.TrialPruned as e:
219 # TODO(mamu): Handle multi-objective cases.
220 state = TrialState.PRUNED
221 func_err = e
222 except Exception as e:
223 state = TrialState.FAIL
224 func_err = e
225 func_err_fail_exc_info = sys.exc_info()
226 else:
227 # TODO(hvy): Avoid checking the values both here and inside `Study.tell`.
228 values, values_conversion_failure_message = _check_and_convert_to_values(
229 len(study.directions), value_or_values, trial.number
230 )
231 if values_conversion_failure_message is not None:
232 state = TrialState.FAIL
233 else:
234 state = TrialState.COMPLETE
235
236 if study._storage.is_heartbeat_enabled():
237 assert stop_event is not None
238 assert thread is not None
239 stop_event.set()
240 thread.join()
241
242 # `Study.tell` may raise during trial post-processing.
243 try:
244 study.tell(trial, values=values, state=state)
245 except Exception:
246 raise
247 finally:
248 if state == TrialState.COMPLETE:
249 study._log_completed_trial(trial, cast(List[float], values))
250 elif state == TrialState.PRUNED:
251 _logger.info("Trial {} pruned. {}".format(trial.number, str(func_err)))
252 elif state == TrialState.FAIL:
253 if func_err is not None:
254 _logger.warning(
255 "Trial {} failed because of the following error: {}".format(
256 trial.number, repr(func_err)
257 ),
258 exc_info=func_err_fail_exc_info,
259 )
260 elif values_conversion_failure_message is not None:
261 _logger.warning(values_conversion_failure_message)
262 else:
263 assert False, "Should not reach."
264 else:
265 assert False, "Should not reach."
266
267 if state == TrialState.FAIL and func_err is not None and not isinstance(func_err, catch):
268 raise func_err
269 return trial
270
271
272 def _check_and_convert_to_values(
273 n_objectives: int, original_value: Union[float, Sequence[float]], trial_number: int
274 ) -> Tuple[Optional[List[float]], Optional[str]]:
275 if isinstance(original_value, Sequence):
276 if n_objectives != len(original_value):
277 return (
278 None,
279 (
280 f"Trial {trial_number} failed, because the number of the values "
281 f"{len(original_value)} did not match the number of the objectives "
282 f"{n_objectives}."
283 ),
284 )
285 else:
286 _original_values = list(original_value)
287 else:
288 _original_values = [original_value]
289
290 _checked_values = []
291 for v in _original_values:
292 checked_v, failure_message = _check_single_value(v, trial_number)
293 if failure_message is not None:
294 # TODO(Imamura): Construct error message taking into account all values and do not
295 # early return
296 # `value` is assumed to be ignored on failure so we can set it to any value.
297 return None, failure_message
298 elif isinstance(checked_v, float):
299 _checked_values.append(checked_v)
300 else:
301 assert False
302
303 return _checked_values, None
304
305
306 def _check_single_value(
307 original_value: float, trial_number: int
308 ) -> Tuple[Optional[float], Optional[str]]:
309 value = None
310 failure_message = None
311
312 try:
313 value = float(original_value)
314 except (
315 ValueError,
316 TypeError,
317 ):
318 failure_message = (
319 f"Trial {trial_number} failed, because the value {repr(original_value)} could not be "
320 "cast to float."
321 )
322
323 if value is not None and math.isnan(value):
324 value = None
325 failure_message = (
326 f"Trial {trial_number} failed, because the objective function returned "
327 f"{original_value}."
328 )
329
330 return value, failure_message
331
332
333 def _record_heartbeat(trial_id: int, storage: storages.BaseStorage, stop_event: Event) -> None:
334 heartbeat_interval = storage.get_heartbeat_interval()
335 assert heartbeat_interval is not None
336 while True:
337 storage.record_heartbeat(trial_id)
338 if stop_event.is_set():
339 return
340 time.sleep(heartbeat_interval)
341
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/optuna/_optimize.py b/optuna/_optimize.py
--- a/optuna/_optimize.py
+++ b/optuna/_optimize.py
@@ -11,7 +11,6 @@
import sys
from threading import Event
from threading import Thread
-import time
from typing import Any
from typing import Callable
from typing import cast
@@ -335,6 +334,5 @@
assert heartbeat_interval is not None
while True:
storage.record_heartbeat(trial_id)
- if stop_event.is_set():
+ if stop_event.wait(timeout=heartbeat_interval):
return
- time.sleep(heartbeat_interval)
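A rough sketch of the patched loop in isolation (the storage call is replaced by a hypothetical `record` callable): because `Event.wait(timeout=...)` returns early once the event is set, the main thread's `thread.join()` now completes almost immediately instead of waiting out the remainder of a 60-second interval:

```python
import threading
import time

def record_heartbeat(record, stop_event: threading.Event, heartbeat_interval: float) -> None:
    # Same shape as the patched loop: record once per interval, but return as
    # soon as the stop event is set rather than sleeping out the interval.
    while True:
        record()
        if stop_event.wait(timeout=heartbeat_interval):
            return

stop_event = threading.Event()
thread = threading.Thread(
    target=record_heartbeat,
    args=(lambda: print("heartbeat recorded"), stop_event, 60.0),
)
thread.start()

time.sleep(0.2)                 # pretend a fast objective just finished
start = time.monotonic()
stop_event.set()
thread.join()
print(f"joined in {time.monotonic() - start:.3f}s")   # far below the 60 s interval
```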
| {"golden_diff": "diff --git a/optuna/_optimize.py b/optuna/_optimize.py\n--- a/optuna/_optimize.py\n+++ b/optuna/_optimize.py\n@@ -11,7 +11,6 @@\n import sys\n from threading import Event\n from threading import Thread\n-import time\n from typing import Any\n from typing import Callable\n from typing import cast\n@@ -335,6 +334,5 @@\n assert heartbeat_interval is not None\n while True:\n storage.record_heartbeat(trial_id)\n- if stop_event.is_set():\n+ if stop_event.wait(timeout=heartbeat_interval):\n return\n- time.sleep(heartbeat_interval)\n", "issue": "Waiting for Heartbeat\n<!-- Please write a clear and concise description of what the bug is. -->\r\n\r\n## Expected behavior\r\n\r\nAfter `study.optimize` with a high heartbeat interval (e.g. 60s) finishes running trials, it should exit almost immediately. \r\n\r\n## Environment\r\n\r\n- Optuna version: 2.7.0\r\n- Python version: 3.9\r\n- OS: Fedora 33\r\n- (Optional) Other libraries and their versions: PostgreSQL\r\n\r\n## Observed Behavior\r\nInstead of exiting immediately, the main thread tries to join with the heartbeat thread, which only completes after the next scheduled hearbeat. This can lead to high delays where the program does basically nothing.\r\n\r\n## Steps to reproduce\r\n\r\n1. Create a RDB storage with a heartbeat interval of 60s.\r\n2. Create a fast objective, a study and call `study.optimize(n_trials=1)`.\r\n3. Measure runtime.\r\n\r\n## Additional context (optional)\r\nIt might be better to use the [`wait(timeout=...)`](https://docs.python.org/3/library/threading.html#threading.Event.wait) method [here](https://github.com/optuna/optuna/blob/2fd68d0102a22e76b5a4a142cc87722d92dcf667/optuna/_optimize.py#L340).\r\n\r\n\n", "before_files": [{"content": "from concurrent.futures import FIRST_COMPLETED\nfrom concurrent.futures import Future\nfrom concurrent.futures import ThreadPoolExecutor\nfrom concurrent.futures import wait\nimport copy\nimport datetime\nimport gc\nimport itertools\nimport math\nimport os\nimport sys\nfrom threading import Event\nfrom threading import Thread\nimport time\nfrom typing import Any\nfrom typing import Callable\nfrom typing import cast\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Set\nfrom typing import Tuple\nfrom typing import Type\nfrom typing import Union\nimport warnings\n\nimport optuna\nfrom optuna import exceptions\nfrom optuna import logging\nfrom optuna import progress_bar as pbar_module\nfrom optuna import storages\nfrom optuna import trial as trial_module\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\n\n\n_logger = logging.get_logger(__name__)\n\n\ndef _optimize(\n study: \"optuna.Study\",\n func: \"optuna.study.ObjectiveFuncType\",\n n_trials: Optional[int] = None,\n timeout: Optional[float] = None,\n n_jobs: int = 1,\n catch: Tuple[Type[Exception], ...] 
= (),\n callbacks: Optional[List[Callable[[\"optuna.Study\", FrozenTrial], None]]] = None,\n gc_after_trial: bool = False,\n show_progress_bar: bool = False,\n) -> None:\n if not isinstance(catch, tuple):\n raise TypeError(\n \"The catch argument is of type '{}' but must be a tuple.\".format(type(catch).__name__)\n )\n\n if not study._optimize_lock.acquire(False):\n raise RuntimeError(\"Nested invocation of `Study.optimize` method isn't allowed.\")\n\n # TODO(crcrpar): Make progress bar work when n_jobs != 1.\n progress_bar = pbar_module._ProgressBar(show_progress_bar and n_jobs == 1, n_trials, timeout)\n\n study._stop_flag = False\n\n try:\n if n_jobs == 1:\n _optimize_sequential(\n study,\n func,\n n_trials,\n timeout,\n catch,\n callbacks,\n gc_after_trial,\n reseed_sampler_rng=False,\n time_start=None,\n progress_bar=progress_bar,\n )\n else:\n if show_progress_bar:\n warnings.warn(\"Progress bar only supports serial execution (`n_jobs=1`).\")\n\n if n_jobs == -1:\n n_jobs = os.cpu_count() or 1\n\n time_start = datetime.datetime.now()\n futures: Set[Future] = set()\n\n with ThreadPoolExecutor(max_workers=n_jobs) as executor:\n for n_submitted_trials in itertools.count():\n if study._stop_flag:\n break\n\n if (\n timeout is not None\n and (datetime.datetime.now() - time_start).total_seconds() > timeout\n ):\n break\n\n if n_trials is not None and n_submitted_trials >= n_trials:\n break\n\n if len(futures) >= n_jobs:\n completed, futures = wait(futures, return_when=FIRST_COMPLETED)\n # Raise if exception occurred in executing the completed futures.\n for f in completed:\n f.result()\n\n futures.add(\n executor.submit(\n _optimize_sequential,\n study,\n func,\n 1,\n timeout,\n catch,\n callbacks,\n gc_after_trial,\n True,\n time_start,\n None,\n )\n )\n finally:\n study._optimize_lock.release()\n progress_bar.close()\n\n\ndef _optimize_sequential(\n study: \"optuna.Study\",\n func: \"optuna.study.ObjectiveFuncType\",\n n_trials: Optional[int],\n timeout: Optional[float],\n catch: Tuple[Type[Exception], ...],\n callbacks: Optional[List[Callable[[\"optuna.Study\", FrozenTrial], None]]],\n gc_after_trial: bool,\n reseed_sampler_rng: bool,\n time_start: Optional[datetime.datetime],\n progress_bar: Optional[pbar_module._ProgressBar],\n) -> None:\n if reseed_sampler_rng:\n study.sampler.reseed_rng()\n\n i_trial = 0\n\n if time_start is None:\n time_start = datetime.datetime.now()\n\n while True:\n if study._stop_flag:\n break\n\n if n_trials is not None:\n if i_trial >= n_trials:\n break\n i_trial += 1\n\n if timeout is not None:\n elapsed_seconds = (datetime.datetime.now() - time_start).total_seconds()\n if elapsed_seconds >= timeout:\n break\n\n try:\n trial = _run_trial(study, func, catch)\n except Exception:\n raise\n finally:\n # The following line mitigates memory problems that can be occurred in some\n # environments (e.g., services that use computing containers such as CircleCI).\n # Please refer to the following PR for further details:\n # https://github.com/optuna/optuna/pull/325.\n if gc_after_trial:\n gc.collect()\n\n if callbacks is not None:\n frozen_trial = copy.deepcopy(study._storage.get_trial(trial._trial_id))\n for callback in callbacks:\n callback(study, frozen_trial)\n\n if progress_bar is not None:\n progress_bar.update((datetime.datetime.now() - time_start).total_seconds())\n\n study._storage.remove_session()\n\n\ndef _run_trial(\n study: \"optuna.Study\",\n func: \"optuna.study.ObjectiveFuncType\",\n catch: Tuple[Type[Exception], ...],\n) -> trial_module.Trial:\n if 
study._storage.is_heartbeat_enabled():\n failed_trial_ids = study._storage.fail_stale_trials(study._study_id)\n failed_trial_callback = study._storage.get_failed_trial_callback()\n if failed_trial_callback is not None:\n for trial_id in failed_trial_ids:\n failed_trial = copy.deepcopy(study._storage.get_trial(trial_id))\n failed_trial_callback(study, failed_trial)\n\n trial = study.ask()\n\n state: Optional[TrialState] = None\n values: Optional[List[float]] = None\n func_err: Optional[Exception] = None\n func_err_fail_exc_info: Optional[Any] = None\n # Set to a string if `func` returns correctly but the return value violates assumptions.\n values_conversion_failure_message: Optional[str] = None\n stop_event: Optional[Event] = None\n thread: Optional[Thread] = None\n\n if study._storage.is_heartbeat_enabled():\n stop_event = Event()\n thread = Thread(\n target=_record_heartbeat, args=(trial._trial_id, study._storage, stop_event)\n )\n thread.start()\n\n try:\n value_or_values = func(trial)\n except exceptions.TrialPruned as e:\n # TODO(mamu): Handle multi-objective cases.\n state = TrialState.PRUNED\n func_err = e\n except Exception as e:\n state = TrialState.FAIL\n func_err = e\n func_err_fail_exc_info = sys.exc_info()\n else:\n # TODO(hvy): Avoid checking the values both here and inside `Study.tell`.\n values, values_conversion_failure_message = _check_and_convert_to_values(\n len(study.directions), value_or_values, trial.number\n )\n if values_conversion_failure_message is not None:\n state = TrialState.FAIL\n else:\n state = TrialState.COMPLETE\n\n if study._storage.is_heartbeat_enabled():\n assert stop_event is not None\n assert thread is not None\n stop_event.set()\n thread.join()\n\n # `Study.tell` may raise during trial post-processing.\n try:\n study.tell(trial, values=values, state=state)\n except Exception:\n raise\n finally:\n if state == TrialState.COMPLETE:\n study._log_completed_trial(trial, cast(List[float], values))\n elif state == TrialState.PRUNED:\n _logger.info(\"Trial {} pruned. 
{}\".format(trial.number, str(func_err)))\n elif state == TrialState.FAIL:\n if func_err is not None:\n _logger.warning(\n \"Trial {} failed because of the following error: {}\".format(\n trial.number, repr(func_err)\n ),\n exc_info=func_err_fail_exc_info,\n )\n elif values_conversion_failure_message is not None:\n _logger.warning(values_conversion_failure_message)\n else:\n assert False, \"Should not reach.\"\n else:\n assert False, \"Should not reach.\"\n\n if state == TrialState.FAIL and func_err is not None and not isinstance(func_err, catch):\n raise func_err\n return trial\n\n\ndef _check_and_convert_to_values(\n n_objectives: int, original_value: Union[float, Sequence[float]], trial_number: int\n) -> Tuple[Optional[List[float]], Optional[str]]:\n if isinstance(original_value, Sequence):\n if n_objectives != len(original_value):\n return (\n None,\n (\n f\"Trial {trial_number} failed, because the number of the values \"\n f\"{len(original_value)} did not match the number of the objectives \"\n f\"{n_objectives}.\"\n ),\n )\n else:\n _original_values = list(original_value)\n else:\n _original_values = [original_value]\n\n _checked_values = []\n for v in _original_values:\n checked_v, failure_message = _check_single_value(v, trial_number)\n if failure_message is not None:\n # TODO(Imamura): Construct error message taking into account all values and do not\n # early return\n # `value` is assumed to be ignored on failure so we can set it to any value.\n return None, failure_message\n elif isinstance(checked_v, float):\n _checked_values.append(checked_v)\n else:\n assert False\n\n return _checked_values, None\n\n\ndef _check_single_value(\n original_value: float, trial_number: int\n) -> Tuple[Optional[float], Optional[str]]:\n value = None\n failure_message = None\n\n try:\n value = float(original_value)\n except (\n ValueError,\n TypeError,\n ):\n failure_message = (\n f\"Trial {trial_number} failed, because the value {repr(original_value)} could not be \"\n \"cast to float.\"\n )\n\n if value is not None and math.isnan(value):\n value = None\n failure_message = (\n f\"Trial {trial_number} failed, because the objective function returned \"\n f\"{original_value}.\"\n )\n\n return value, failure_message\n\n\ndef _record_heartbeat(trial_id: int, storage: storages.BaseStorage, stop_event: Event) -> None:\n heartbeat_interval = storage.get_heartbeat_interval()\n assert heartbeat_interval is not None\n while True:\n storage.record_heartbeat(trial_id)\n if stop_event.is_set():\n return\n time.sleep(heartbeat_interval)\n", "path": "optuna/_optimize.py"}], "after_files": [{"content": "from concurrent.futures import FIRST_COMPLETED\nfrom concurrent.futures import Future\nfrom concurrent.futures import ThreadPoolExecutor\nfrom concurrent.futures import wait\nimport copy\nimport datetime\nimport gc\nimport itertools\nimport math\nimport os\nimport sys\nfrom threading import Event\nfrom threading import Thread\nfrom typing import Any\nfrom typing import Callable\nfrom typing import cast\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Set\nfrom typing import Tuple\nfrom typing import Type\nfrom typing import Union\nimport warnings\n\nimport optuna\nfrom optuna import exceptions\nfrom optuna import logging\nfrom optuna import progress_bar as pbar_module\nfrom optuna import storages\nfrom optuna import trial as trial_module\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\n\n\n_logger = logging.get_logger(__name__)\n\n\ndef 
_optimize(\n study: \"optuna.Study\",\n func: \"optuna.study.ObjectiveFuncType\",\n n_trials: Optional[int] = None,\n timeout: Optional[float] = None,\n n_jobs: int = 1,\n catch: Tuple[Type[Exception], ...] = (),\n callbacks: Optional[List[Callable[[\"optuna.Study\", FrozenTrial], None]]] = None,\n gc_after_trial: bool = False,\n show_progress_bar: bool = False,\n) -> None:\n if not isinstance(catch, tuple):\n raise TypeError(\n \"The catch argument is of type '{}' but must be a tuple.\".format(type(catch).__name__)\n )\n\n if not study._optimize_lock.acquire(False):\n raise RuntimeError(\"Nested invocation of `Study.optimize` method isn't allowed.\")\n\n # TODO(crcrpar): Make progress bar work when n_jobs != 1.\n progress_bar = pbar_module._ProgressBar(show_progress_bar and n_jobs == 1, n_trials, timeout)\n\n study._stop_flag = False\n\n try:\n if n_jobs == 1:\n _optimize_sequential(\n study,\n func,\n n_trials,\n timeout,\n catch,\n callbacks,\n gc_after_trial,\n reseed_sampler_rng=False,\n time_start=None,\n progress_bar=progress_bar,\n )\n else:\n if show_progress_bar:\n warnings.warn(\"Progress bar only supports serial execution (`n_jobs=1`).\")\n\n if n_jobs == -1:\n n_jobs = os.cpu_count() or 1\n\n time_start = datetime.datetime.now()\n futures: Set[Future] = set()\n\n with ThreadPoolExecutor(max_workers=n_jobs) as executor:\n for n_submitted_trials in itertools.count():\n if study._stop_flag:\n break\n\n if (\n timeout is not None\n and (datetime.datetime.now() - time_start).total_seconds() > timeout\n ):\n break\n\n if n_trials is not None and n_submitted_trials >= n_trials:\n break\n\n if len(futures) >= n_jobs:\n completed, futures = wait(futures, return_when=FIRST_COMPLETED)\n # Raise if exception occurred in executing the completed futures.\n for f in completed:\n f.result()\n\n futures.add(\n executor.submit(\n _optimize_sequential,\n study,\n func,\n 1,\n timeout,\n catch,\n callbacks,\n gc_after_trial,\n True,\n time_start,\n None,\n )\n )\n finally:\n study._optimize_lock.release()\n progress_bar.close()\n\n\ndef _optimize_sequential(\n study: \"optuna.Study\",\n func: \"optuna.study.ObjectiveFuncType\",\n n_trials: Optional[int],\n timeout: Optional[float],\n catch: Tuple[Type[Exception], ...],\n callbacks: Optional[List[Callable[[\"optuna.Study\", FrozenTrial], None]]],\n gc_after_trial: bool,\n reseed_sampler_rng: bool,\n time_start: Optional[datetime.datetime],\n progress_bar: Optional[pbar_module._ProgressBar],\n) -> None:\n if reseed_sampler_rng:\n study.sampler.reseed_rng()\n\n i_trial = 0\n\n if time_start is None:\n time_start = datetime.datetime.now()\n\n while True:\n if study._stop_flag:\n break\n\n if n_trials is not None:\n if i_trial >= n_trials:\n break\n i_trial += 1\n\n if timeout is not None:\n elapsed_seconds = (datetime.datetime.now() - time_start).total_seconds()\n if elapsed_seconds >= timeout:\n break\n\n try:\n trial = _run_trial(study, func, catch)\n except Exception:\n raise\n finally:\n # The following line mitigates memory problems that can be occurred in some\n # environments (e.g., services that use computing containers such as CircleCI).\n # Please refer to the following PR for further details:\n # https://github.com/optuna/optuna/pull/325.\n if gc_after_trial:\n gc.collect()\n\n if callbacks is not None:\n frozen_trial = copy.deepcopy(study._storage.get_trial(trial._trial_id))\n for callback in callbacks:\n callback(study, frozen_trial)\n\n if progress_bar is not None:\n progress_bar.update((datetime.datetime.now() - 
time_start).total_seconds())\n\n study._storage.remove_session()\n\n\ndef _run_trial(\n study: \"optuna.Study\",\n func: \"optuna.study.ObjectiveFuncType\",\n catch: Tuple[Type[Exception], ...],\n) -> trial_module.Trial:\n if study._storage.is_heartbeat_enabled():\n failed_trial_ids = study._storage.fail_stale_trials(study._study_id)\n failed_trial_callback = study._storage.get_failed_trial_callback()\n if failed_trial_callback is not None:\n for trial_id in failed_trial_ids:\n failed_trial = copy.deepcopy(study._storage.get_trial(trial_id))\n failed_trial_callback(study, failed_trial)\n\n trial = study.ask()\n\n state: Optional[TrialState] = None\n values: Optional[List[float]] = None\n func_err: Optional[Exception] = None\n func_err_fail_exc_info: Optional[Any] = None\n # Set to a string if `func` returns correctly but the return value violates assumptions.\n values_conversion_failure_message: Optional[str] = None\n stop_event: Optional[Event] = None\n thread: Optional[Thread] = None\n\n if study._storage.is_heartbeat_enabled():\n stop_event = Event()\n thread = Thread(\n target=_record_heartbeat, args=(trial._trial_id, study._storage, stop_event)\n )\n thread.start()\n\n try:\n value_or_values = func(trial)\n except exceptions.TrialPruned as e:\n # TODO(mamu): Handle multi-objective cases.\n state = TrialState.PRUNED\n func_err = e\n except Exception as e:\n state = TrialState.FAIL\n func_err = e\n func_err_fail_exc_info = sys.exc_info()\n else:\n # TODO(hvy): Avoid checking the values both here and inside `Study.tell`.\n values, values_conversion_failure_message = _check_and_convert_to_values(\n len(study.directions), value_or_values, trial.number\n )\n if values_conversion_failure_message is not None:\n state = TrialState.FAIL\n else:\n state = TrialState.COMPLETE\n\n if study._storage.is_heartbeat_enabled():\n assert stop_event is not None\n assert thread is not None\n stop_event.set()\n thread.join()\n\n # `Study.tell` may raise during trial post-processing.\n try:\n study.tell(trial, values=values, state=state)\n except Exception:\n raise\n finally:\n if state == TrialState.COMPLETE:\n study._log_completed_trial(trial, cast(List[float], values))\n elif state == TrialState.PRUNED:\n _logger.info(\"Trial {} pruned. 
{}\".format(trial.number, str(func_err)))\n elif state == TrialState.FAIL:\n if func_err is not None:\n _logger.warning(\n \"Trial {} failed because of the following error: {}\".format(\n trial.number, repr(func_err)\n ),\n exc_info=func_err_fail_exc_info,\n )\n elif values_conversion_failure_message is not None:\n _logger.warning(values_conversion_failure_message)\n else:\n assert False, \"Should not reach.\"\n else:\n assert False, \"Should not reach.\"\n\n if state == TrialState.FAIL and func_err is not None and not isinstance(func_err, catch):\n raise func_err\n return trial\n\n\ndef _check_and_convert_to_values(\n n_objectives: int, original_value: Union[float, Sequence[float]], trial_number: int\n) -> Tuple[Optional[List[float]], Optional[str]]:\n if isinstance(original_value, Sequence):\n if n_objectives != len(original_value):\n return (\n None,\n (\n f\"Trial {trial_number} failed, because the number of the values \"\n f\"{len(original_value)} did not match the number of the objectives \"\n f\"{n_objectives}.\"\n ),\n )\n else:\n _original_values = list(original_value)\n else:\n _original_values = [original_value]\n\n _checked_values = []\n for v in _original_values:\n checked_v, failure_message = _check_single_value(v, trial_number)\n if failure_message is not None:\n # TODO(Imamura): Construct error message taking into account all values and do not\n # early return\n # `value` is assumed to be ignored on failure so we can set it to any value.\n return None, failure_message\n elif isinstance(checked_v, float):\n _checked_values.append(checked_v)\n else:\n assert False\n\n return _checked_values, None\n\n\ndef _check_single_value(\n original_value: float, trial_number: int\n) -> Tuple[Optional[float], Optional[str]]:\n value = None\n failure_message = None\n\n try:\n value = float(original_value)\n except (\n ValueError,\n TypeError,\n ):\n failure_message = (\n f\"Trial {trial_number} failed, because the value {repr(original_value)} could not be \"\n \"cast to float.\"\n )\n\n if value is not None and math.isnan(value):\n value = None\n failure_message = (\n f\"Trial {trial_number} failed, because the objective function returned \"\n f\"{original_value}.\"\n )\n\n return value, failure_message\n\n\ndef _record_heartbeat(trial_id: int, storage: storages.BaseStorage, stop_event: Event) -> None:\n heartbeat_interval = storage.get_heartbeat_interval()\n assert heartbeat_interval is not None\n while True:\n storage.record_heartbeat(trial_id)\n if stop_event.wait(timeout=heartbeat_interval):\n return\n", "path": "optuna/_optimize.py"}]} | 3,809 | 140 |
gh_patches_debug_31575 | rasdani/github-patches | git_diff | python-discord__bot-475 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tags can't be edited due to using the POST method with the API
Currently, the `!tag edit` subcommand is just an alias of `!tag set`. This means that if we try to edit an existing tag, the bot will use the POST HTTP method to communicate with the API. Since we're not posting a new tag, but editing an existing entry, the API will reject this request.
Instead of using POST, we should be using PATCH, since we're only partially updating the entry in the database.
--- END ISSUE ---
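For orientation, here is a minimal, self-contained sketch of the HTTP-method distinction the issue describes: POST creates a new resource on the collection endpoint, while PATCH partially updates an existing resource on its detail endpoint. The aiohttp client, the base URL, and the helper names below are illustrative assumptions and are not the bot's own API client; only the `bot/tags` / `bot/tags/<name>` endpoint layout is taken from the cog shown in the files section.

```python
# Sketch only: not the bot's API wrapper, just the REST semantics at issue.
import asyncio

import aiohttp

API_ROOT = "https://example.invalid/api/"  # hypothetical base URL


async def create_tag(session: aiohttp.ClientSession, name: str, content: str) -> None:
    # Creating a brand-new resource -> POST to the collection endpoint.
    body = {"title": name, "embed": {"title": name, "description": content}}
    async with session.post(API_ROOT + "bot/tags", json=body) as resp:
        resp.raise_for_status()


async def edit_tag(session: aiohttp.ClientSession, name: str, content: str) -> None:
    # Partially updating an existing resource -> PATCH to its detail endpoint.
    body = {"embed": {"title": name, "description": content}}
    async with session.patch(f"{API_ROOT}bot/tags/{name}", json=body) as resp:
        resp.raise_for_status()


async def main() -> None:
    async with aiohttp.ClientSession() as session:
        await create_tag(session, "python", "A programming language.")
        await edit_tag(session, "python", "A very popular programming language.")


if __name__ == "__main__":
    asyncio.run(main())
```

Sending the second call as a POST to `bot/tags` instead is exactly the request the API rejects, which is the behavior the issue reports.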
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/cogs/tags.py`
Content:
```
1 import logging
2 import time
3
4 from discord import Colour, Embed
5 from discord.ext.commands import Bot, Cog, Context, group
6
7 from bot.constants import Channels, Cooldowns, MODERATION_ROLES, Roles
8 from bot.converters import TagContentConverter, TagNameConverter
9 from bot.decorators import with_role
10 from bot.pagination import LinePaginator
11
12
13 log = logging.getLogger(__name__)
14
15 TEST_CHANNELS = (
16 Channels.devtest,
17 Channels.bot,
18 Channels.helpers
19 )
20
21
22 class Tags(Cog):
23 """Save new tags and fetch existing tags."""
24
25 def __init__(self, bot: Bot):
26 self.bot = bot
27 self.tag_cooldowns = {}
28
29 @group(name='tags', aliases=('tag', 't'), invoke_without_command=True)
30 async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:
31 """Show all known tags, a single tag, or run a subcommand."""
32 await ctx.invoke(self.get_command, tag_name=tag_name)
33
34 @tags_group.command(name='get', aliases=('show', 'g'))
35 async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:
36 """Get a specified tag, or a list of all tags if no tag is specified."""
37 def _command_on_cooldown(tag_name: str) -> bool:
38 """
39 Check if the command is currently on cooldown, on a per-tag, per-channel basis.
40
41 The cooldown duration is set in constants.py.
42 """
43 now = time.time()
44
45 cooldown_conditions = (
46 tag_name
47 and tag_name in self.tag_cooldowns
48 and (now - self.tag_cooldowns[tag_name]["time"]) < Cooldowns.tags
49 and self.tag_cooldowns[tag_name]["channel"] == ctx.channel.id
50 )
51
52 if cooldown_conditions:
53 return True
54 return False
55
56 if _command_on_cooldown(tag_name):
57 time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name]["time"])
58 log.warning(f"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. "
59 f"Cooldown ends in {time_left:.1f} seconds.")
60 return
61
62 if tag_name is not None:
63 tag = await self.bot.api_client.get(f'bot/tags/{tag_name}')
64 if ctx.channel.id not in TEST_CHANNELS:
65 self.tag_cooldowns[tag_name] = {
66 "time": time.time(),
67 "channel": ctx.channel.id
68 }
69 await ctx.send(embed=Embed.from_dict(tag['embed']))
70
71 else:
72 tags = await self.bot.api_client.get('bot/tags')
73 if not tags:
74 await ctx.send(embed=Embed(
75 description="**There are no tags in the database!**",
76 colour=Colour.red()
77 ))
78 else:
79 embed: Embed = Embed(title="**Current tags**")
80 await LinePaginator.paginate(
81 sorted(f"**»** {tag['title']}" for tag in tags),
82 ctx,
83 embed,
84 footer_text="To show a tag, type !tags <tagname>.",
85 empty=False,
86 max_lines=15
87 )
88
89 @tags_group.command(name='set', aliases=('add', 'edit', 's'))
90 @with_role(*MODERATION_ROLES)
91 async def set_command(
92 self,
93 ctx: Context,
94 tag_name: TagNameConverter,
95 *,
96 tag_content: TagContentConverter,
97 ) -> None:
98 """Create a new tag or update an existing one."""
99 body = {
100 'title': tag_name.lower().strip(),
101 'embed': {
102 'title': tag_name,
103 'description': tag_content
104 }
105 }
106
107 await self.bot.api_client.post('bot/tags', json=body)
108
109 log.debug(f"{ctx.author} successfully added the following tag to our database: \n"
110 f"tag_name: {tag_name}\n"
111 f"tag_content: '{tag_content}'\n")
112
113 await ctx.send(embed=Embed(
114 title="Tag successfully added",
115 description=f"**{tag_name}** added to tag database.",
116 colour=Colour.blurple()
117 ))
118
119 @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))
120 @with_role(Roles.admin, Roles.owner)
121 async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:
122 """Remove a tag from the database."""
123 await self.bot.api_client.delete(f'bot/tags/{tag_name}')
124
125 log.debug(f"{ctx.author} successfully deleted the tag called '{tag_name}'")
126 await ctx.send(embed=Embed(
127 title=tag_name,
128 description=f"Tag successfully removed: {tag_name}.",
129 colour=Colour.blurple()
130 ))
131
132
133 def setup(bot: Bot) -> None:
134 """Tags cog load."""
135 bot.add_cog(Tags(bot))
136 log.info("Cog loaded: Tags")
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/cogs/tags.py b/bot/cogs/tags.py
--- a/bot/cogs/tags.py
+++ b/bot/cogs/tags.py
@@ -86,7 +86,7 @@
max_lines=15
)
- @tags_group.command(name='set', aliases=('add', 'edit', 's'))
+ @tags_group.command(name='set', aliases=('add', 's'))
@with_role(*MODERATION_ROLES)
async def set_command(
self,
@@ -95,7 +95,7 @@
*,
tag_content: TagContentConverter,
) -> None:
- """Create a new tag or update an existing one."""
+ """Create a new tag."""
body = {
'title': tag_name.lower().strip(),
'embed': {
@@ -116,6 +116,35 @@
colour=Colour.blurple()
))
+ @tags_group.command(name='edit', aliases=('e', ))
+ @with_role(*MODERATION_ROLES)
+ async def edit_command(
+ self,
+ ctx: Context,
+ tag_name: TagNameConverter,
+ *,
+ tag_content: TagContentConverter,
+ ) -> None:
+ """Edit an existing tag."""
+ body = {
+ 'embed': {
+ 'title': tag_name,
+ 'description': tag_content
+ }
+ }
+
+ await self.bot.api_client.patch(f'bot/tags/{tag_name}', json=body)
+
+ log.debug(f"{ctx.author} successfully edited the following tag in our database: \n"
+ f"tag_name: {tag_name}\n"
+ f"tag_content: '{tag_content}'\n")
+
+ await ctx.send(embed=Embed(
+ title="Tag successfully edited",
+ description=f"**{tag_name}** edited in the database.",
+ colour=Colour.blurple()
+ ))
+
@tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))
@with_role(Roles.admin, Roles.owner)
async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:
| {"golden_diff": "diff --git a/bot/cogs/tags.py b/bot/cogs/tags.py\n--- a/bot/cogs/tags.py\n+++ b/bot/cogs/tags.py\n@@ -86,7 +86,7 @@\n max_lines=15\n )\n \n- @tags_group.command(name='set', aliases=('add', 'edit', 's'))\n+ @tags_group.command(name='set', aliases=('add', 's'))\n @with_role(*MODERATION_ROLES)\n async def set_command(\n self,\n@@ -95,7 +95,7 @@\n *,\n tag_content: TagContentConverter,\n ) -> None:\n- \"\"\"Create a new tag or update an existing one.\"\"\"\n+ \"\"\"Create a new tag.\"\"\"\n body = {\n 'title': tag_name.lower().strip(),\n 'embed': {\n@@ -116,6 +116,35 @@\n colour=Colour.blurple()\n ))\n \n+ @tags_group.command(name='edit', aliases=('e', ))\n+ @with_role(*MODERATION_ROLES)\n+ async def edit_command(\n+ self,\n+ ctx: Context,\n+ tag_name: TagNameConverter,\n+ *,\n+ tag_content: TagContentConverter,\n+ ) -> None:\n+ \"\"\"Edit an existing tag.\"\"\"\n+ body = {\n+ 'embed': {\n+ 'title': tag_name,\n+ 'description': tag_content\n+ }\n+ }\n+\n+ await self.bot.api_client.patch(f'bot/tags/{tag_name}', json=body)\n+\n+ log.debug(f\"{ctx.author} successfully edited the following tag in our database: \\n\"\n+ f\"tag_name: {tag_name}\\n\"\n+ f\"tag_content: '{tag_content}'\\n\")\n+\n+ await ctx.send(embed=Embed(\n+ title=\"Tag successfully edited\",\n+ description=f\"**{tag_name}** edited in the database.\",\n+ colour=Colour.blurple()\n+ ))\n+\n @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))\n @with_role(Roles.admin, Roles.owner)\n async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:\n", "issue": "Tags can't be edited due to using the POST method with the API\nCurrently, the `!tag edit` subcommand is just an alias of `!tag set`. This means that if we try to edit an existing tag, the bot will use the POST http method to communicate with the API. Since we're not posting a new tag, but editing an existing entry, the API will reject this request. 
\r\n\r\nInstead of using POST, we should be using PATCH, since we're only partially updating the entry in the database.\n", "before_files": [{"content": "import logging\nimport time\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import Bot, Cog, Context, group\n\nfrom bot.constants import Channels, Cooldowns, MODERATION_ROLES, Roles\nfrom bot.converters import TagContentConverter, TagNameConverter\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\n\n\nlog = logging.getLogger(__name__)\n\nTEST_CHANNELS = (\n Channels.devtest,\n Channels.bot,\n Channels.helpers\n)\n\n\nclass Tags(Cog):\n \"\"\"Save new tags and fetch existing tags.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.tag_cooldowns = {}\n\n @group(name='tags', aliases=('tag', 't'), invoke_without_command=True)\n async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Show all known tags, a single tag, or run a subcommand.\"\"\"\n await ctx.invoke(self.get_command, tag_name=tag_name)\n\n @tags_group.command(name='get', aliases=('show', 'g'))\n async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Get a specified tag, or a list of all tags if no tag is specified.\"\"\"\n def _command_on_cooldown(tag_name: str) -> bool:\n \"\"\"\n Check if the command is currently on cooldown, on a per-tag, per-channel basis.\n\n The cooldown duration is set in constants.py.\n \"\"\"\n now = time.time()\n\n cooldown_conditions = (\n tag_name\n and tag_name in self.tag_cooldowns\n and (now - self.tag_cooldowns[tag_name][\"time\"]) < Cooldowns.tags\n and self.tag_cooldowns[tag_name][\"channel\"] == ctx.channel.id\n )\n\n if cooldown_conditions:\n return True\n return False\n\n if _command_on_cooldown(tag_name):\n time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name][\"time\"])\n log.warning(f\"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. 
\"\n f\"Cooldown ends in {time_left:.1f} seconds.\")\n return\n\n if tag_name is not None:\n tag = await self.bot.api_client.get(f'bot/tags/{tag_name}')\n if ctx.channel.id not in TEST_CHANNELS:\n self.tag_cooldowns[tag_name] = {\n \"time\": time.time(),\n \"channel\": ctx.channel.id\n }\n await ctx.send(embed=Embed.from_dict(tag['embed']))\n\n else:\n tags = await self.bot.api_client.get('bot/tags')\n if not tags:\n await ctx.send(embed=Embed(\n description=\"**There are no tags in the database!**\",\n colour=Colour.red()\n ))\n else:\n embed: Embed = Embed(title=\"**Current tags**\")\n await LinePaginator.paginate(\n sorted(f\"**\u00bb** {tag['title']}\" for tag in tags),\n ctx,\n embed,\n footer_text=\"To show a tag, type !tags <tagname>.\",\n empty=False,\n max_lines=15\n )\n\n @tags_group.command(name='set', aliases=('add', 'edit', 's'))\n @with_role(*MODERATION_ROLES)\n async def set_command(\n self,\n ctx: Context,\n tag_name: TagNameConverter,\n *,\n tag_content: TagContentConverter,\n ) -> None:\n \"\"\"Create a new tag or update an existing one.\"\"\"\n body = {\n 'title': tag_name.lower().strip(),\n 'embed': {\n 'title': tag_name,\n 'description': tag_content\n }\n }\n\n await self.bot.api_client.post('bot/tags', json=body)\n\n log.debug(f\"{ctx.author} successfully added the following tag to our database: \\n\"\n f\"tag_name: {tag_name}\\n\"\n f\"tag_content: '{tag_content}'\\n\")\n\n await ctx.send(embed=Embed(\n title=\"Tag successfully added\",\n description=f\"**{tag_name}** added to tag database.\",\n colour=Colour.blurple()\n ))\n\n @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))\n @with_role(Roles.admin, Roles.owner)\n async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:\n \"\"\"Remove a tag from the database.\"\"\"\n await self.bot.api_client.delete(f'bot/tags/{tag_name}')\n\n log.debug(f\"{ctx.author} successfully deleted the tag called '{tag_name}'\")\n await ctx.send(embed=Embed(\n title=tag_name,\n description=f\"Tag successfully removed: {tag_name}.\",\n colour=Colour.blurple()\n ))\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Tags cog load.\"\"\"\n bot.add_cog(Tags(bot))\n log.info(\"Cog loaded: Tags\")\n", "path": "bot/cogs/tags.py"}], "after_files": [{"content": "import logging\nimport time\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import Bot, Cog, Context, group\n\nfrom bot.constants import Channels, Cooldowns, MODERATION_ROLES, Roles\nfrom bot.converters import TagContentConverter, TagNameConverter\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\n\n\nlog = logging.getLogger(__name__)\n\nTEST_CHANNELS = (\n Channels.devtest,\n Channels.bot,\n Channels.helpers\n)\n\n\nclass Tags(Cog):\n \"\"\"Save new tags and fetch existing tags.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.tag_cooldowns = {}\n\n @group(name='tags', aliases=('tag', 't'), invoke_without_command=True)\n async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Show all known tags, a single tag, or run a subcommand.\"\"\"\n await ctx.invoke(self.get_command, tag_name=tag_name)\n\n @tags_group.command(name='get', aliases=('show', 'g'))\n async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Get a specified tag, or a list of all tags if no tag is specified.\"\"\"\n def _command_on_cooldown(tag_name: str) -> bool:\n \"\"\"\n Check if the command is currently on cooldown, on a per-tag, per-channel 
basis.\n\n The cooldown duration is set in constants.py.\n \"\"\"\n now = time.time()\n\n cooldown_conditions = (\n tag_name\n and tag_name in self.tag_cooldowns\n and (now - self.tag_cooldowns[tag_name][\"time\"]) < Cooldowns.tags\n and self.tag_cooldowns[tag_name][\"channel\"] == ctx.channel.id\n )\n\n if cooldown_conditions:\n return True\n return False\n\n if _command_on_cooldown(tag_name):\n time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name][\"time\"])\n log.warning(f\"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. \"\n f\"Cooldown ends in {time_left:.1f} seconds.\")\n return\n\n if tag_name is not None:\n tag = await self.bot.api_client.get(f'bot/tags/{tag_name}')\n if ctx.channel.id not in TEST_CHANNELS:\n self.tag_cooldowns[tag_name] = {\n \"time\": time.time(),\n \"channel\": ctx.channel.id\n }\n await ctx.send(embed=Embed.from_dict(tag['embed']))\n\n else:\n tags = await self.bot.api_client.get('bot/tags')\n if not tags:\n await ctx.send(embed=Embed(\n description=\"**There are no tags in the database!**\",\n colour=Colour.red()\n ))\n else:\n embed: Embed = Embed(title=\"**Current tags**\")\n await LinePaginator.paginate(\n sorted(f\"**\u00bb** {tag['title']}\" for tag in tags),\n ctx,\n embed,\n footer_text=\"To show a tag, type !tags <tagname>.\",\n empty=False,\n max_lines=15\n )\n\n @tags_group.command(name='set', aliases=('add', 's'))\n @with_role(*MODERATION_ROLES)\n async def set_command(\n self,\n ctx: Context,\n tag_name: TagNameConverter,\n *,\n tag_content: TagContentConverter,\n ) -> None:\n \"\"\"Create a new tag.\"\"\"\n body = {\n 'title': tag_name.lower().strip(),\n 'embed': {\n 'title': tag_name,\n 'description': tag_content\n }\n }\n\n await self.bot.api_client.post('bot/tags', json=body)\n\n log.debug(f\"{ctx.author} successfully added the following tag to our database: \\n\"\n f\"tag_name: {tag_name}\\n\"\n f\"tag_content: '{tag_content}'\\n\")\n\n await ctx.send(embed=Embed(\n title=\"Tag successfully added\",\n description=f\"**{tag_name}** added to tag database.\",\n colour=Colour.blurple()\n ))\n\n @tags_group.command(name='edit', aliases=('e', ))\n @with_role(*MODERATION_ROLES)\n async def edit_command(\n self,\n ctx: Context,\n tag_name: TagNameConverter,\n *,\n tag_content: TagContentConverter,\n ) -> None:\n \"\"\"Edit an existing tag.\"\"\"\n body = {\n 'embed': {\n 'title': tag_name,\n 'description': tag_content\n }\n }\n\n await self.bot.api_client.patch(f'bot/tags/{tag_name}', json=body)\n\n log.debug(f\"{ctx.author} successfully edited the following tag in our database: \\n\"\n f\"tag_name: {tag_name}\\n\"\n f\"tag_content: '{tag_content}'\\n\")\n\n await ctx.send(embed=Embed(\n title=\"Tag successfully edited\",\n description=f\"**{tag_name}** edited in the database.\",\n colour=Colour.blurple()\n ))\n\n @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))\n @with_role(Roles.admin, Roles.owner)\n async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:\n \"\"\"Remove a tag from the database.\"\"\"\n await self.bot.api_client.delete(f'bot/tags/{tag_name}')\n\n log.debug(f\"{ctx.author} successfully deleted the tag called '{tag_name}'\")\n await ctx.send(embed=Embed(\n title=tag_name,\n description=f\"Tag successfully removed: {tag_name}.\",\n colour=Colour.blurple()\n ))\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Tags cog load.\"\"\"\n bot.add_cog(Tags(bot))\n log.info(\"Cog loaded: Tags\")\n", "path": "bot/cogs/tags.py"}]} | 1,750 | 486 |
gh_patches_debug_9387 | rasdani/github-patches | git_diff | flairNLP__flair-1679 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Is the corpus object reusable across ModelTrainer instances?
I have three checkpoint files generated from a training run that uses PooledFlair embedding, say chk10.pt, chk20.pt, and chk30.pt.
I finalize using the following code in a for loop to get the F1 predictions out:
trainer: ModelTrainer = ModelTrainer.load_checkpoint(chkfile, corpus)
trainer.train('.', checkpoint = False, train_with_dev=True, max_epochs=epochs)
I set the epochs to the value at which each checkpoint was generated (10, 20, 30, etc.), so the run typically goes straight to creating the final model and emitting the predictions.
This works perfectly fine the first time through the loop, after which the predictions are quite wrong. If I instead run each checkpoint once in a freshly restarted process, I get the values I expect. This behavior happens only with PooledFlairEmbedding; the same program runs just fine with ElmoEmbedding and BertEmbedding.
So my question is: why is this the case? Is it because I create the corpus object outside the for loop and keep reusing it across different ModelTrainer instances?
It happens quite regularly for me. If needed, I can make a small program and share it.
--- END ISSUE ---
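Since the report centers on checkpoint loading, a small self-contained illustration of `torch.load`'s `map_location` argument (the parameter the library code below passes) may help. The in-memory buffer and throwaway tensor are stand-ins for a real checkpoint file, not flair API calls.

```python
# Sketch only: demonstrates map_location semantics with a fake "checkpoint".
import io

import torch

buffer = io.BytesIO()
torch.save({"weight": torch.randn(3, 3)}, buffer)

# map_location="cpu" materializes every tensor on the CPU, regardless of the
# device it lived on when the checkpoint was written.
buffer.seek(0)
state = torch.load(buffer, map_location="cpu")
print(state["weight"].device)  # cpu

# The caller can still move things to the active device explicitly afterwards,
# e.g. model.to(flair.device), as Model.load() in the file below already does.
```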
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flair/nn.py`
Content:
```
1 import warnings
2 from pathlib import Path
3
4 import torch.nn
5
6 from abc import abstractmethod
7
8 from typing import Union, List
9
10 from torch.utils.data.dataset import Dataset
11
12 import flair
13 from flair import file_utils
14 from flair.data import DataPoint, Sentence
15 from flair.datasets import DataLoader
16 from flair.training_utils import Result
17
18
19 class Model(torch.nn.Module):
20 """Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.
21 Every new type of model must implement these methods."""
22
23 @abstractmethod
24 def forward_loss(
25 self, data_points: Union[List[DataPoint], DataPoint]
26 ) -> torch.tensor:
27 """Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training."""
28 pass
29
30 @abstractmethod
31 def evaluate(
32 self,
33 sentences: Union[List[DataPoint], Dataset],
34 out_path: Path = None,
35 embedding_storage_mode: str = "none",
36 ) -> (Result, float):
37 """Evaluates the model. Returns a Result object containing evaluation
38 results and a loss value. Implement this to enable evaluation.
39 :param data_loader: DataLoader that iterates over dataset to be evaluated
40 :param out_path: Optional output path to store predictions
41 :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and
42 freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU
43 :return: Returns a Tuple consisting of a Result object and a loss float value
44 """
45 pass
46
47 @abstractmethod
48 def _get_state_dict(self):
49 """Returns the state dictionary for this model. Implementing this enables the save() and save_checkpoint()
50 functionality."""
51 pass
52
53 @staticmethod
54 @abstractmethod
55 def _init_model_with_state_dict(state):
56 """Initialize the model from a state dictionary. Implementing this enables the load() and load_checkpoint()
57 functionality."""
58 pass
59
60 @staticmethod
61 @abstractmethod
62 def _fetch_model(model_name) -> str:
63 return model_name
64
65 def save(self, model_file: Union[str, Path]):
66 """
67 Saves the current model to the provided file.
68 :param model_file: the model file
69 """
70 model_state = self._get_state_dict()
71
72 torch.save(model_state, str(model_file), pickle_protocol=4)
73
74 @classmethod
75 def load(cls, model: Union[str, Path]):
76 """
77 Loads the model from the given file.
78 :param model: the model file
79 :return: the loaded text classifier model
80 """
81 model_file = cls._fetch_model(str(model))
82
83 with warnings.catch_warnings():
84 warnings.filterwarnings("ignore")
85 # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups
86 # see https://github.com/zalandoresearch/flair/issues/351
87 f = file_utils.load_big_file(str(model_file))
88 state = torch.load(f, map_location=flair.device)
89
90 model = cls._init_model_with_state_dict(state)
91
92 model.eval()
93 model.to(flair.device)
94
95 return model
96
97
98 class LockedDropout(torch.nn.Module):
99 """
100 Implementation of locked (or variational) dropout. Randomly drops out entire parameters in embedding space.
101 """
102
103 def __init__(self, dropout_rate=0.5, batch_first=True, inplace=False):
104 super(LockedDropout, self).__init__()
105 self.dropout_rate = dropout_rate
106 self.batch_first = batch_first
107 self.inplace = inplace
108
109 def forward(self, x):
110 if not self.training or not self.dropout_rate:
111 return x
112
113 if not self.batch_first:
114 m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - self.dropout_rate)
115 else:
116 m = x.data.new(x.size(0), 1, x.size(2)).bernoulli_(1 - self.dropout_rate)
117
118 mask = torch.autograd.Variable(m, requires_grad=False) / (1 - self.dropout_rate)
119 mask = mask.expand_as(x)
120 return mask * x
121
122 def extra_repr(self):
123 inplace_str = ", inplace" if self.inplace else ""
124 return "p={}{}".format(self.dropout_rate, inplace_str)
125
126
127 class WordDropout(torch.nn.Module):
128 """
129 Implementation of word dropout. Randomly drops out entire words (or characters) in embedding space.
130 """
131
132 def __init__(self, dropout_rate=0.05, inplace=False):
133 super(WordDropout, self).__init__()
134 self.dropout_rate = dropout_rate
135 self.inplace = inplace
136
137 def forward(self, x):
138 if not self.training or not self.dropout_rate:
139 return x
140
141 m = x.data.new(x.size(0), x.size(1), 1).bernoulli_(1 - self.dropout_rate)
142
143 mask = torch.autograd.Variable(m, requires_grad=False)
144 return mask * x
145
146 def extra_repr(self):
147 inplace_str = ", inplace" if self.inplace else ""
148 return "p={}{}".format(self.dropout_rate, inplace_str)
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flair/nn.py b/flair/nn.py
--- a/flair/nn.py
+++ b/flair/nn.py
@@ -85,7 +85,7 @@
# load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups
# see https://github.com/zalandoresearch/flair/issues/351
f = file_utils.load_big_file(str(model_file))
- state = torch.load(f, map_location=flair.device)
+ state = torch.load(f, map_location='cpu')
model = cls._init_model_with_state_dict(state)
| {"golden_diff": "diff --git a/flair/nn.py b/flair/nn.py\n--- a/flair/nn.py\n+++ b/flair/nn.py\n@@ -85,7 +85,7 @@\n # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups\n # see https://github.com/zalandoresearch/flair/issues/351\n f = file_utils.load_big_file(str(model_file))\n- state = torch.load(f, map_location=flair.device)\n+ state = torch.load(f, map_location='cpu')\n \n model = cls._init_model_with_state_dict(state)\n", "issue": "is corpus object reusable across ModelTrainer instances ?\nI have three checkpoint files generated from a training run that uses PooledFlair embedding. Say chk10.pt, chk20.pt, chk30.pt.\r\n\r\nI finalize using the following code in a for loop to get the F1 predictions out:\r\n\r\ntrainer: ModelTrainer = ModelTrainer.load_checkpoint(chkfile, corpus)\r\ntrainer.train('.', checkpoint = False, train_with_dev=True, max_epochs=epochs)\r\n\r\nI set the epochs to the value at which this checkpoint got generated. So 10, 20, 30 etc. So typically it goes straight to creating the final model and emitting the predictions.\r\n\r\nThis works perfectly fine for the first time in the loop, after which the predictions are quite wrong. Now instead of doing it in the loop, if i simply do just once by restarting the process i get the values i expect. This behavior happens only with PooledFlairEmbedding. Same program runs just fine with ElmoEmbedding, BertEmbedding.\r\n\r\nSo my question is why is this the case ? Is it because i create the corpus object outside the for loop and keep reusing it across different ModelTrainer instances ? \r\n\r\nIt happens quite regularly for me. If needed i can make a small program and share.\r\n\n", "before_files": [{"content": "import warnings\nfrom pathlib import Path\n\nimport torch.nn\n\nfrom abc import abstractmethod\n\nfrom typing import Union, List\n\nfrom torch.utils.data.dataset import Dataset\n\nimport flair\nfrom flair import file_utils\nfrom flair.data import DataPoint, Sentence\nfrom flair.datasets import DataLoader\nfrom flair.training_utils import Result\n\n\nclass Model(torch.nn.Module):\n \"\"\"Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.\n Every new type of model must implement these methods.\"\"\"\n\n @abstractmethod\n def forward_loss(\n self, data_points: Union[List[DataPoint], DataPoint]\n ) -> torch.tensor:\n \"\"\"Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training.\"\"\"\n pass\n\n @abstractmethod\n def evaluate(\n self,\n sentences: Union[List[DataPoint], Dataset],\n out_path: Path = None,\n embedding_storage_mode: str = \"none\",\n ) -> (Result, float):\n \"\"\"Evaluates the model. Returns a Result object containing evaluation\n results and a loss value. Implement this to enable evaluation.\n :param data_loader: DataLoader that iterates over dataset to be evaluated\n :param out_path: Optional output path to store predictions\n :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and\n freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU\n :return: Returns a Tuple consisting of a Result object and a loss float value\n \"\"\"\n pass\n\n @abstractmethod\n def _get_state_dict(self):\n \"\"\"Returns the state dictionary for this model. 
Implementing this enables the save() and save_checkpoint()\n functionality.\"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def _init_model_with_state_dict(state):\n \"\"\"Initialize the model from a state dictionary. Implementing this enables the load() and load_checkpoint()\n functionality.\"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def _fetch_model(model_name) -> str:\n return model_name\n\n def save(self, model_file: Union[str, Path]):\n \"\"\"\n Saves the current model to the provided file.\n :param model_file: the model file\n \"\"\"\n model_state = self._get_state_dict()\n\n torch.save(model_state, str(model_file), pickle_protocol=4)\n\n @classmethod\n def load(cls, model: Union[str, Path]):\n \"\"\"\n Loads the model from the given file.\n :param model: the model file\n :return: the loaded text classifier model\n \"\"\"\n model_file = cls._fetch_model(str(model))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups\n # see https://github.com/zalandoresearch/flair/issues/351\n f = file_utils.load_big_file(str(model_file))\n state = torch.load(f, map_location=flair.device)\n\n model = cls._init_model_with_state_dict(state)\n\n model.eval()\n model.to(flair.device)\n\n return model\n\n\nclass LockedDropout(torch.nn.Module):\n \"\"\"\n Implementation of locked (or variational) dropout. Randomly drops out entire parameters in embedding space.\n \"\"\"\n\n def __init__(self, dropout_rate=0.5, batch_first=True, inplace=False):\n super(LockedDropout, self).__init__()\n self.dropout_rate = dropout_rate\n self.batch_first = batch_first\n self.inplace = inplace\n\n def forward(self, x):\n if not self.training or not self.dropout_rate:\n return x\n\n if not self.batch_first:\n m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - self.dropout_rate)\n else:\n m = x.data.new(x.size(0), 1, x.size(2)).bernoulli_(1 - self.dropout_rate)\n\n mask = torch.autograd.Variable(m, requires_grad=False) / (1 - self.dropout_rate)\n mask = mask.expand_as(x)\n return mask * x\n\n def extra_repr(self):\n inplace_str = \", inplace\" if self.inplace else \"\"\n return \"p={}{}\".format(self.dropout_rate, inplace_str)\n\n\nclass WordDropout(torch.nn.Module):\n \"\"\"\n Implementation of word dropout. 
Randomly drops out entire words (or characters) in embedding space.\n \"\"\"\n\n def __init__(self, dropout_rate=0.05, inplace=False):\n super(WordDropout, self).__init__()\n self.dropout_rate = dropout_rate\n self.inplace = inplace\n\n def forward(self, x):\n if not self.training or not self.dropout_rate:\n return x\n\n m = x.data.new(x.size(0), x.size(1), 1).bernoulli_(1 - self.dropout_rate)\n\n mask = torch.autograd.Variable(m, requires_grad=False)\n return mask * x\n\n def extra_repr(self):\n inplace_str = \", inplace\" if self.inplace else \"\"\n return \"p={}{}\".format(self.dropout_rate, inplace_str)\n", "path": "flair/nn.py"}], "after_files": [{"content": "import warnings\nfrom pathlib import Path\n\nimport torch.nn\n\nfrom abc import abstractmethod\n\nfrom typing import Union, List\n\nfrom torch.utils.data.dataset import Dataset\n\nimport flair\nfrom flair import file_utils\nfrom flair.data import DataPoint, Sentence\nfrom flair.datasets import DataLoader\nfrom flair.training_utils import Result\n\n\nclass Model(torch.nn.Module):\n \"\"\"Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.\n Every new type of model must implement these methods.\"\"\"\n\n @abstractmethod\n def forward_loss(\n self, data_points: Union[List[DataPoint], DataPoint]\n ) -> torch.tensor:\n \"\"\"Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training.\"\"\"\n pass\n\n @abstractmethod\n def evaluate(\n self,\n sentences: Union[List[DataPoint], Dataset],\n out_path: Path = None,\n embedding_storage_mode: str = \"none\",\n ) -> (Result, float):\n \"\"\"Evaluates the model. Returns a Result object containing evaluation\n results and a loss value. Implement this to enable evaluation.\n :param data_loader: DataLoader that iterates over dataset to be evaluated\n :param out_path: Optional output path to store predictions\n :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and\n freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU\n :return: Returns a Tuple consisting of a Result object and a loss float value\n \"\"\"\n pass\n\n @abstractmethod\n def _get_state_dict(self):\n \"\"\"Returns the state dictionary for this model. Implementing this enables the save() and save_checkpoint()\n functionality.\"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def _init_model_with_state_dict(state):\n \"\"\"Initialize the model from a state dictionary. 
Implementing this enables the load() and load_checkpoint()\n functionality.\"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def _fetch_model(model_name) -> str:\n return model_name\n\n def save(self, model_file: Union[str, Path]):\n \"\"\"\n Saves the current model to the provided file.\n :param model_file: the model file\n \"\"\"\n model_state = self._get_state_dict()\n\n torch.save(model_state, str(model_file), pickle_protocol=4)\n\n @classmethod\n def load(cls, model: Union[str, Path]):\n \"\"\"\n Loads the model from the given file.\n :param model: the model file\n :return: the loaded text classifier model\n \"\"\"\n model_file = cls._fetch_model(str(model))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups\n # see https://github.com/zalandoresearch/flair/issues/351\n f = file_utils.load_big_file(str(model_file))\n state = torch.load(f, map_location='cpu')\n\n model = cls._init_model_with_state_dict(state)\n\n model.eval()\n model.to(flair.device)\n\n return model\n\n\nclass LockedDropout(torch.nn.Module):\n \"\"\"\n Implementation of locked (or variational) dropout. Randomly drops out entire parameters in embedding space.\n \"\"\"\n\n def __init__(self, dropout_rate=0.5, batch_first=True, inplace=False):\n super(LockedDropout, self).__init__()\n self.dropout_rate = dropout_rate\n self.batch_first = batch_first\n self.inplace = inplace\n\n def forward(self, x):\n if not self.training or not self.dropout_rate:\n return x\n\n if not self.batch_first:\n m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - self.dropout_rate)\n else:\n m = x.data.new(x.size(0), 1, x.size(2)).bernoulli_(1 - self.dropout_rate)\n\n mask = torch.autograd.Variable(m, requires_grad=False) / (1 - self.dropout_rate)\n mask = mask.expand_as(x)\n return mask * x\n\n def extra_repr(self):\n inplace_str = \", inplace\" if self.inplace else \"\"\n return \"p={}{}\".format(self.dropout_rate, inplace_str)\n\n\nclass WordDropout(torch.nn.Module):\n \"\"\"\n Implementation of word dropout. Randomly drops out entire words (or characters) in embedding space.\n \"\"\"\n\n def __init__(self, dropout_rate=0.05, inplace=False):\n super(WordDropout, self).__init__()\n self.dropout_rate = dropout_rate\n self.inplace = inplace\n\n def forward(self, x):\n if not self.training or not self.dropout_rate:\n return x\n\n m = x.data.new(x.size(0), x.size(1), 1).bernoulli_(1 - self.dropout_rate)\n\n mask = torch.autograd.Variable(m, requires_grad=False)\n return mask * x\n\n def extra_repr(self):\n inplace_str = \", inplace\" if self.inplace else \"\"\n return \"p={}{}\".format(self.dropout_rate, inplace_str)\n", "path": "flair/nn.py"}]} | 2,011 | 144 |
gh_patches_debug_23571 | rasdani/github-patches | git_diff | napari__napari-5565 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
viewer axes point in basically random directions 😂
## 🐛 Bug
I was trying to explain anisotropic image resolution to someone the other day and this napari bug severely undermined my otherwise excellent demonstration. 😂 TLDR: the axes 0, 1, 2 displayed in the viewer by default are not the same as the axes 0, 1, 2 on the input array (and by extension the positions of the `layer.scale` parameter).
## To Reproduce
```python
import numpy as np
import napari
volume = np.random.random((64, 64, 64))
viewer, layer = napari.imshow(volume, scale=(1, 2, 4), ndisplay=3)
viewer.axes.visible = True
napari.run()
```
<img width="948" alt="Screen Shot 2023-02-07 at 9 05 39 pm" src="https://user-images.githubusercontent.com/492549/217215889-c6aca123-f529-4a82-9893-4d585ac69256.png">
## Expected behavior
The code produces a 3D oblong/rectangular prism with sides of length 64, 128, and 256 in world-space. Given the scale setting above, one would expect axis 0 to point in the direction of the side of length 64, 1 for 128, and 2 for 256. Instead, you see 0 pointing along the side of length 128, 1 along the side of length 256, and 2 along the side of length 64. (**Edited:** [fixed typo](https://github.com/napari/napari/issues/5536#issuecomment-1420555774), 0->2)
## Environment
```
napari: 0.4.17rc4.dev169+g57fa998e7.d20221219
Platform: macOS-12.5.1-arm64-arm-64bit
System: MacOS 12.5.1
Python: 3.10.5 | packaged by conda-forge | (main, Jun 14 2022, 07:07:06) [Clang 13.0.1 ]
Qt: 5.15.4
PyQt5: 5.15.7
NumPy: 1.22.4
SciPy: 1.8.1
Dask: 2022.9.1
VisPy: 0.12.1
magicgui: 0.5.1
superqt: 0.3.2
in-n-out: 0.1.5
app-model: 0.1.1
npe2: 0.6.1
OpenGL:
- GL version: 2.1 Metal - 76.3
- MAX_TEXTURE_SIZE: 16384
Screens:
- screen 1: resolution 1800x1169, scale 2.0
Settings path:
- /Users/jni/Library/Application Support/napari/all_f332943dd4a538ce2ff3134fd0ff74396d8d8b23/settings.yaml
Plugins:
- affinder: 0.2.3.dev12+g6d235f9 (2 contributions)
- napari: 0.4.17rc4.dev169+g57fa998e7.d20221219 (77 contributions)
- napari-console: 0.0.6 (0 contributions)
- napari-multiscale-rendering-prototype: 0.0.1 (6 contributions)
- napari-pymeshlab: 0.0.5 (17 contributions)
- napari-skeleton-curator: 0.1.dev21+gf2de859 (4 contributions)
- napari-svg: 0.1.6 (2 contributions)
- napari-watershed: 0.0.0 (4 contributions)
- skan: 0.11.0.dev0 (2 contributions)
- zarpaint: 0.1.1.dev20+g61ba4a0 (14 contributions)
```
## Additional context
Semi-related: #4633
--- END ISSUE ---
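To make the symptom easier to trace in the overlay code that follows, the index bookkeeping done by `_on_data_change` and `_on_labels_text_change` is written out below as plain Python for the 3-D reproduction above. The values of `ndim`, `displayed`, and `axis_labels` are assumed to mirror the default `viewer.dims` state for that example; this only restates the arithmetic from the file and makes no claim about where each arrow is drawn on screen.

```python
# Plain-Python restatement of the index arithmetic in VispyAxesOverlay
# (see napari/_vispy/overlays/axes.py below) for a 3-D volume.
ndim = 3
displayed = (0, 1, 2)            # viewer.dims.displayed with ndisplay=3
axis_labels = ("0", "1", "2")    # default viewer.dims.axis_labels

axes = displayed[::-1]                        # (2, 1, 0)
reversed_axes = [ndim - 1 - a for a in axes]  # [0, 1, 2]
label_order = [axis_labels[a] for a in axes]  # ['2', '1', '0']

print(axes, reversed_axes, label_order)
```

The node's geometry arguments (`axes`, `reversed_axes`) and its text labels are all derived from these few lines, so any inconsistency between them and the array order shows up directly in the rendered arrows.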
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/_vispy/overlays/axes.py`
Content:
```
1 import numpy as np
2
3 from napari._vispy.overlays.base import ViewerOverlayMixin, VispySceneOverlay
4 from napari._vispy.visuals.axes import Axes
5 from napari.utils.theme import get_theme
6
7
8 class VispyAxesOverlay(ViewerOverlayMixin, VispySceneOverlay):
9 """Axes indicating world coordinate origin and orientation."""
10
11 def __init__(self, *, viewer, overlay, parent=None) -> None:
12 self._scale = 1
13
14 # Target axes length in canvas pixels
15 self._target_length = 80
16
17 super().__init__(
18 node=Axes(), viewer=viewer, overlay=overlay, parent=parent
19 )
20 self.overlay.events.visible.connect(self._on_visible_change)
21 self.overlay.events.colored.connect(self._on_data_change)
22 self.overlay.events.dashed.connect(self._on_data_change)
23 self.overlay.events.labels.connect(self._on_labels_visible_change)
24 self.overlay.events.arrows.connect(self._on_data_change)
25
26 self.viewer.events.theme.connect(self._on_data_change)
27 self.viewer.camera.events.zoom.connect(self._on_zoom_change)
28 self.viewer.dims.events.order.connect(self._on_data_change)
29 self.viewer.dims.events.range.connect(self._on_data_change)
30 self.viewer.dims.events.ndisplay.connect(self._on_data_change)
31 self.viewer.dims.events.axis_labels.connect(
32 self._on_labels_text_change
33 )
34
35 self.reset()
36
37 def _on_data_change(self):
38 # Determine which axes are displayed
39 axes = self.viewer.dims.displayed[::-1]
40
41 # Counting backwards from total number of dimensions
42 # determine axes positions. This is done as by default
43 # the last NumPy axis corresponds to the first Vispy axis
44 reversed_axes = [self.viewer.dims.ndim - 1 - a for a in axes]
45
46 self.node.set_data(
47 axes=axes,
48 reversed_axes=reversed_axes,
49 colored=self.overlay.colored,
50 bg_color=get_theme(self.viewer.theme, False).canvas,
51 dashed=self.overlay.dashed,
52 arrows=self.overlay.arrows,
53 )
54
55 def _on_labels_visible_change(self):
56 self.node.text.visible = self.overlay.labels
57
58 def _on_labels_text_change(self):
59 axes = self.viewer.dims.displayed[::-1]
60 axes_labels = [self.viewer.dims.axis_labels[a] for a in axes]
61 self.node.text.text = axes_labels
62
63 def _on_zoom_change(self):
64 scale = 1 / self.viewer.camera.zoom
65
66 # If scale has not changed, do not redraw
67 if abs(np.log10(self._scale) - np.log10(scale)) < 1e-4:
68 return
69 self._scale = scale
70 scale = self._target_length * self._scale
71 # Update axes scale
72 self.node.transform.reset()
73 self.node.transform.scale([scale, scale, scale, 1])
74
75 def reset(self):
76 super().reset()
77 self._on_data_change()
78 self._on_labels_visible_change()
79 self._on_labels_text_change()
80 self._on_zoom_change()
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/_vispy/overlays/axes.py b/napari/_vispy/overlays/axes.py
--- a/napari/_vispy/overlays/axes.py
+++ b/napari/_vispy/overlays/axes.py
@@ -17,7 +17,6 @@
super().__init__(
node=Axes(), viewer=viewer, overlay=overlay, parent=parent
)
- self.overlay.events.visible.connect(self._on_visible_change)
self.overlay.events.colored.connect(self._on_data_change)
self.overlay.events.dashed.connect(self._on_data_change)
self.overlay.events.labels.connect(self._on_labels_visible_change)
@@ -52,6 +51,8 @@
arrows=self.overlay.arrows,
)
+ self._on_labels_text_change()
+
def _on_labels_visible_change(self):
self.node.text.visible = self.overlay.labels
@@ -76,5 +77,4 @@
super().reset()
self._on_data_change()
self._on_labels_visible_change()
- self._on_labels_text_change()
self._on_zoom_change()
| {"golden_diff": "diff --git a/napari/_vispy/overlays/axes.py b/napari/_vispy/overlays/axes.py\n--- a/napari/_vispy/overlays/axes.py\n+++ b/napari/_vispy/overlays/axes.py\n@@ -17,7 +17,6 @@\n super().__init__(\n node=Axes(), viewer=viewer, overlay=overlay, parent=parent\n )\n- self.overlay.events.visible.connect(self._on_visible_change)\n self.overlay.events.colored.connect(self._on_data_change)\n self.overlay.events.dashed.connect(self._on_data_change)\n self.overlay.events.labels.connect(self._on_labels_visible_change)\n@@ -52,6 +51,8 @@\n arrows=self.overlay.arrows,\n )\n \n+ self._on_labels_text_change()\n+\n def _on_labels_visible_change(self):\n self.node.text.visible = self.overlay.labels\n \n@@ -76,5 +77,4 @@\n super().reset()\n self._on_data_change()\n self._on_labels_visible_change()\n- self._on_labels_text_change()\n self._on_zoom_change()\n", "issue": "viewer axes point in basically random directions \ud83d\ude02\n## \ud83d\udc1b Bug\r\n\r\nI was trying to explain anisotropic image resolution to someone the other day and this napari bug severely undermined my otherwise excellent demonstration. \ud83d\ude02 TLDR: the axes 0, 1, 2 displayed in the viewer by default are not the same as the axes 0, 1, 2 on the input array (and by extension the positions of the `layer.scale` parameter).\r\n\r\n## To Reproduce\r\n\r\n```python\r\nimport numpy as np\r\nimport napari\r\n\r\nvolume = np.random.random((64, 64, 64))\r\n\r\nviewer, layer = napari.imshow(volume, scale=(1, 2, 4), ndisplay=3)\r\nviewer.axes.visible = True\r\n\r\nnapari.run()\r\n```\r\n\r\n<img width=\"948\" alt=\"Screen Shot 2023-02-07 at 9 05 39 pm\" src=\"https://user-images.githubusercontent.com/492549/217215889-c6aca123-f529-4a82-9893-4d585ac69256.png\">\r\n\r\n## Expected behavior\r\n\r\nThe code produces a 3D oblong/rectangular prism with sides of length 64, 128, and 256 in world-space. Given the scale setting above, one would expect axis 0 to point in the direction of the side of length 64, 1 for 128, and 2 for 256. Instead, you see 0 pointing along the side of length 128, 1 along the side of length 256, and 2 along the side of length 64. 
(**Edited:** [fixed typo](https://github.com/napari/napari/issues/5536#issuecomment-1420555774), 0->2)\r\n\r\n## Environment\r\n\r\n```\r\nnapari: 0.4.17rc4.dev169+g57fa998e7.d20221219\r\nPlatform: macOS-12.5.1-arm64-arm-64bit\r\nSystem: MacOS 12.5.1\r\nPython: 3.10.5 | packaged by conda-forge | (main, Jun 14 2022, 07:07:06) [Clang 13.0.1 ]\r\nQt: 5.15.4\r\nPyQt5: 5.15.7\r\nNumPy: 1.22.4\r\nSciPy: 1.8.1\r\nDask: 2022.9.1\r\nVisPy: 0.12.1\r\nmagicgui: 0.5.1\r\nsuperqt: 0.3.2\r\nin-n-out: 0.1.5\r\napp-model: 0.1.1\r\nnpe2: 0.6.1\r\n\r\nOpenGL:\r\n - GL version: 2.1 Metal - 76.3\r\n - MAX_TEXTURE_SIZE: 16384\r\n\r\nScreens:\r\n - screen 1: resolution 1800x1169, scale 2.0\r\n\r\nSettings path:\r\n - /Users/jni/Library/Application Support/napari/all_f332943dd4a538ce2ff3134fd0ff74396d8d8b23/settings.yaml\r\nPlugins:\r\n - affinder: 0.2.3.dev12+g6d235f9 (2 contributions)\r\n - napari: 0.4.17rc4.dev169+g57fa998e7.d20221219 (77 contributions)\r\n - napari-console: 0.0.6 (0 contributions)\r\n - napari-multiscale-rendering-prototype: 0.0.1 (6 contributions)\r\n - napari-pymeshlab: 0.0.5 (17 contributions)\r\n - napari-skeleton-curator: 0.1.dev21+gf2de859 (4 contributions)\r\n - napari-svg: 0.1.6 (2 contributions)\r\n - napari-watershed: 0.0.0 (4 contributions)\r\n - skan: 0.11.0.dev0 (2 contributions)\r\n - zarpaint: 0.1.1.dev20+g61ba4a0 (14 contributions)\r\n```\r\n\r\n## Additional context\r\n\r\nSemi-related: #4633\n", "before_files": [{"content": "import numpy as np\n\nfrom napari._vispy.overlays.base import ViewerOverlayMixin, VispySceneOverlay\nfrom napari._vispy.visuals.axes import Axes\nfrom napari.utils.theme import get_theme\n\n\nclass VispyAxesOverlay(ViewerOverlayMixin, VispySceneOverlay):\n \"\"\"Axes indicating world coordinate origin and orientation.\"\"\"\n\n def __init__(self, *, viewer, overlay, parent=None) -> None:\n self._scale = 1\n\n # Target axes length in canvas pixels\n self._target_length = 80\n\n super().__init__(\n node=Axes(), viewer=viewer, overlay=overlay, parent=parent\n )\n self.overlay.events.visible.connect(self._on_visible_change)\n self.overlay.events.colored.connect(self._on_data_change)\n self.overlay.events.dashed.connect(self._on_data_change)\n self.overlay.events.labels.connect(self._on_labels_visible_change)\n self.overlay.events.arrows.connect(self._on_data_change)\n\n self.viewer.events.theme.connect(self._on_data_change)\n self.viewer.camera.events.zoom.connect(self._on_zoom_change)\n self.viewer.dims.events.order.connect(self._on_data_change)\n self.viewer.dims.events.range.connect(self._on_data_change)\n self.viewer.dims.events.ndisplay.connect(self._on_data_change)\n self.viewer.dims.events.axis_labels.connect(\n self._on_labels_text_change\n )\n\n self.reset()\n\n def _on_data_change(self):\n # Determine which axes are displayed\n axes = self.viewer.dims.displayed[::-1]\n\n # Counting backwards from total number of dimensions\n # determine axes positions. 
This is done as by default\n # the last NumPy axis corresponds to the first Vispy axis\n reversed_axes = [self.viewer.dims.ndim - 1 - a for a in axes]\n\n self.node.set_data(\n axes=axes,\n reversed_axes=reversed_axes,\n colored=self.overlay.colored,\n bg_color=get_theme(self.viewer.theme, False).canvas,\n dashed=self.overlay.dashed,\n arrows=self.overlay.arrows,\n )\n\n def _on_labels_visible_change(self):\n self.node.text.visible = self.overlay.labels\n\n def _on_labels_text_change(self):\n axes = self.viewer.dims.displayed[::-1]\n axes_labels = [self.viewer.dims.axis_labels[a] for a in axes]\n self.node.text.text = axes_labels\n\n def _on_zoom_change(self):\n scale = 1 / self.viewer.camera.zoom\n\n # If scale has not changed, do not redraw\n if abs(np.log10(self._scale) - np.log10(scale)) < 1e-4:\n return\n self._scale = scale\n scale = self._target_length * self._scale\n # Update axes scale\n self.node.transform.reset()\n self.node.transform.scale([scale, scale, scale, 1])\n\n def reset(self):\n super().reset()\n self._on_data_change()\n self._on_labels_visible_change()\n self._on_labels_text_change()\n self._on_zoom_change()\n", "path": "napari/_vispy/overlays/axes.py"}], "after_files": [{"content": "import numpy as np\n\nfrom napari._vispy.overlays.base import ViewerOverlayMixin, VispySceneOverlay\nfrom napari._vispy.visuals.axes import Axes\nfrom napari.utils.theme import get_theme\n\n\nclass VispyAxesOverlay(ViewerOverlayMixin, VispySceneOverlay):\n \"\"\"Axes indicating world coordinate origin and orientation.\"\"\"\n\n def __init__(self, *, viewer, overlay, parent=None) -> None:\n self._scale = 1\n\n # Target axes length in canvas pixels\n self._target_length = 80\n\n super().__init__(\n node=Axes(), viewer=viewer, overlay=overlay, parent=parent\n )\n self.overlay.events.colored.connect(self._on_data_change)\n self.overlay.events.dashed.connect(self._on_data_change)\n self.overlay.events.labels.connect(self._on_labels_visible_change)\n self.overlay.events.arrows.connect(self._on_data_change)\n\n self.viewer.events.theme.connect(self._on_data_change)\n self.viewer.camera.events.zoom.connect(self._on_zoom_change)\n self.viewer.dims.events.order.connect(self._on_data_change)\n self.viewer.dims.events.range.connect(self._on_data_change)\n self.viewer.dims.events.ndisplay.connect(self._on_data_change)\n self.viewer.dims.events.axis_labels.connect(\n self._on_labels_text_change\n )\n\n self.reset()\n\n def _on_data_change(self):\n # Determine which axes are displayed\n axes = self.viewer.dims.displayed[::-1]\n\n # Counting backwards from total number of dimensions\n # determine axes positions. 
This is done as by default\n # the last NumPy axis corresponds to the first Vispy axis\n reversed_axes = [self.viewer.dims.ndim - 1 - a for a in axes]\n\n self.node.set_data(\n axes=axes,\n reversed_axes=reversed_axes,\n colored=self.overlay.colored,\n bg_color=get_theme(self.viewer.theme, False).canvas,\n dashed=self.overlay.dashed,\n arrows=self.overlay.arrows,\n )\n\n self._on_labels_text_change()\n\n def _on_labels_visible_change(self):\n self.node.text.visible = self.overlay.labels\n\n def _on_labels_text_change(self):\n axes = self.viewer.dims.displayed[::-1]\n axes_labels = [self.viewer.dims.axis_labels[a] for a in axes]\n self.node.text.text = axes_labels\n\n def _on_zoom_change(self):\n scale = 1 / self.viewer.camera.zoom\n\n # If scale has not changed, do not redraw\n if abs(np.log10(self._scale) - np.log10(scale)) < 1e-4:\n return\n self._scale = scale\n scale = self._target_length * self._scale\n # Update axes scale\n self.node.transform.reset()\n self.node.transform.scale([scale, scale, scale, 1])\n\n def reset(self):\n super().reset()\n self._on_data_change()\n self._on_labels_visible_change()\n self._on_zoom_change()\n", "path": "napari/_vispy/overlays/axes.py"}]} | 2,114 | 247 |
gh_patches_debug_6440 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1976 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
jsonschema RefResolutionError for 0.6.3 or lower
### Summary
The newest version of `jsonschema` (4.15.0, released today) breaks the current release version of `pyhf` (0.6.3).
### OS / Environment
```console
NAME=Gentoo
ID=gentoo
PRETTY_NAME="Gentoo Linux"
ANSI_COLOR="1;32"
HOME_URL="https://www.gentoo.org/"
SUPPORT_URL="https://www.gentoo.org/support/"
BUG_REPORT_URL="https://bugs.gentoo.org/"
VERSION_ID="2.8"
```
### Steps to Reproduce
```bash
pip install 'jsonschema==4.15.0' 'pyhf==0.6.3'
```
```python
import pyhf
model = pyhf.simplemodels.uncorrelated_background(
signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
)
```
### File Upload (optional)
_No response_
### Expected Results
I expected not to get an error because this is the [Hello World example](https://pyhf.readthedocs.io/en/v0.6.3/examples/notebooks/hello-world.html). I confirmed that there's no error with `jsonschema==4.14.0` (the previous version) in the exact same environment otherwise.
### Actual Results
```console
Traceback (most recent call last):
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 889, in resolve_from_url
document = self.store[url]
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/_utils.py", line 28, in __getitem__
return self.store[self.normalize(uri)]
KeyError: 'file:///home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 1505, in open_local_file
stats = os.stat(localfile)
FileNotFoundError: [Errno 2] No such file or directory: '/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 892, in resolve_from_url
document = self.resolve_remote(url)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 1000, in resolve_remote
with urlopen(uri) as url:
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 216, in urlopen
return opener.open(url, data, timeout)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 519, in open
response = self._open(req, data)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 536, in _open
result = self._call_chain(self.handle_open, protocol, protocol +
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 496, in _call_chain
result = func(*args)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 1483, in file_open
return self.open_local_file(req)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 1522, in open_local_file
raise URLError(exp)
urllib.error.URLError: <urlopen error [Errno 2] No such file or directory: '/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/simplemodels.py", line 141, in uncorrelated_background
return Model(spec, batch_size=batch_size)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/pdf.py", line 682, in __init__
utils.validate(self.spec, self.schema, version=self.version)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/utils.py", line 62, in validate
return validator.validate(spec)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 302, in validate
for error in self.iter_errors(*args, **kwargs):
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 277, in iter_errors
for error in errors:
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/_validators.py", line 294, in ref
scope, resolved = validator.resolver.resolve(ref)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 880, in resolve
return url, self._remote_cache(url)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 894, in resolve_from_url
raise exceptions.RefResolutionError(exc)
jsonschema.exceptions.RefResolutionError: <urlopen error [Errno 2] No such file or directory: '/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'>
```
### pyhf Version
```console
pyhf, version 0.6.3
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
--- END ISSUE ---
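The traceback above comes down to how `jsonschema.RefResolver` resolves a relative `"$ref"` such as `defs.json`: it is joined onto `base_uri`, so if the versioned subdirectory is missing from `base_uri` the reference points at a file that does not exist on disk. A minimal sketch of a resolver anchored inside the versioned schema directory — the same idea as the accompanying patch; `schemas_dir`, `version`, and the schema names here are placeholders:
```python
import jsonschema

# Placeholder paths standing in for pyhf's variables.schemas / SCHEMA_VERSION.
schemas_dir = "/path/to/pyhf/schemas"
version = "1.0.0"

# With the version directory included in base_uri, a relative "$ref": "defs.json"
# resolves to .../schemas/1.0.0/defs.json instead of .../schemas/defs.json.
resolver = jsonschema.RefResolver(
    base_uri=f"file://{schemas_dir}/{version}/",
    referrer="model.json",
    store={},
)
```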
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyhf/schema/validator.py`
Content:
```
1 import jsonschema
2 import pyhf.exceptions
3 from pyhf.schema.loader import load_schema
4 from pyhf.schema import variables
5 from typing import Union, Mapping
6
7
8 def validate(spec: Mapping, schema_name: str, version: Union[str, None] = None):
9 """
10 Validate a provided specification against a schema.
11
12 Args:
13 spec (dict): The specification to validate.
14 schema_name (str): The name of the schema to use.
15 version (None or str): The version to use if not the default from :attr:`pyhf.schema.version`.
16
17 Returns:
18 None: schema validated fine
19
20 Raises:
21 pyhf.exceptions.InvalidSpecification: the specification is invalid
22 """
23
24 version = version or variables.SCHEMA_VERSION
25
26 schema = load_schema(f'{version}/{schema_name}')
27
28 # note: trailing slash needed for RefResolver to resolve correctly
29 resolver = jsonschema.RefResolver(
30 base_uri=f"file://{variables.schemas}/",
31 referrer=f"{version}/{schema_name}",
32 store=variables.SCHEMA_CACHE,
33 )
34 validator = jsonschema.Draft6Validator(
35 schema, resolver=resolver, format_checker=None
36 )
37
38 try:
39 return validator.validate(spec)
40 except jsonschema.ValidationError as err:
41 raise pyhf.exceptions.InvalidSpecification(err, schema_name)
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pyhf/schema/validator.py b/src/pyhf/schema/validator.py
--- a/src/pyhf/schema/validator.py
+++ b/src/pyhf/schema/validator.py
@@ -27,8 +27,8 @@
# note: trailing slash needed for RefResolver to resolve correctly
resolver = jsonschema.RefResolver(
- base_uri=f"file://{variables.schemas}/",
- referrer=f"{version}/{schema_name}",
+ base_uri=f"file://{variables.schemas}/{version}/",
+ referrer=f"{schema_name}",
store=variables.SCHEMA_CACHE,
)
validator = jsonschema.Draft6Validator(
| {"golden_diff": "diff --git a/src/pyhf/schema/validator.py b/src/pyhf/schema/validator.py\n--- a/src/pyhf/schema/validator.py\n+++ b/src/pyhf/schema/validator.py\n@@ -27,8 +27,8 @@\n \n # note: trailing slash needed for RefResolver to resolve correctly\n resolver = jsonschema.RefResolver(\n- base_uri=f\"file://{variables.schemas}/\",\n- referrer=f\"{version}/{schema_name}\",\n+ base_uri=f\"file://{variables.schemas}/{version}/\",\n+ referrer=f\"{schema_name}\",\n store=variables.SCHEMA_CACHE,\n )\n validator = jsonschema.Draft6Validator(\n", "issue": "jsonschema RefResolutionError for 0.6.3 or lower\n### Summary\n\nThe newest version of `jsonschema` (4.15.0, released today) breaks the current release version of `pyhf` (0.6.3).\n\n### OS / Environment\n\n```console\nNAME=Gentoo\r\nID=gentoo\r\nPRETTY_NAME=\"Gentoo Linux\"\r\nANSI_COLOR=\"1;32\"\r\nHOME_URL=\"https://www.gentoo.org/\"\r\nSUPPORT_URL=\"https://www.gentoo.org/support/\"\r\nBUG_REPORT_URL=\"https://bugs.gentoo.org/\"\r\nVERSION_ID=\"2.8\"\n```\n\n\n### Steps to Reproduce\n\n```bash\r\npip install 'jsonschema==4.15.0' 'pyhf==0.6.3'\r\n```\r\n\r\n```python\r\nimport pyhf\r\nmodel = pyhf.simplemodels.uncorrelated_background(\r\n signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]\r\n)\r\n```\n\n### File Upload (optional)\n\n_No response_\n\n### Expected Results\n\nI expected not to get an error because this is the [Hello World example](https://pyhf.readthedocs.io/en/v0.6.3/examples/notebooks/hello-world.html). I confirmed that there's no error with `jsonschema==4.14.0` (the previous version) in the exact same environment otherwise.\n\n### Actual Results\n\n```console\nTraceback (most recent call last):\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py\", line 889, in resolve_from_url\r\n document = self.store[url]\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/_utils.py\", line 28, in __getitem__\r\n return self.store[self.normalize(uri)]\r\nKeyError: 'file:///home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py\", line 1505, in open_local_file\r\n stats = os.stat(localfile)\r\nFileNotFoundError: [Errno 2] No such file or directory: '/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py\", line 892, in resolve_from_url\r\n document = self.resolve_remote(url)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py\", line 1000, in resolve_remote\r\n with urlopen(uri) as url:\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py\", line 216, in urlopen\r\n return opener.open(url, data, timeout)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py\", line 519, in open\r\n response = self._open(req, data)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py\", line 536, in _open\r\n result = self._call_chain(self.handle_open, protocol, protocol +\r\n File 
\"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py\", line 496, in _call_chain\r\n result = func(*args)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py\", line 1483, in file_open\r\n return self.open_local_file(req)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py\", line 1522, in open_local_file\r\n raise URLError(exp)\r\nurllib.error.URLError: <urlopen error [Errno 2] No such file or directory: '/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'>\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/simplemodels.py\", line 141, in uncorrelated_background\r\n return Model(spec, batch_size=batch_size)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/pdf.py\", line 682, in __init__\r\n utils.validate(self.spec, self.schema, version=self.version)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/utils.py\", line 62, in validate\r\n return validator.validate(spec)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py\", line 302, in validate\r\n for error in self.iter_errors(*args, **kwargs):\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py\", line 277, in iter_errors\r\n for error in errors:\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/_validators.py\", line 294, in ref\r\n scope, resolved = validator.resolver.resolve(ref)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py\", line 880, in resolve\r\n return url, self._remote_cache(url)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py\", line 894, in resolve_from_url\r\n raise exceptions.RefResolutionError(exc)\r\njsonschema.exceptions.RefResolutionError: <urlopen error [Errno 2] No such file or directory: '/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'>\n```\n\n\n### pyhf Version\n\n```console\npyhf, version 0.6.3\n```\n\n\n### Code of Conduct\n\n- [X] I agree to follow the Code of Conduct\n", "before_files": [{"content": "import jsonschema\nimport pyhf.exceptions\nfrom pyhf.schema.loader import load_schema\nfrom pyhf.schema import variables\nfrom typing import Union, Mapping\n\n\ndef validate(spec: Mapping, schema_name: str, version: Union[str, None] = None):\n \"\"\"\n Validate a provided specification against a schema.\n\n Args:\n spec (dict): The specification to validate.\n schema_name (str): The name of the schema to use.\n version (None or str): The version to use if not the default from :attr:`pyhf.schema.version`.\n\n Returns:\n None: schema validated fine\n\n Raises:\n pyhf.exceptions.InvalidSpecification: the specification is invalid\n \"\"\"\n\n version = version or variables.SCHEMA_VERSION\n\n schema = load_schema(f'{version}/{schema_name}')\n\n # note: trailing slash needed for RefResolver to resolve correctly\n resolver = jsonschema.RefResolver(\n base_uri=f\"file://{variables.schemas}/\",\n referrer=f\"{version}/{schema_name}\",\n store=variables.SCHEMA_CACHE,\n )\n validator = jsonschema.Draft6Validator(\n schema, resolver=resolver, format_checker=None\n )\n\n try:\n 
return validator.validate(spec)\n except jsonschema.ValidationError as err:\n raise pyhf.exceptions.InvalidSpecification(err, schema_name)\n", "path": "src/pyhf/schema/validator.py"}], "after_files": [{"content": "import jsonschema\nimport pyhf.exceptions\nfrom pyhf.schema.loader import load_schema\nfrom pyhf.schema import variables\nfrom typing import Union, Mapping\n\n\ndef validate(spec: Mapping, schema_name: str, version: Union[str, None] = None):\n \"\"\"\n Validate a provided specification against a schema.\n\n Args:\n spec (dict): The specification to validate.\n schema_name (str): The name of the schema to use.\n version (None or str): The version to use if not the default from :attr:`pyhf.schema.version`.\n\n Returns:\n None: schema validated fine\n\n Raises:\n pyhf.exceptions.InvalidSpecification: the specification is invalid\n \"\"\"\n\n version = version or variables.SCHEMA_VERSION\n\n schema = load_schema(f'{version}/{schema_name}')\n\n # note: trailing slash needed for RefResolver to resolve correctly\n resolver = jsonschema.RefResolver(\n base_uri=f\"file://{variables.schemas}/{version}/\",\n referrer=f\"{schema_name}\",\n store=variables.SCHEMA_CACHE,\n )\n validator = jsonschema.Draft6Validator(\n schema, resolver=resolver, format_checker=None\n )\n\n try:\n return validator.validate(spec)\n except jsonschema.ValidationError as err:\n raise pyhf.exceptions.InvalidSpecification(err, schema_name)\n", "path": "src/pyhf/schema/validator.py"}]} | 2,145 | 143 |
gh_patches_debug_1588 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1804 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django Component Governance vulnerability
Django 1.11 before 1.11.28, 2.2 before 2.2.10, and 3.0 before 3.0.3 allows SQL Injection if untrusted data is used as a StringAgg delimiter (e.g., in Django applications that offer downloads of data as a series of rows with a user-specified column delimiter). By passing a suitably crafted delimiter to a contrib.postgres.aggregates.StringAgg instance, it was possible to break escaping and inject malicious SQL.
https://dev.azure.com/FuseLabs/SDK_v4/_componentGovernance/112465/alert/2370216?typeId=4354877
--- END ISSUE ---
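The vulnerable versions only appear in this package's sample-app test requirements, so the remediation is moving the Django pin past the patched release, which is what the accompanying patch does — a sketch of the updated pin list:
```python
TESTS_REQUIRES = [
    "aiounittest==1.3.0",
    "django==2.2.10",  # 2.2.10 is the first 2.2.x release with the StringAgg fix
    "djangorestframework==3.10.3",  # For samples
    "flask==1.1.1",  # For samples
]
```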
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-applicationinsights/setup.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 REQUIRES = [
8 "applicationinsights==0.11.9",
9 "botbuilder-schema==4.15.0",
10 "botframework-connector==4.15.0",
11 "botbuilder-core==4.15.0",
12 ]
13 TESTS_REQUIRES = [
14 "aiounittest==1.3.0",
15 "django==2.2.6", # For samples
16 "djangorestframework==3.10.3", # For samples
17 "flask==1.1.1", # For samples
18 ]
19
20 root = os.path.abspath(os.path.dirname(__file__))
21
22 with open(os.path.join(root, "botbuilder", "applicationinsights", "about.py")) as f:
23 package_info = {}
24 info = f.read()
25 exec(info, package_info)
26
27 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
28 long_description = f.read()
29
30 setup(
31 name=package_info["__title__"],
32 version=package_info["__version__"],
33 url=package_info["__uri__"],
34 author=package_info["__author__"],
35 description=package_info["__description__"],
36 keywords=[
37 "BotBuilderApplicationInsights",
38 "bots",
39 "ai",
40 "botframework",
41 "botbuilder",
42 ],
43 long_description=long_description,
44 long_description_content_type="text/x-rst",
45 license=package_info["__license__"],
46 packages=[
47 "botbuilder.applicationinsights",
48 "botbuilder.applicationinsights.django",
49 "botbuilder.applicationinsights.flask",
50 "botbuilder.applicationinsights.processor",
51 ],
52 install_requires=REQUIRES + TESTS_REQUIRES,
53 tests_require=TESTS_REQUIRES,
54 include_package_data=True,
55 classifiers=[
56 "Programming Language :: Python :: 3.7",
57 "Intended Audience :: Developers",
58 "License :: OSI Approved :: MIT License",
59 "Operating System :: OS Independent",
60 "Development Status :: 5 - Production/Stable",
61 "Topic :: Scientific/Engineering :: Artificial Intelligence",
62 ],
63 )
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botbuilder-applicationinsights/setup.py b/libraries/botbuilder-applicationinsights/setup.py
--- a/libraries/botbuilder-applicationinsights/setup.py
+++ b/libraries/botbuilder-applicationinsights/setup.py
@@ -12,7 +12,7 @@
]
TESTS_REQUIRES = [
"aiounittest==1.3.0",
- "django==2.2.6", # For samples
+ "django==2.2.10", # For samples
"djangorestframework==3.10.3", # For samples
"flask==1.1.1", # For samples
]
| {"golden_diff": "diff --git a/libraries/botbuilder-applicationinsights/setup.py b/libraries/botbuilder-applicationinsights/setup.py\n--- a/libraries/botbuilder-applicationinsights/setup.py\n+++ b/libraries/botbuilder-applicationinsights/setup.py\n@@ -12,7 +12,7 @@\n ]\n TESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n- \"django==2.2.6\", # For samples\n+ \"django==2.2.10\", # For samples\n \"djangorestframework==3.10.3\", # For samples\n \"flask==1.1.1\", # For samples\n ]\n", "issue": "Django Component Governance vulnerability\nDjango 1.11 before 1.11.28, 2.2 before 2.2.10, and 3.0 before 3.0.3 allows SQL Injection if untrusted data is used as a StringAgg delimiter (e.g., in Django applications that offer downloads of data as a series of rows with a user-specified column delimiter). By passing a suitably crafted delimiter to a contrib.postgres.aggregates.StringAgg instance, it was possible to break escaping and inject malicious SQL.\r\n\r\nhttps://dev.azure.com/FuseLabs/SDK_v4/_componentGovernance/112465/alert/2370216?typeId=4354877\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"applicationinsights==0.11.9\",\n \"botbuilder-schema==4.15.0\",\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n]\nTESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n \"django==2.2.6\", # For samples\n \"djangorestframework==3.10.3\", # For samples\n \"flask==1.1.1\", # For samples\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"applicationinsights\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderApplicationInsights\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.applicationinsights\",\n \"botbuilder.applicationinsights.django\",\n \"botbuilder.applicationinsights.flask\",\n \"botbuilder.applicationinsights.processor\",\n ],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-applicationinsights/setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"applicationinsights==0.11.9\",\n \"botbuilder-schema==4.15.0\",\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n]\nTESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n \"django==2.2.10\", # For samples\n \"djangorestframework==3.10.3\", # For samples\n \"flask==1.1.1\", # For samples\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"applicationinsights\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderApplicationInsights\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.applicationinsights\",\n \"botbuilder.applicationinsights.django\",\n \"botbuilder.applicationinsights.flask\",\n \"botbuilder.applicationinsights.processor\",\n ],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-applicationinsights/setup.py"}]} | 1,032 | 152 |
gh_patches_debug_36006 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1366 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't configure source without arguments
Hello Team,
I'm trying to configure the Mamirolle info source that I added into Home Assistant, but I'm running into argument issues.
`args` is marked `required`, so if none is passed, the configuration is invalid.
```
Invalid config for [waste_collection_schedule]: required key not provided @ data['waste_collection_schedule']['sources'][0]['args']. Got None. (See /config/configuration.yaml, line 27).
```
If a dummy argument is passed, the configuration is valid but the source setup fails.
```
Error during setup of component waste_collection_schedule
Traceback (most recent call last):
File "/usr/src/homeassistant/homeassistant/setup.py", line 288, in _async_setup_component
result = await task
^^^^^^^^^^
File "/config/custom_components/waste_collection_schedule/__init__.py", line 109, in async_setup
api.add_source_shell(
File "/config/custom_components/waste_collection_schedule/__init__.py", line 202, in add_source_shell
SourceShell.create(
File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 196, in create
source = source_module.Source(**source_args) # type: ignore
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
TypeError: Source() takes no arguments
```
I understand that we want the configuration to fail early, but the real error will still only surface when the source is actually instantiated. Because of that, I think the arguments shouldn't be required.
What do you think about this?
--- END ISSUE ---
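One way to reconcile a required `args` key with a source that needs no arguments — and what the accompanying patch ends up doing — is to have the source accept and ignore an optional parameter, so a dummy value no longer breaks instantiation. A minimal sketch; the `_` parameter name and the empty `fetch` body are purely illustrative:
```python
class Source:
    def __init__(self, _=None):
        # Accept (and ignore) a dummy argument so a configuration that passes
        # a placeholder value under `args` still instantiates the source.
        pass

    def fetch(self):
        return []  # real sources return a list of Collection objects
```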
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py`
Content:
```
1 import datetime
2
3 import requests
4 from bs4 import BeautifulSoup
5 from waste_collection_schedule import Collection
6
7 TITLE = "Mairie de Mamirolle"
8 DESCRIPTION = "Source script for mamirolle.info"
9 COUNTRY = "fr"
10 URL = "http://mamirolle.info/"
11
12 TEST_CASES = {"TestSource": {}}
13
14 ICON_MAP = {
15 "Poubelle grise": "mdi:trash-can",
16 "Poubelle jaune": "mdi:recycle",
17 }
18
19 MONTH_NAMES = [
20 "janvier",
21 "février",
22 "mars",
23 "avril",
24 "mai",
25 "juin",
26 "juillet",
27 "août",
28 "septembre",
29 "octobre",
30 "novembre",
31 "décembre",
32 ]
33
34
35 class Source:
36 def fetch(self):
37 now = datetime.datetime.now()
38 # get list of regions and weblinks
39 page = requests.get(URL)
40 # A lenient HTML parser is need
41 soup = BeautifulSoup(page.text.replace("<![endif]", ""), "html.parser")
42 trash_domestic = soup.find("i", class_="poubelle-grise")
43 _, day, month = trash_domestic.next_sibling.string.split()
44 date_domestic = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()
45 if date_domestic < now.date():
46 date_domestic = date_domestic.replace(year=date_domestic.year + 1)
47
48 trash_recycle = soup.find("i", class_="poubelle-jaune")
49 _, day, month = trash_recycle.next_sibling.string.split()
50 date_recycle = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()
51 if date_recycle < now.date():
52 date_recycle = date_recycle.replace(year=date_recycle.year + 1)
53
54 entries = [
55 Collection(
56 date=date_domestic,
57 t="Poubelle grise",
58 icon=ICON_MAP.get("Poubelle grise"),
59 ),
60 Collection(
61 date=date_recycle,
62 t="Poubelle jaune",
63 icon=ICON_MAP.get("Poubelle jaune"),
64 ),
65 ] # List that holds collection schedule
66
67 return entries
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py
@@ -9,7 +9,12 @@
COUNTRY = "fr"
URL = "http://mamirolle.info/"
-TEST_CASES = {"TestSource": {}}
+TEST_CASES = {
+ "TestSource": {},
+ "IgnoredArgument": {
+ "_": ""
+ }
+}
ICON_MAP = {
"Poubelle grise": "mdi:trash-can",
@@ -33,6 +38,9 @@
class Source:
+ def __init__(self, _=None):
+ pass
+
def fetch(self):
now = datetime.datetime.now()
# get list of regions and weblinks
@@ -40,28 +48,19 @@
# A lenient HTML parser is need
soup = BeautifulSoup(page.text.replace("<![endif]", ""), "html.parser")
trash_domestic = soup.find("i", class_="poubelle-grise")
- _, day, month = trash_domestic.next_sibling.string.split()
- date_domestic = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()
- if date_domestic < now.date():
- date_domestic = date_domestic.replace(year=date_domestic.year + 1)
-
trash_recycle = soup.find("i", class_="poubelle-jaune")
- _, day, month = trash_recycle.next_sibling.string.split()
- date_recycle = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()
- if date_recycle < now.date():
- date_recycle = date_recycle.replace(year=date_recycle.year + 1)
- entries = [
- Collection(
- date=date_domestic,
- t="Poubelle grise",
- icon=ICON_MAP.get("Poubelle grise"),
- ),
- Collection(
- date=date_recycle,
- t="Poubelle jaune",
- icon=ICON_MAP.get("Poubelle jaune"),
- ),
- ] # List that holds collection schedule
+ entries = [] # List that holds collection schedule
+ for trash, label in [(trash_domestic, "Poubelle grise"), (trash_recycle, "Poubelle jaune")]:
+ _, day, month = trash.next_sibling.string.split()
+ date = now.replace(month=MONTH_NAMES.index(month) + 1, day=int(day)).date()
+ if date < now.date():
+ date = date.replace(year=date.year + 1)
+
+ entries.append(Collection(
+ date=date,
+ t=label,
+ icon=ICON_MAP.get(label),
+ ))
return entries
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py\n@@ -9,7 +9,12 @@\n COUNTRY = \"fr\"\n URL = \"http://mamirolle.info/\"\n \n-TEST_CASES = {\"TestSource\": {}}\n+TEST_CASES = {\n+ \"TestSource\": {},\n+ \"IgnoredArgument\": {\n+ \"_\": \"\"\n+ }\n+}\n \n ICON_MAP = {\n \"Poubelle grise\": \"mdi:trash-can\",\n@@ -33,6 +38,9 @@\n \n \n class Source:\n+ def __init__(self, _=None):\n+ pass\n+\n def fetch(self):\n now = datetime.datetime.now()\n # get list of regions and weblinks\n@@ -40,28 +48,19 @@\n # A lenient HTML parser is need\n soup = BeautifulSoup(page.text.replace(\"<![endif]\", \"\"), \"html.parser\")\n trash_domestic = soup.find(\"i\", class_=\"poubelle-grise\")\n- _, day, month = trash_domestic.next_sibling.string.split()\n- date_domestic = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()\n- if date_domestic < now.date():\n- date_domestic = date_domestic.replace(year=date_domestic.year + 1)\n-\n trash_recycle = soup.find(\"i\", class_=\"poubelle-jaune\")\n- _, day, month = trash_recycle.next_sibling.string.split()\n- date_recycle = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()\n- if date_recycle < now.date():\n- date_recycle = date_recycle.replace(year=date_recycle.year + 1)\n \n- entries = [\n- Collection(\n- date=date_domestic,\n- t=\"Poubelle grise\",\n- icon=ICON_MAP.get(\"Poubelle grise\"),\n- ),\n- Collection(\n- date=date_recycle,\n- t=\"Poubelle jaune\",\n- icon=ICON_MAP.get(\"Poubelle jaune\"),\n- ),\n- ] # List that holds collection schedule\n+ entries = [] # List that holds collection schedule\n+ for trash, label in [(trash_domestic, \"Poubelle grise\"), (trash_recycle, \"Poubelle jaune\")]:\n+ _, day, month = trash.next_sibling.string.split()\n+ date = now.replace(month=MONTH_NAMES.index(month) + 1, day=int(day)).date()\n+ if date < now.date():\n+ date = date.replace(year=date.year + 1)\n+\n+ entries.append(Collection(\n+ date=date,\n+ t=label,\n+ icon=ICON_MAP.get(label),\n+ ))\n \n return entries\n", "issue": "Can't configure source without arguments\nHello Team,\r\nI'm trying to configure into HomeAssistant the source that I added Mamirolle info, but I have argument issues.\r\n\r\n`args` is marked `required`, so if none is passed, the configuration is invalid.\r\n\r\n```\r\nInvalid config for [waste_collection_schedule]: required key not provided @ data['waste_collection_schedule']['sources'][0]['args']. Got None. (See /config/configuration.yaml, line 27). \r\n```\r\n\r\nIf a dummy argument is passed. 
The configuration is valid but the source setup fails.\r\n```\r\nError during setup of component waste_collection_schedule\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/src/homeassistant/homeassistant/setup.py\", line 288, in _async_setup_component\r\n result = await task\r\n ^^^^^^^^^^\r\n File \"/config/custom_components/waste_collection_schedule/__init__.py\", line 109, in async_setup\r\n api.add_source_shell(\r\n File \"/config/custom_components/waste_collection_schedule/__init__.py\", line 202, in add_source_shell\r\n SourceShell.create(\r\n File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 196, in create\r\n source = source_module.Source(**source_args) # type: ignore\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nTypeError: Source() takes no arguments\r\n```\r\nI understand that we want the configuration to fail early but the real error will still be seen only when the source is actually instantiated. Because of that I think the arguments shouldn't be required.\r\n\r\nWhat do you think about this?\n", "before_files": [{"content": "import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Mairie de Mamirolle\"\nDESCRIPTION = \"Source script for mamirolle.info\"\nCOUNTRY = \"fr\"\nURL = \"http://mamirolle.info/\"\n\nTEST_CASES = {\"TestSource\": {}}\n\nICON_MAP = {\n \"Poubelle grise\": \"mdi:trash-can\",\n \"Poubelle jaune\": \"mdi:recycle\",\n}\n\nMONTH_NAMES = [\n \"janvier\",\n \"f\u00e9vrier\",\n \"mars\",\n \"avril\",\n \"mai\",\n \"juin\",\n \"juillet\",\n \"ao\u00fbt\",\n \"septembre\",\n \"octobre\",\n \"novembre\",\n \"d\u00e9cembre\",\n]\n\n\nclass Source:\n def fetch(self):\n now = datetime.datetime.now()\n # get list of regions and weblinks\n page = requests.get(URL)\n # A lenient HTML parser is need\n soup = BeautifulSoup(page.text.replace(\"<![endif]\", \"\"), \"html.parser\")\n trash_domestic = soup.find(\"i\", class_=\"poubelle-grise\")\n _, day, month = trash_domestic.next_sibling.string.split()\n date_domestic = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()\n if date_domestic < now.date():\n date_domestic = date_domestic.replace(year=date_domestic.year + 1)\n\n trash_recycle = soup.find(\"i\", class_=\"poubelle-jaune\")\n _, day, month = trash_recycle.next_sibling.string.split()\n date_recycle = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()\n if date_recycle < now.date():\n date_recycle = date_recycle.replace(year=date_recycle.year + 1)\n\n entries = [\n Collection(\n date=date_domestic,\n t=\"Poubelle grise\",\n icon=ICON_MAP.get(\"Poubelle grise\"),\n ),\n Collection(\n date=date_recycle,\n t=\"Poubelle jaune\",\n icon=ICON_MAP.get(\"Poubelle jaune\"),\n ),\n ] # List that holds collection schedule\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py"}], "after_files": [{"content": "import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Mairie de Mamirolle\"\nDESCRIPTION = \"Source script for mamirolle.info\"\nCOUNTRY = \"fr\"\nURL = \"http://mamirolle.info/\"\n\nTEST_CASES = {\n \"TestSource\": {},\n \"IgnoredArgument\": {\n \"_\": \"\"\n }\n}\n\nICON_MAP = {\n \"Poubelle grise\": \"mdi:trash-can\",\n \"Poubelle jaune\": \"mdi:recycle\",\n}\n\nMONTH_NAMES = [\n \"janvier\",\n \"f\u00e9vrier\",\n \"mars\",\n \"avril\",\n \"mai\",\n \"juin\",\n 
\"juillet\",\n \"ao\u00fbt\",\n \"septembre\",\n \"octobre\",\n \"novembre\",\n \"d\u00e9cembre\",\n]\n\n\nclass Source:\n def __init__(self, _=None):\n pass\n\n def fetch(self):\n now = datetime.datetime.now()\n # get list of regions and weblinks\n page = requests.get(URL)\n # A lenient HTML parser is need\n soup = BeautifulSoup(page.text.replace(\"<![endif]\", \"\"), \"html.parser\")\n trash_domestic = soup.find(\"i\", class_=\"poubelle-grise\")\n trash_recycle = soup.find(\"i\", class_=\"poubelle-jaune\")\n\n entries = [] # List that holds collection schedule\n for trash, label in [(trash_domestic, \"Poubelle grise\"), (trash_recycle, \"Poubelle jaune\")]:\n _, day, month = trash.next_sibling.string.split()\n date = now.replace(month=MONTH_NAMES.index(month) + 1, day=int(day)).date()\n if date < now.date():\n date = date.replace(year=date.year + 1)\n\n entries.append(Collection(\n date=date,\n t=label,\n icon=ICON_MAP.get(label),\n ))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py"}]} | 1,224 | 665 |
gh_patches_debug_5170 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2781 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Traceback appears in Status Bar, when trying to replay live flow
##### Steps to reproduce the problem:
1. Run **pathod** : `pathod -a "/=200:p0,10"`
2. Run mitmproxy.
3. Send _get request_ to pathod through mitmproxy using **pathoc**:
`pathoc -c localhost:9999 localhost:8080 'get:/'`
4. Try to replay the corresponding live flow in mitmproxy by pressing `r`.
I am seeing:

##### Any other comments? What have you tried so far?
This issue is relevant in situations where the server hasn't had time to send a response yet, but a user tries to replay the corresponding flow.
I also faced this issue when trying to replay the `mitm.it` flow from the onboardingapp.
##### System information
Mitmproxy: 3.0.0.dev1101 (commit d9d4d15) binary
Python: 3.5.2
OpenSSL: OpenSSL 1.1.0g 2 Nov 2017
Platform: Linux-4.4.0-104-generic-x86_64-with-debian-stretch-sid
--- END ISSUE ---
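The traceback appears because a flow that is still in flight (no response yet) gets handed to client replay. The accompanying patch guards against this in `ClientPlayback.start_replay`; a trimmed-down sketch of that guard, assuming the mitmproxy package is importable:
```python
from mitmproxy import exceptions


class ClientPlayback:
    def __init__(self):
        self.flows = []

    def start_replay(self, flows):
        # Refuse flows that are still live, instead of crashing later
        # when the replay thread touches the missing response.
        for f in flows:
            if f.live:
                raise exceptions.CommandError("Can't replay live flow.")
        self.flows = list(flows)
```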
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/addons/clientplayback.py`
Content:
```
1 from mitmproxy import exceptions
2 from mitmproxy import ctx
3 from mitmproxy import io
4 from mitmproxy import flow
5 from mitmproxy import command
6 import mitmproxy.types
7
8 import typing
9
10
11 class ClientPlayback:
12 def __init__(self):
13 self.flows = [] # type: typing.List[flow.Flow]
14 self.current_thread = None
15 self.configured = False
16
17 def count(self) -> int:
18 if self.current_thread:
19 current = 1
20 else:
21 current = 0
22 return current + len(self.flows)
23
24 @command.command("replay.client.stop")
25 def stop_replay(self) -> None:
26 """
27 Stop client replay.
28 """
29 self.flows = []
30 ctx.log.alert("Client replay stopped.")
31 ctx.master.addons.trigger("update", [])
32
33 @command.command("replay.client")
34 def start_replay(self, flows: typing.Sequence[flow.Flow]) -> None:
35 """
36 Replay requests from flows.
37 """
38 self.flows = list(flows)
39 ctx.log.alert("Replaying %s flows." % len(self.flows))
40 ctx.master.addons.trigger("update", [])
41
42 @command.command("replay.client.file")
43 def load_file(self, path: mitmproxy.types.Path) -> None:
44 try:
45 flows = io.read_flows_from_paths([path])
46 except exceptions.FlowReadException as e:
47 raise exceptions.CommandError(str(e))
48 ctx.log.alert("Replaying %s flows." % len(self.flows))
49 self.flows = flows
50 ctx.master.addons.trigger("update", [])
51
52 def configure(self, updated):
53 if not self.configured and ctx.options.client_replay:
54 self.configured = True
55 ctx.log.info("Client Replay: {}".format(ctx.options.client_replay))
56 try:
57 flows = io.read_flows_from_paths(ctx.options.client_replay)
58 except exceptions.FlowReadException as e:
59 raise exceptions.OptionsError(str(e))
60 self.start_replay(flows)
61
62 def tick(self):
63 current_is_done = self.current_thread and not self.current_thread.is_alive()
64 can_start_new = not self.current_thread or current_is_done
65 will_start_new = can_start_new and self.flows
66
67 if current_is_done:
68 self.current_thread = None
69 ctx.master.addons.trigger("update", [])
70 if will_start_new:
71 f = self.flows.pop(0)
72 self.current_thread = ctx.master.replay_request(f)
73 ctx.master.addons.trigger("update", [f])
74 if current_is_done and not will_start_new:
75 ctx.master.addons.trigger("processing_complete")
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/addons/clientplayback.py b/mitmproxy/addons/clientplayback.py
--- a/mitmproxy/addons/clientplayback.py
+++ b/mitmproxy/addons/clientplayback.py
@@ -35,6 +35,9 @@
"""
Replay requests from flows.
"""
+ for f in flows:
+ if f.live:
+ raise exceptions.CommandError("Can't replay live flow.")
self.flows = list(flows)
ctx.log.alert("Replaying %s flows." % len(self.flows))
ctx.master.addons.trigger("update", [])
| {"golden_diff": "diff --git a/mitmproxy/addons/clientplayback.py b/mitmproxy/addons/clientplayback.py\n--- a/mitmproxy/addons/clientplayback.py\n+++ b/mitmproxy/addons/clientplayback.py\n@@ -35,6 +35,9 @@\n \"\"\"\n Replay requests from flows.\n \"\"\"\n+ for f in flows:\n+ if f.live:\n+ raise exceptions.CommandError(\"Can't replay live flow.\")\n self.flows = list(flows)\n ctx.log.alert(\"Replaying %s flows.\" % len(self.flows))\n ctx.master.addons.trigger(\"update\", [])\n", "issue": "Traceback appears in Status Bar, when trying to replay live flow\n##### Steps to reproduce the problem:\r\n\r\n1. Run **pathod** : `pathod -a \"/=200:p0,10\"`\r\n2. Run mitmproxy.\r\n3. Send _get request_ to pathod through mitmproxy using **pathoc**: \r\n`pathoc -c localhost:9999 localhost:8080 'get:/'`\r\n4. Try to replay the corresponding live flow in mitmproxy by pressing `r`.\r\n\r\nI am seeing:\r\n\r\n\r\n\r\n##### Any other comments? What have you tried so far?\r\nThis issue is relevant for the situations, when server didn't have time to send a response yet, but a user tries to replay the corresponding flow.\r\nI also faced this issue, when trying to replay `mitm.it` flow from onboardingapp.\r\n\r\n\r\n##### System information\r\n\r\nMitmproxy: 3.0.0.dev1101 (commit d9d4d15) binary\r\nPython: 3.5.2\r\nOpenSSL: OpenSSL 1.1.0g 2 Nov 2017\r\nPlatform: Linux-4.4.0-104-generic-x86_64-with-debian-stretch-sid\r\n\r\n \n", "before_files": [{"content": "from mitmproxy import exceptions\nfrom mitmproxy import ctx\nfrom mitmproxy import io\nfrom mitmproxy import flow\nfrom mitmproxy import command\nimport mitmproxy.types\n\nimport typing\n\n\nclass ClientPlayback:\n def __init__(self):\n self.flows = [] # type: typing.List[flow.Flow]\n self.current_thread = None\n self.configured = False\n\n def count(self) -> int:\n if self.current_thread:\n current = 1\n else:\n current = 0\n return current + len(self.flows)\n\n @command.command(\"replay.client.stop\")\n def stop_replay(self) -> None:\n \"\"\"\n Stop client replay.\n \"\"\"\n self.flows = []\n ctx.log.alert(\"Client replay stopped.\")\n ctx.master.addons.trigger(\"update\", [])\n\n @command.command(\"replay.client\")\n def start_replay(self, flows: typing.Sequence[flow.Flow]) -> None:\n \"\"\"\n Replay requests from flows.\n \"\"\"\n self.flows = list(flows)\n ctx.log.alert(\"Replaying %s flows.\" % len(self.flows))\n ctx.master.addons.trigger(\"update\", [])\n\n @command.command(\"replay.client.file\")\n def load_file(self, path: mitmproxy.types.Path) -> None:\n try:\n flows = io.read_flows_from_paths([path])\n except exceptions.FlowReadException as e:\n raise exceptions.CommandError(str(e))\n ctx.log.alert(\"Replaying %s flows.\" % len(self.flows))\n self.flows = flows\n ctx.master.addons.trigger(\"update\", [])\n\n def configure(self, updated):\n if not self.configured and ctx.options.client_replay:\n self.configured = True\n ctx.log.info(\"Client Replay: {}\".format(ctx.options.client_replay))\n try:\n flows = io.read_flows_from_paths(ctx.options.client_replay)\n except exceptions.FlowReadException as e:\n raise exceptions.OptionsError(str(e))\n self.start_replay(flows)\n\n def tick(self):\n current_is_done = self.current_thread and not self.current_thread.is_alive()\n can_start_new = not self.current_thread or current_is_done\n will_start_new = can_start_new and self.flows\n\n if current_is_done:\n self.current_thread = None\n ctx.master.addons.trigger(\"update\", [])\n if will_start_new:\n f = self.flows.pop(0)\n self.current_thread = 
ctx.master.replay_request(f)\n ctx.master.addons.trigger(\"update\", [f])\n if current_is_done and not will_start_new:\n ctx.master.addons.trigger(\"processing_complete\")\n", "path": "mitmproxy/addons/clientplayback.py"}], "after_files": [{"content": "from mitmproxy import exceptions\nfrom mitmproxy import ctx\nfrom mitmproxy import io\nfrom mitmproxy import flow\nfrom mitmproxy import command\nimport mitmproxy.types\n\nimport typing\n\n\nclass ClientPlayback:\n def __init__(self):\n self.flows = [] # type: typing.List[flow.Flow]\n self.current_thread = None\n self.configured = False\n\n def count(self) -> int:\n if self.current_thread:\n current = 1\n else:\n current = 0\n return current + len(self.flows)\n\n @command.command(\"replay.client.stop\")\n def stop_replay(self) -> None:\n \"\"\"\n Stop client replay.\n \"\"\"\n self.flows = []\n ctx.log.alert(\"Client replay stopped.\")\n ctx.master.addons.trigger(\"update\", [])\n\n @command.command(\"replay.client\")\n def start_replay(self, flows: typing.Sequence[flow.Flow]) -> None:\n \"\"\"\n Replay requests from flows.\n \"\"\"\n for f in flows:\n if f.live:\n raise exceptions.CommandError(\"Can't replay live flow.\")\n self.flows = list(flows)\n ctx.log.alert(\"Replaying %s flows.\" % len(self.flows))\n ctx.master.addons.trigger(\"update\", [])\n\n @command.command(\"replay.client.file\")\n def load_file(self, path: mitmproxy.types.Path) -> None:\n try:\n flows = io.read_flows_from_paths([path])\n except exceptions.FlowReadException as e:\n raise exceptions.CommandError(str(e))\n ctx.log.alert(\"Replaying %s flows.\" % len(self.flows))\n self.flows = flows\n ctx.master.addons.trigger(\"update\", [])\n\n def configure(self, updated):\n if not self.configured and ctx.options.client_replay:\n self.configured = True\n ctx.log.info(\"Client Replay: {}\".format(ctx.options.client_replay))\n try:\n flows = io.read_flows_from_paths(ctx.options.client_replay)\n except exceptions.FlowReadException as e:\n raise exceptions.OptionsError(str(e))\n self.start_replay(flows)\n\n def tick(self):\n current_is_done = self.current_thread and not self.current_thread.is_alive()\n can_start_new = not self.current_thread or current_is_done\n will_start_new = can_start_new and self.flows\n\n if current_is_done:\n self.current_thread = None\n ctx.master.addons.trigger(\"update\", [])\n if will_start_new:\n f = self.flows.pop(0)\n self.current_thread = ctx.master.replay_request(f)\n ctx.master.addons.trigger(\"update\", [f])\n if current_is_done and not will_start_new:\n ctx.master.addons.trigger(\"processing_complete\")\n", "path": "mitmproxy/addons/clientplayback.py"}]} | 1,333 | 132 |
gh_patches_debug_16032 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-976 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Not recognizing '*' in a domain name in Route53 record set.
cfn-lint version: 0.20.2 (latest version at time of writing)
Description of issue:
cfn-lint does not recognize valid Route53 use of `*` in a record set. The record set deploys and works correctly; sample code is shown below.
```
Resources:
startest30prdnuskinioRoute53pubRecordSet:
Type: AWS::Route53::RecordSetGroup
Properties:
Comment: '*.test30 prod'
HostedZoneId: !Ref PrdNuskinIoPublicZone
RecordSets:
- Name: '*.test30.prd.nuskin.io.'
SetIdentifier: 'usw2'
ResourceRecords:
- '*.test30.prd.usw2.nuskin.io.' <------ gives "does not contain a valid domain name" error
TTL: '300'
Type: CNAME
Weight: 100
```
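
For reference, a quick standalone check against the pattern the rule currently uses (`REGEX_DOMAINNAME`, copied verbatim from the rule source below) reproduces the rejection; the snippet is illustrative only:

```python
import re

# Domain-name pattern currently used by the RecordSet rule; it has no
# allowance for a leading "*" wildcard label.
REGEX_DOMAINNAME = re.compile(
    r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*'
    r'([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])(.)$'
)

print(bool(REGEX_DOMAINNAME.match('test30.prd.usw2.nuskin.io.')))    # True
print(bool(REGEX_DOMAINNAME.match('*.test30.prd.usw2.nuskin.io.')))  # False, so the record is flagged
```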
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/route53/RecordSet.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import re
18 from cfnlint import CloudFormationLintRule
19 from cfnlint import RuleMatch
20 from cfnlint.helpers import REGEX_IPV4, REGEX_IPV6, REGEX_ALPHANUMERIC
21
22
23 class RecordSet(CloudFormationLintRule):
24 """Check Route53 Recordset Configuration"""
25 id = 'E3020'
26 shortdesc = 'Validate Route53 RecordSets'
27 description = 'Check if all RecordSets are correctly configured'
28 source_url = 'https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html'
29 tags = ['resources', 'route53', 'record_set']
30
31 REGEX_DOMAINNAME = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])(.)$')
32 REGEX_TXT = re.compile(r'^("[^"]{1,255}" *)*"[^"]{1,255}"$')
33 REGEX_CNAME_VALIDATIONS = re.compile(r'^.*\.acm-validations\.aws\.?$')
34
35 def count_c_names(self, records, path, cfn):
36 """ Count C Names """
37 matches = []
38
39 scenarios = cfn.get_object_without_nested_conditions(records, path)
40 for scenario in scenarios:
41 if len(scenario.get('Object')) > 1:
42 scenario = scenario.get('Scenario')
43 message = 'A CNAME recordset can only contain 1 value'
44 if scenario is None:
45 message = 'A CNAME recordset can only contain 1 value'
46 matches.append(
47 RuleMatch(path, message.format('/'.join(map(str, message)))))
48 else:
49 message = 'A CNAME recordset can only contain 1 value {0} at {1}'
50 scenario_text = ' and '.join(['when condition "%s" is %s' % (k, v) for (k, v) in scenario.items()])
51 matches.append(
52 RuleMatch(path, message.format(scenario_text, '/'.join(map(str, path)))))
53
54 return matches
55
56 def check_a_record(self, value, path):
57 """Check A record Configuration"""
58 matches = []
59
60 # Check if a valid IPv4 address is specified
61 if not re.match(REGEX_IPV4, value):
62 message = 'A record ({}) is not a valid IPv4 address'
63 matches.append(RuleMatch(path, message.format(value)))
64
65 return matches
66
67 def check_aaaa_record(self, value, path):
68 """Check AAAA record Configuration"""
69 matches = []
70
71 if not isinstance(value, dict):
72 # Check if a valid IPv4 address is specified
73 if not re.match(REGEX_IPV6, value):
74 message = 'AAAA record ({}) is not a valid IPv6 address'
75 matches.append(RuleMatch(path, message.format(value)))
76
77 return matches
78
79 def check_caa_record(self, value, path):
80 """Check CAA record Configuration"""
81 matches = []
82
83 if not isinstance(value, dict):
84 # Split the record up to the mandatory settings (flags tag "value")
85 items = value.split(' ', 2)
86 # Check if the 3 settings are given.
87 if len(items) != 3:
88 message = 'CAA record must contain 3 settings (flags tag "value"), record contains {} settings.'
89 matches.append(RuleMatch(path, message.format(len(items))))
90 else:
91 # Check the flag value
92 if not items[0].isdigit():
93 message = 'CAA record flag setting ({}) should be of type Integer.'
94 matches.append(RuleMatch(path, message.format(items[0])))
95 else:
96 if int(items[0]) not in [0, 128]:
97 message = 'Invalid CAA record flag setting ({}) given, must be 0 or 128.'
98 matches.append(RuleMatch(path, message.format(items[0])))
99
100 # Check the tag value
101 if not re.match(REGEX_ALPHANUMERIC, items[1]):
102 message = 'Invalid CAA record tag setting {}. Value has to be alphanumeric.'
103 matches.append(RuleMatch(path, message.format(items[1])))
104
105 # Check the value
106 if not items[2].startswith('"') or not items[2].endswith('"'):
107 message = 'CAA record value setting has to be enclosed in double quotation marks (").'
108 matches.append(RuleMatch(path, message))
109
110 return matches
111
112 def check_cname_record(self, value, path):
113 """Check CNAME record Configuration"""
114 matches = []
115
116 if not isinstance(value, dict):
117 if (not re.match(self.REGEX_DOMAINNAME, value) and
118 not re.match(self.REGEX_CNAME_VALIDATIONS, value)):
119 # ACM Route 53 validation uses invalid CNAMEs starting with `_`,
120 # special-case them rather than complicate the regex.
121 message = 'CNAME record ({}) does not contain a valid domain name'
122 matches.append(RuleMatch(path, message.format(value)))
123
124 return matches
125
126 def check_mx_record(self, value, path):
127 """Check MX record Configuration"""
128 matches = []
129
130 if not isinstance(value, dict):
131 # Split the record up to the mandatory settings (priority domainname)
132 items = value.split(' ')
133
134 # Check if the 3 settings are given.
135 if len(items) != 2:
136 message = 'MX record must contain 2 settings (priority domainname), record contains {} settings.'
137 matches.append(RuleMatch(path, message.format(len(items), value)))
138 else:
139 # Check the priority value
140 if not items[0].isdigit():
141 message = 'MX record priority setting ({}) should be of type Integer.'
142 matches.append(RuleMatch(path, message.format(items[0], value)))
143 else:
144 if not 0 <= int(items[0]) <= 65535:
145 message = 'Invalid MX record priority setting ({}) given, must be between 0 and 65535.'
146 matches.append(RuleMatch(path, message.format(items[0], value)))
147
148 # Check the domainname value
149 if not re.match(self.REGEX_DOMAINNAME, items[1]):
150 matches.append(RuleMatch(path, message.format(items[1])))
151
152 return matches
153
154 def check_ns_record(self, value, path):
155 """Check NS record Configuration"""
156 matches = []
157
158 if not isinstance(value, dict):
159 if not re.match(self.REGEX_DOMAINNAME, value):
160 message = 'NS record ({}) does not contain a valid domain name'
161 matches.append(RuleMatch(path, message.format(value)))
162
163 return matches
164
165 def check_ptr_record(self, value, path):
166 """Check PTR record Configuration"""
167 matches = []
168
169 if not isinstance(value, dict):
170 if not re.match(self.REGEX_DOMAINNAME, value):
171 message = 'PTR record ({}) does not contain a valid domain name'
172 matches.append(RuleMatch(path, message.format(value)))
173 return matches
174
175 def check_txt_record(self, value, path):
176 """Check TXT record Configuration"""
177 matches = []
178
179 if not isinstance(value, dict) and not re.match(self.REGEX_TXT, value):
180 message = 'TXT record is not structured as one or more items up to 255 characters ' \
181 'enclosed in double quotation marks at {0}'
182 matches.append(RuleMatch(
183 path,
184 (
185 message.format('/'.join(map(str, path)))
186 ),
187 ))
188
189 return matches
190
191 def check_recordset(self, path, recordset, cfn):
192 """Check record configuration"""
193
194 matches = []
195 recordset_type = recordset.get('Type')
196
197 # Skip Intrinsic functions
198 if not isinstance(recordset_type, dict):
199 if not recordset.get('AliasTarget'):
200 # If no Alias is specified, ResourceRecords has to be specified
201 if not recordset.get('ResourceRecords'):
202 return matches
203 # Record type specific checks
204 if recordset_type == 'A':
205 matches.extend(
206 cfn.check_value(
207 recordset, 'ResourceRecords', path[:],
208 check_value=self.check_a_record,
209 )
210 )
211 elif recordset_type == 'AAAA':
212 matches.extend(
213 cfn.check_value(
214 recordset, 'ResourceRecords', path[:],
215 check_value=self.check_aaaa_record,
216 )
217 )
218 elif recordset_type == 'CAA':
219 matches.extend(
220 cfn.check_value(
221 recordset, 'ResourceRecords', path[:],
222 check_value=self.check_caa_record,
223 )
224 )
225 elif recordset_type == 'CNAME':
226 matches.extend(
227 self.count_c_names(
228 recordset.get('ResourceRecords'), path[:] + ['ResourceRecords'], cfn
229 )
230 )
231 matches.extend(
232 cfn.check_value(
233 recordset, 'ResourceRecords', path[:],
234 check_value=self.check_cname_record,
235 )
236 )
237 elif recordset_type == 'MX':
238 matches.extend(
239 cfn.check_value(
240 recordset, 'ResourceRecords', path[:],
241 check_value=self.check_mx_record,
242 )
243 )
244 elif recordset_type == 'NS':
245 matches.extend(
246 cfn.check_value(
247 recordset, 'ResourceRecords', path[:],
248 check_value=self.check_ns_record,
249 )
250 )
251 elif recordset_type == 'PTR':
252 matches.extend(
253 cfn.check_value(
254 recordset, 'ResourceRecords', path[:],
255 check_value=self.check_ptr_record,
256 )
257 )
258 elif recordset_type == 'TXT':
259 matches.extend(
260 cfn.check_value(
261 recordset, 'ResourceRecords', path[:],
262 check_value=self.check_txt_record,
263 )
264 )
265
266 return matches
267
268 def match(self, cfn):
269 """Check RecordSets and RecordSetGroups Properties"""
270
271 matches = []
272
273 recordsets = cfn.get_resources(['AWS::Route53::RecordSet'])
274
275 for name, recordset in recordsets.items():
276 path = ['Resources', name, 'Properties']
277
278 if isinstance(recordset, dict):
279 props = recordset.get('Properties')
280 if props:
281 matches.extend(self.check_recordset(path, props, cfn))
282
283 recordsetgroups = cfn.get_resource_properties(['AWS::Route53::RecordSetGroup', 'RecordSets'])
284
285 for recordsetgroup in recordsetgroups:
286 path = recordsetgroup['Path']
287 value = recordsetgroup['Value']
288 if isinstance(value, list):
289 for index, recordset in enumerate(value):
290 tree = path[:] + [index]
291 matches.extend(self.check_recordset(tree, recordset, cfn))
292
293 return matches
294
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/route53/RecordSet.py b/src/cfnlint/rules/resources/route53/RecordSet.py
--- a/src/cfnlint/rules/resources/route53/RecordSet.py
+++ b/src/cfnlint/rules/resources/route53/RecordSet.py
@@ -28,7 +28,8 @@
source_url = 'https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html'
tags = ['resources', 'route53', 'record_set']
- REGEX_DOMAINNAME = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])(.)$')
+ # Regex generated from https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html
+ REGEX_DOMAINNAME = re.compile(r'^[a-zA-Z0-9\!\"\#\$\%\&\'\(\)\*\+\,-\/\:\;\<\=\>\?\@\[\\\]\^\_\`\{\|\}\~\.]+$')
REGEX_TXT = re.compile(r'^("[^"]{1,255}" *)*"[^"]{1,255}"$')
REGEX_CNAME_VALIDATIONS = re.compile(r'^.*\.acm-validations\.aws\.?$')
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/route53/RecordSet.py b/src/cfnlint/rules/resources/route53/RecordSet.py\n--- a/src/cfnlint/rules/resources/route53/RecordSet.py\n+++ b/src/cfnlint/rules/resources/route53/RecordSet.py\n@@ -28,7 +28,8 @@\n source_url = 'https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html'\n tags = ['resources', 'route53', 'record_set']\n \n- REGEX_DOMAINNAME = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9])(.)$')\n+ # Regex generated from https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html\n+ REGEX_DOMAINNAME = re.compile(r'^[a-zA-Z0-9\\!\\\"\\#\\$\\%\\&\\'\\(\\)\\*\\+\\,-\\/\\:\\;\\<\\=\\>\\?\\@\\[\\\\\\]\\^\\_\\`\\{\\|\\}\\~\\.]+$')\n REGEX_TXT = re.compile(r'^(\"[^\"]{1,255}\" *)*\"[^\"]{1,255}\"$')\n REGEX_CNAME_VALIDATIONS = re.compile(r'^.*\\.acm-validations\\.aws\\.?$')\n", "issue": "Not recognizing '*' in a domain name in Route53 record set.\ncfn-lint version: 0.20.2 (latest version at time of writing)\r\n\r\nDescription of issue:\r\nIs not recognizing valid Route53 use of * in a recordset. The record set deploys and works correctly. Sample code seen below.\r\n\r\n```\r\nResources:\r\n startest30prdnuskinioRoute53pubRecordSet:\r\n Type: AWS::Route53::RecordSetGroup\r\n Properties: \r\n Comment: '*.test30 prod'\r\n HostedZoneId: !Ref PrdNuskinIoPublicZone \r\n RecordSets:\r\n - Name: '*.test30.prd.nuskin.io.'\r\n SetIdentifier: 'usw2'\r\n ResourceRecords:\r\n - '*.test30.prd.usw2.nuskin.io.' <------ gives \"does not contain a valid domain name\" error\r\n TTL: '300'\r\n Type: CNAME\r\n Weight: 100\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\nfrom cfnlint.helpers import REGEX_IPV4, REGEX_IPV6, REGEX_ALPHANUMERIC\n\n\nclass RecordSet(CloudFormationLintRule):\n \"\"\"Check Route53 Recordset Configuration\"\"\"\n id = 'E3020'\n shortdesc = 'Validate Route53 RecordSets'\n description = 'Check if all RecordSets are correctly configured'\n source_url = 'https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html'\n tags = ['resources', 'route53', 'record_set']\n\n REGEX_DOMAINNAME = re.compile(r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\\-]*[a-zA-Z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\\-]*[A-Za-z0-9])(.)$')\n REGEX_TXT = re.compile(r'^(\"[^\"]{1,255}\" *)*\"[^\"]{1,255}\"$')\n REGEX_CNAME_VALIDATIONS = re.compile(r'^.*\\.acm-validations\\.aws\\.?$')\n\n def count_c_names(self, records, path, cfn):\n \"\"\" Count C Names \"\"\"\n matches = []\n\n scenarios = cfn.get_object_without_nested_conditions(records, path)\n for scenario in scenarios:\n if len(scenario.get('Object')) > 1:\n scenario = scenario.get('Scenario')\n message = 'A CNAME recordset can only contain 1 value'\n if scenario is None:\n message = 'A CNAME recordset can only contain 1 value'\n matches.append(\n RuleMatch(path, message.format('/'.join(map(str, message)))))\n else:\n message = 'A CNAME recordset can only contain 1 value {0} at {1}'\n scenario_text = ' and '.join(['when condition \"%s\" is %s' % (k, v) for (k, v) in scenario.items()])\n matches.append(\n RuleMatch(path, message.format(scenario_text, '/'.join(map(str, path)))))\n\n return matches\n\n def check_a_record(self, value, path):\n \"\"\"Check A record Configuration\"\"\"\n matches = []\n\n # Check if a valid IPv4 address is specified\n if not re.match(REGEX_IPV4, value):\n message = 'A record ({}) is not a valid IPv4 address'\n matches.append(RuleMatch(path, message.format(value)))\n\n return matches\n\n def check_aaaa_record(self, value, path):\n \"\"\"Check AAAA record Configuration\"\"\"\n matches = []\n\n if not isinstance(value, dict):\n # Check if a valid IPv4 address is specified\n if not re.match(REGEX_IPV6, value):\n message = 'AAAA record ({}) is not a valid IPv6 address'\n matches.append(RuleMatch(path, message.format(value)))\n\n return matches\n\n def check_caa_record(self, value, path):\n \"\"\"Check CAA record Configuration\"\"\"\n matches = []\n\n if not isinstance(value, dict):\n # Split the record up to the mandatory settings (flags tag \"value\")\n items = value.split(' ', 2)\n # Check if the 3 settings are given.\n if len(items) != 3:\n message = 'CAA record must contain 3 settings (flags tag \"value\"), record contains {} settings.'\n matches.append(RuleMatch(path, message.format(len(items))))\n else:\n # Check the flag value\n if not items[0].isdigit():\n message = 'CAA record flag setting ({}) should be of type Integer.'\n matches.append(RuleMatch(path, message.format(items[0])))\n else:\n if int(items[0]) not in [0, 128]:\n message = 'Invalid CAA record flag setting ({}) given, must be 0 or 128.'\n matches.append(RuleMatch(path, message.format(items[0])))\n\n # Check the tag value\n if not re.match(REGEX_ALPHANUMERIC, items[1]):\n message = 'Invalid CAA record tag setting {}. 
Value has to be alphanumeric.'\n matches.append(RuleMatch(path, message.format(items[1])))\n\n # Check the value\n if not items[2].startswith('\"') or not items[2].endswith('\"'):\n message = 'CAA record value setting has to be enclosed in double quotation marks (\").'\n matches.append(RuleMatch(path, message))\n\n return matches\n\n def check_cname_record(self, value, path):\n \"\"\"Check CNAME record Configuration\"\"\"\n matches = []\n\n if not isinstance(value, dict):\n if (not re.match(self.REGEX_DOMAINNAME, value) and\n not re.match(self.REGEX_CNAME_VALIDATIONS, value)):\n # ACM Route 53 validation uses invalid CNAMEs starting with `_`,\n # special-case them rather than complicate the regex.\n message = 'CNAME record ({}) does not contain a valid domain name'\n matches.append(RuleMatch(path, message.format(value)))\n\n return matches\n\n def check_mx_record(self, value, path):\n \"\"\"Check MX record Configuration\"\"\"\n matches = []\n\n if not isinstance(value, dict):\n # Split the record up to the mandatory settings (priority domainname)\n items = value.split(' ')\n\n # Check if the 3 settings are given.\n if len(items) != 2:\n message = 'MX record must contain 2 settings (priority domainname), record contains {} settings.'\n matches.append(RuleMatch(path, message.format(len(items), value)))\n else:\n # Check the priority value\n if not items[0].isdigit():\n message = 'MX record priority setting ({}) should be of type Integer.'\n matches.append(RuleMatch(path, message.format(items[0], value)))\n else:\n if not 0 <= int(items[0]) <= 65535:\n message = 'Invalid MX record priority setting ({}) given, must be between 0 and 65535.'\n matches.append(RuleMatch(path, message.format(items[0], value)))\n\n # Check the domainname value\n if not re.match(self.REGEX_DOMAINNAME, items[1]):\n matches.append(RuleMatch(path, message.format(items[1])))\n\n return matches\n\n def check_ns_record(self, value, path):\n \"\"\"Check NS record Configuration\"\"\"\n matches = []\n\n if not isinstance(value, dict):\n if not re.match(self.REGEX_DOMAINNAME, value):\n message = 'NS record ({}) does not contain a valid domain name'\n matches.append(RuleMatch(path, message.format(value)))\n\n return matches\n\n def check_ptr_record(self, value, path):\n \"\"\"Check PTR record Configuration\"\"\"\n matches = []\n\n if not isinstance(value, dict):\n if not re.match(self.REGEX_DOMAINNAME, value):\n message = 'PTR record ({}) does not contain a valid domain name'\n matches.append(RuleMatch(path, message.format(value)))\n return matches\n\n def check_txt_record(self, value, path):\n \"\"\"Check TXT record Configuration\"\"\"\n matches = []\n\n if not isinstance(value, dict) and not re.match(self.REGEX_TXT, value):\n message = 'TXT record is not structured as one or more items up to 255 characters ' \\\n 'enclosed in double quotation marks at {0}'\n matches.append(RuleMatch(\n path,\n (\n message.format('/'.join(map(str, path)))\n ),\n ))\n\n return matches\n\n def check_recordset(self, path, recordset, cfn):\n \"\"\"Check record configuration\"\"\"\n\n matches = []\n recordset_type = recordset.get('Type')\n\n # Skip Intrinsic functions\n if not isinstance(recordset_type, dict):\n if not recordset.get('AliasTarget'):\n # If no Alias is specified, ResourceRecords has to be specified\n if not recordset.get('ResourceRecords'):\n return matches\n # Record type specific checks\n if recordset_type == 'A':\n matches.extend(\n cfn.check_value(\n recordset, 'ResourceRecords', path[:],\n check_value=self.check_a_record,\n )\n 
)\n elif recordset_type == 'AAAA':\n matches.extend(\n cfn.check_value(\n recordset, 'ResourceRecords', path[:],\n check_value=self.check_aaaa_record,\n )\n )\n elif recordset_type == 'CAA':\n matches.extend(\n cfn.check_value(\n recordset, 'ResourceRecords', path[:],\n check_value=self.check_caa_record,\n )\n )\n elif recordset_type == 'CNAME':\n matches.extend(\n self.count_c_names(\n recordset.get('ResourceRecords'), path[:] + ['ResourceRecords'], cfn\n )\n )\n matches.extend(\n cfn.check_value(\n recordset, 'ResourceRecords', path[:],\n check_value=self.check_cname_record,\n )\n )\n elif recordset_type == 'MX':\n matches.extend(\n cfn.check_value(\n recordset, 'ResourceRecords', path[:],\n check_value=self.check_mx_record,\n )\n )\n elif recordset_type == 'NS':\n matches.extend(\n cfn.check_value(\n recordset, 'ResourceRecords', path[:],\n check_value=self.check_ns_record,\n )\n )\n elif recordset_type == 'PTR':\n matches.extend(\n cfn.check_value(\n recordset, 'ResourceRecords', path[:],\n check_value=self.check_ptr_record,\n )\n )\n elif recordset_type == 'TXT':\n matches.extend(\n cfn.check_value(\n recordset, 'ResourceRecords', path[:],\n check_value=self.check_txt_record,\n )\n )\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check RecordSets and RecordSetGroups Properties\"\"\"\n\n matches = []\n\n recordsets = cfn.get_resources(['AWS::Route53::RecordSet'])\n\n for name, recordset in recordsets.items():\n path = ['Resources', name, 'Properties']\n\n if isinstance(recordset, dict):\n props = recordset.get('Properties')\n if props:\n matches.extend(self.check_recordset(path, props, cfn))\n\n recordsetgroups = cfn.get_resource_properties(['AWS::Route53::RecordSetGroup', 'RecordSets'])\n\n for recordsetgroup in recordsetgroups:\n path = recordsetgroup['Path']\n value = recordsetgroup['Value']\n if isinstance(value, list):\n for index, recordset in enumerate(value):\n tree = path[:] + [index]\n matches.extend(self.check_recordset(tree, recordset, cfn))\n\n return matches\n", "path": "src/cfnlint/rules/resources/route53/RecordSet.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\nfrom cfnlint.helpers import REGEX_IPV4, REGEX_IPV6, REGEX_ALPHANUMERIC\n\n\nclass RecordSet(CloudFormationLintRule):\n \"\"\"Check Route53 Recordset Configuration\"\"\"\n id = 'E3020'\n shortdesc = 'Validate Route53 RecordSets'\n description = 'Check if all RecordSets are correctly configured'\n source_url = 'https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html'\n tags = ['resources', 'route53', 'record_set']\n\n # Regex generated from https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html\n REGEX_DOMAINNAME = re.compile(r'^[a-zA-Z0-9\\!\\\"\\#\\$\\%\\&\\'\\(\\)\\*\\+\\,-\\/\\:\\;\\<\\=\\>\\?\\@\\[\\\\\\]\\^\\_\\`\\{\\|\\}\\~\\.]+$')\n REGEX_TXT = re.compile(r'^(\"[^\"]{1,255}\" *)*\"[^\"]{1,255}\"$')\n REGEX_CNAME_VALIDATIONS = re.compile(r'^.*\\.acm-validations\\.aws\\.?$')\n\n def count_c_names(self, records, path, cfn):\n \"\"\" Count C Names \"\"\"\n matches = []\n\n scenarios = cfn.get_object_without_nested_conditions(records, path)\n for scenario in scenarios:\n if len(scenario.get('Object')) > 1:\n scenario = scenario.get('Scenario')\n message = 'A CNAME recordset can only contain 1 value'\n if scenario is None:\n message = 'A CNAME recordset can only contain 1 value'\n matches.append(\n RuleMatch(path, message.format('/'.join(map(str, message)))))\n else:\n message = 'A CNAME recordset can only contain 1 value {0} at {1}'\n scenario_text = ' and '.join(['when condition \"%s\" is %s' % (k, v) for (k, v) in scenario.items()])\n matches.append(\n RuleMatch(path, message.format(scenario_text, '/'.join(map(str, path)))))\n\n return matches\n\n def check_a_record(self, value, path):\n \"\"\"Check A record Configuration\"\"\"\n matches = []\n\n # Check if a valid IPv4 address is specified\n if not re.match(REGEX_IPV4, value):\n message = 'A record ({}) is not a valid IPv4 address'\n matches.append(RuleMatch(path, message.format(value)))\n\n return matches\n\n def check_aaaa_record(self, value, path):\n \"\"\"Check AAAA record Configuration\"\"\"\n matches = []\n\n if not isinstance(value, dict):\n # Check if a valid IPv4 address is specified\n if not re.match(REGEX_IPV6, value):\n message = 'AAAA record ({}) is not a valid IPv6 address'\n matches.append(RuleMatch(path, message.format(value)))\n\n return matches\n\n def check_caa_record(self, value, path):\n \"\"\"Check CAA record Configuration\"\"\"\n matches = []\n\n if not isinstance(value, dict):\n # Split the record up to the mandatory settings (flags tag \"value\")\n items = value.split(' ', 2)\n # Check if the 3 settings are given.\n if len(items) != 3:\n message = 'CAA record must contain 3 settings (flags tag \"value\"), record contains {} settings.'\n matches.append(RuleMatch(path, message.format(len(items))))\n else:\n # Check the flag value\n if not items[0].isdigit():\n message = 'CAA record flag setting ({}) should be of type Integer.'\n matches.append(RuleMatch(path, message.format(items[0])))\n else:\n if int(items[0]) not in [0, 128]:\n message = 'Invalid CAA record flag setting ({}) given, must be 0 or 128.'\n matches.append(RuleMatch(path, message.format(items[0])))\n\n # Check the tag value\n if not 
re.match(REGEX_ALPHANUMERIC, items[1]):\n message = 'Invalid CAA record tag setting {}. Value has to be alphanumeric.'\n matches.append(RuleMatch(path, message.format(items[1])))\n\n # Check the value\n if not items[2].startswith('\"') or not items[2].endswith('\"'):\n message = 'CAA record value setting has to be enclosed in double quotation marks (\").'\n matches.append(RuleMatch(path, message))\n\n return matches\n\n def check_cname_record(self, value, path):\n \"\"\"Check CNAME record Configuration\"\"\"\n matches = []\n\n if not isinstance(value, dict):\n if (not re.match(self.REGEX_DOMAINNAME, value) and\n not re.match(self.REGEX_CNAME_VALIDATIONS, value)):\n # ACM Route 53 validation uses invalid CNAMEs starting with `_`,\n # special-case them rather than complicate the regex.\n message = 'CNAME record ({}) does not contain a valid domain name'\n matches.append(RuleMatch(path, message.format(value)))\n\n return matches\n\n def check_mx_record(self, value, path):\n \"\"\"Check MX record Configuration\"\"\"\n matches = []\n\n if not isinstance(value, dict):\n # Split the record up to the mandatory settings (priority domainname)\n items = value.split(' ')\n\n # Check if the 3 settings are given.\n if len(items) != 2:\n message = 'MX record must contain 2 settings (priority domainname), record contains {} settings.'\n matches.append(RuleMatch(path, message.format(len(items), value)))\n else:\n # Check the priority value\n if not items[0].isdigit():\n message = 'MX record priority setting ({}) should be of type Integer.'\n matches.append(RuleMatch(path, message.format(items[0], value)))\n else:\n if not 0 <= int(items[0]) <= 65535:\n message = 'Invalid MX record priority setting ({}) given, must be between 0 and 65535.'\n matches.append(RuleMatch(path, message.format(items[0], value)))\n\n # Check the domainname value\n if not re.match(self.REGEX_DOMAINNAME, items[1]):\n matches.append(RuleMatch(path, message.format(items[1])))\n\n return matches\n\n def check_ns_record(self, value, path):\n \"\"\"Check NS record Configuration\"\"\"\n matches = []\n\n if not isinstance(value, dict):\n if not re.match(self.REGEX_DOMAINNAME, value):\n message = 'NS record ({}) does not contain a valid domain name'\n matches.append(RuleMatch(path, message.format(value)))\n\n return matches\n\n def check_ptr_record(self, value, path):\n \"\"\"Check PTR record Configuration\"\"\"\n matches = []\n\n if not isinstance(value, dict):\n if not re.match(self.REGEX_DOMAINNAME, value):\n message = 'PTR record ({}) does not contain a valid domain name'\n matches.append(RuleMatch(path, message.format(value)))\n return matches\n\n def check_txt_record(self, value, path):\n \"\"\"Check TXT record Configuration\"\"\"\n matches = []\n\n if not isinstance(value, dict) and not re.match(self.REGEX_TXT, value):\n message = 'TXT record is not structured as one or more items up to 255 characters ' \\\n 'enclosed in double quotation marks at {0}'\n matches.append(RuleMatch(\n path,\n (\n message.format('/'.join(map(str, path)))\n ),\n ))\n\n return matches\n\n def check_recordset(self, path, recordset, cfn):\n \"\"\"Check record configuration\"\"\"\n\n matches = []\n recordset_type = recordset.get('Type')\n\n # Skip Intrinsic functions\n if not isinstance(recordset_type, dict):\n if not recordset.get('AliasTarget'):\n # If no Alias is specified, ResourceRecords has to be specified\n if not recordset.get('ResourceRecords'):\n return matches\n # Record type specific checks\n if recordset_type == 'A':\n matches.extend(\n 
cfn.check_value(\n recordset, 'ResourceRecords', path[:],\n check_value=self.check_a_record,\n )\n )\n elif recordset_type == 'AAAA':\n matches.extend(\n cfn.check_value(\n recordset, 'ResourceRecords', path[:],\n check_value=self.check_aaaa_record,\n )\n )\n elif recordset_type == 'CAA':\n matches.extend(\n cfn.check_value(\n recordset, 'ResourceRecords', path[:],\n check_value=self.check_caa_record,\n )\n )\n elif recordset_type == 'CNAME':\n matches.extend(\n self.count_c_names(\n recordset.get('ResourceRecords'), path[:] + ['ResourceRecords'], cfn\n )\n )\n matches.extend(\n cfn.check_value(\n recordset, 'ResourceRecords', path[:],\n check_value=self.check_cname_record,\n )\n )\n elif recordset_type == 'MX':\n matches.extend(\n cfn.check_value(\n recordset, 'ResourceRecords', path[:],\n check_value=self.check_mx_record,\n )\n )\n elif recordset_type == 'NS':\n matches.extend(\n cfn.check_value(\n recordset, 'ResourceRecords', path[:],\n check_value=self.check_ns_record,\n )\n )\n elif recordset_type == 'PTR':\n matches.extend(\n cfn.check_value(\n recordset, 'ResourceRecords', path[:],\n check_value=self.check_ptr_record,\n )\n )\n elif recordset_type == 'TXT':\n matches.extend(\n cfn.check_value(\n recordset, 'ResourceRecords', path[:],\n check_value=self.check_txt_record,\n )\n )\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check RecordSets and RecordSetGroups Properties\"\"\"\n\n matches = []\n\n recordsets = cfn.get_resources(['AWS::Route53::RecordSet'])\n\n for name, recordset in recordsets.items():\n path = ['Resources', name, 'Properties']\n\n if isinstance(recordset, dict):\n props = recordset.get('Properties')\n if props:\n matches.extend(self.check_recordset(path, props, cfn))\n\n recordsetgroups = cfn.get_resource_properties(['AWS::Route53::RecordSetGroup', 'RecordSets'])\n\n for recordsetgroup in recordsetgroups:\n path = recordsetgroup['Path']\n value = recordsetgroup['Value']\n if isinstance(value, list):\n for index, recordset in enumerate(value):\n tree = path[:] + [index]\n matches.extend(self.check_recordset(tree, recordset, cfn))\n\n return matches\n", "path": "src/cfnlint/rules/resources/route53/RecordSet.py"}]} | 3,811 | 336 |
gh_patches_debug_20431 | rasdani/github-patches | git_diff | scikit-hep__awkward-2373 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`ak.unzip` touches all contents whilst typetracing
### Version of Awkward Array
HEAD, after #2370
### Description and code to reproduce
Even after applying #2370 (which fixes a similar bug), the following code:
```python
import json
import awkward as ak
def delta_r2(a, b):
return (a.eta - b.eta) ** 2 + (a.phi - b.phi) ** 2
fromjson = {
"class": "RecordArray",
"fields": ["muon", "jet"],
"contents": [
{
"class": "ListOffsetArray",
"offsets": "i64",
"content": {
"class": "RecordArray",
"fields": ["pt", "eta", "phi", "crossref"],
"contents": [
{
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "muon_pt!",
},
{
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "muon_eta!",
},
{
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "muon_phi!",
},
{
"class": "ListOffsetArray",
"offsets": "i64",
"content": {
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "muon_crossref_content!",
},
"parameters": {},
"form_key": "muon_crossref_index!",
},
],
"parameters": {},
"form_key": "muon_record!",
},
"parameters": {},
"form_key": "muon_list!",
},
{
"class": "ListOffsetArray",
"offsets": "i64",
"content": {
"class": "RecordArray",
"fields": ["pt", "eta", "phi", "crossref", "thing1"],
"contents": [
{
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "jet_pt!",
},
{
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "jet_eta!",
},
{
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "jet_phi!",
},
{
"class": "ListOffsetArray",
"offsets": "i64",
"content": {
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "jet_crossref_content!",
},
"parameters": {},
"form_key": "jet_crossref_index!",
},
{
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "jet_thing1!",
},
],
"parameters": {},
"form_key": "jet_record!",
},
"parameters": {},
"form_key": "jet_list!",
},
],
"parameters": {},
"form_key": "outer!",
}
form = ak.forms.from_json(json.dumps(fromjson))
ttlayout, report = ak._nplikes.typetracer.typetracer_with_report(form)
ttarray = ak.Array(ttlayout)
a, b = ak.unzip(ak.cartesian([ttarray.muon, ttarray.jet], axis=1, nested=True))
print("ab>>>", report.data_touched, "\n")
mval = delta_r2(a, b)
print("dr>>>>", report.data_touched, "\n")
mmin = ak.argmin(mval, axis=2)
print("mmin>>", report.data_touched, "\n")
ak.firsts(b[mmin], axis=2).pt
print("pt>>>>", report.data_touched, "\n")
```
produces the following output:
```
ab>>> ['muon_list!', 'jet_list!', 'muon_crossref_index!', 'jet_crossref_index!']
dr>>>> ['muon_list!', 'jet_list!', 'muon_crossref_index!', 'jet_crossref_index!', 'muon_eta!', 'jet_eta!', 'muon_phi!', 'jet_phi!']
mmin>> ['muon_list!', 'jet_list!', 'muon_crossref_index!', 'jet_crossref_index!', 'muon_eta!', 'jet_eta!', 'muon_phi!', 'jet_phi!']
pt>>>> ['muon_list!', 'jet_list!', 'muon_crossref_index!', 'jet_crossref_index!', 'muon_eta!', 'jet_eta!', 'muon_phi!', 'jet_phi!', 'jet_pt!', 'jet_crossref_content!', 'jet_thing1!']
```
It's a little mysterious (but not a major pain point) that the "crossref indexes" are touched by the `ak.cartesian` step, which ought to treat the muons and jets as opaque objects; that oddity may be related to the real problem below.
But the real problem is represented by the "jet thing" in the final output. The "jet thing" should never have been accessed by any of these operations: `delta_r2`, `ak.argmin`, the `b[mmin]` slice, or `ak.firsts`. It's also a stand-in for dozens of large fields, so it's a real pain point that needs to be addressed.
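
A minimal sketch of the mechanism a fix can rely on, assuming `ak._do.recursively_apply` behaves as in the `ak_unzip.py` source below (returning a layout from the visitor halts descent into that branch, while returning `None` keeps descending):

```python
import awkward as ak

def stop_at_records(layout, **kwargs):
    if isinstance(layout, ak.contents.RecordArray):
        return layout  # returning a layout halts recursion into this branch
    return None        # returning None lets recursively_apply keep descending

array = ak.Array([[{"x": 1.1, "y": [1, 2]}], []])
ak._do.recursively_apply(array.layout, stop_at_records, None, return_array=False)
# The ListOffsetArray and the RecordArray are visited; the buffers inside the
# record are never reached, which is the behavior ak.unzip's check should have.
```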
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/awkward/operations/ak_unzip.py`
Content:
```
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2 __all__ = ("unzip",)
3 import awkward as ak
4 from awkward._behavior import behavior_of
5 from awkward._layout import wrap_layout
6 from awkward._nplikes.numpylike import NumpyMetadata
7
8 np = NumpyMetadata.instance()
9
10
11 def unzip(array, *, highlevel=True, behavior=None):
12 """
13 Args:
14 array: Array-like data (anything #ak.to_layout recognizes).
15 highlevel (bool): If True, return an #ak.Array; otherwise, return
16 a low-level #ak.contents.Content subclass.
17 behavior (None or dict): Custom #ak.behavior for the output array, if
18 high-level.
19
20 If the `array` contains tuples or records, this operation splits them
21 into a Python tuple of arrays, one for each field.
22
23 If the `array` does not contain tuples or records, the single `array`
24 is placed in a length 1 Python tuple.
25
26 For example,
27
28 >>> array = ak.Array([{"x": 1.1, "y": [1]},
29 ... {"x": 2.2, "y": [2, 2]},
30 ... {"x": 3.3, "y": [3, 3, 3]}])
31 >>> x, y = ak.unzip(array)
32 >>> x
33 <Array [1.1, 2.2, 3.3] type='3 * float64'>
34 >>> y
35 <Array [[1], [2, 2], [3, 3, 3]] type='3 * var * int64'>
36 """
37 with ak._errors.OperationErrorContext(
38 "ak.unzip",
39 {"array": array, "highlevel": highlevel, "behavior": behavior},
40 ):
41 return _impl(array, highlevel, behavior)
42
43
44 def _impl(array, highlevel, behavior):
45 behavior = behavior_of(array, behavior=behavior)
46 layout = ak.operations.to_layout(array, allow_record=True, allow_other=False)
47 fields = ak.operations.fields(layout)
48
49 def check_for_union(layout, **kwargs):
50 if isinstance(layout, (ak.contents.RecordArray, ak.Record)):
51 pass # don't descend into nested records
52
53 elif isinstance(layout, ak.contents.UnionArray):
54 for content in layout.contents:
55 if set(ak.operations.fields(content)) != set(fields):
56 raise ak._errors.wrap_error(
57 ValueError("union of different sets of fields, cannot ak.unzip")
58 )
59
60 elif hasattr(layout, "content"):
61 check_for_union(layout.content)
62
63 ak._do.recursively_apply(layout, check_for_union, behavior, return_array=False)
64
65 if len(fields) == 0:
66 return (wrap_layout(layout, behavior, highlevel, allow_other=True),)
67 else:
68 return tuple(
69 wrap_layout(layout[n], behavior, highlevel, allow_other=True)
70 for n in fields
71 )
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/awkward/operations/ak_unzip.py b/src/awkward/operations/ak_unzip.py
--- a/src/awkward/operations/ak_unzip.py
+++ b/src/awkward/operations/ak_unzip.py
@@ -48,18 +48,15 @@
def check_for_union(layout, **kwargs):
if isinstance(layout, (ak.contents.RecordArray, ak.Record)):
- pass # don't descend into nested records
+ return layout # don't descend into nested records
- elif isinstance(layout, ak.contents.UnionArray):
+ elif layout.is_union:
for content in layout.contents:
if set(ak.operations.fields(content)) != set(fields):
raise ak._errors.wrap_error(
ValueError("union of different sets of fields, cannot ak.unzip")
)
- elif hasattr(layout, "content"):
- check_for_union(layout.content)
-
ak._do.recursively_apply(layout, check_for_union, behavior, return_array=False)
if len(fields) == 0:
| {"golden_diff": "diff --git a/src/awkward/operations/ak_unzip.py b/src/awkward/operations/ak_unzip.py\n--- a/src/awkward/operations/ak_unzip.py\n+++ b/src/awkward/operations/ak_unzip.py\n@@ -48,18 +48,15 @@\n \n def check_for_union(layout, **kwargs):\n if isinstance(layout, (ak.contents.RecordArray, ak.Record)):\n- pass # don't descend into nested records\n+ return layout # don't descend into nested records\n \n- elif isinstance(layout, ak.contents.UnionArray):\n+ elif layout.is_union:\n for content in layout.contents:\n if set(ak.operations.fields(content)) != set(fields):\n raise ak._errors.wrap_error(\n ValueError(\"union of different sets of fields, cannot ak.unzip\")\n )\n \n- elif hasattr(layout, \"content\"):\n- check_for_union(layout.content)\n-\n ak._do.recursively_apply(layout, check_for_union, behavior, return_array=False)\n \n if len(fields) == 0:\n", "issue": "`ak.unzip` touches all contents whilst typetracing\n### Version of Awkward Array\n\nHEAD, after #2370\n\n### Description and code to reproduce\n\nEven after applying #2370 (which fixes a similar bug), the following code:\r\n\r\n```python\r\nimport json\r\nimport awkward as ak\r\n\r\n\r\ndef delta_r2(a, b):\r\n return (a.eta - b.eta) ** 2 + (a.phi - b.phi) ** 2\r\n\r\n\r\nfromjson = {\r\n \"class\": \"RecordArray\",\r\n \"fields\": [\"muon\", \"jet\"],\r\n \"contents\": [\r\n {\r\n \"class\": \"ListOffsetArray\",\r\n \"offsets\": \"i64\",\r\n \"content\": {\r\n \"class\": \"RecordArray\",\r\n \"fields\": [\"pt\", \"eta\", \"phi\", \"crossref\"],\r\n \"contents\": [\r\n {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": \"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"muon_pt!\",\r\n },\r\n {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": \"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"muon_eta!\",\r\n },\r\n {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": \"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"muon_phi!\",\r\n },\r\n {\r\n \"class\": \"ListOffsetArray\",\r\n \"offsets\": \"i64\",\r\n \"content\": {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": \"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"muon_crossref_content!\",\r\n },\r\n \"parameters\": {},\r\n \"form_key\": \"muon_crossref_index!\",\r\n },\r\n ],\r\n \"parameters\": {},\r\n \"form_key\": \"muon_record!\",\r\n },\r\n \"parameters\": {},\r\n \"form_key\": \"muon_list!\",\r\n },\r\n {\r\n \"class\": \"ListOffsetArray\",\r\n \"offsets\": \"i64\",\r\n \"content\": {\r\n \"class\": \"RecordArray\",\r\n \"fields\": [\"pt\", \"eta\", \"phi\", \"crossref\", \"thing1\"],\r\n \"contents\": [\r\n {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": \"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"jet_pt!\",\r\n },\r\n {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": \"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"jet_eta!\",\r\n },\r\n {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": \"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"jet_phi!\",\r\n },\r\n {\r\n \"class\": \"ListOffsetArray\",\r\n \"offsets\": \"i64\",\r\n \"content\": {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": \"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"jet_crossref_content!\",\r\n },\r\n \"parameters\": {},\r\n \"form_key\": \"jet_crossref_index!\",\r\n },\r\n {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": 
\"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"jet_thing1!\",\r\n },\r\n ],\r\n \"parameters\": {},\r\n \"form_key\": \"jet_record!\",\r\n },\r\n \"parameters\": {},\r\n \"form_key\": \"jet_list!\",\r\n },\r\n ],\r\n \"parameters\": {},\r\n \"form_key\": \"outer!\",\r\n}\r\n\r\nform = ak.forms.from_json(json.dumps(fromjson))\r\n\r\nttlayout, report = ak._nplikes.typetracer.typetracer_with_report(form)\r\n\r\nttarray = ak.Array(ttlayout)\r\n\r\na, b = ak.unzip(ak.cartesian([ttarray.muon, ttarray.jet], axis=1, nested=True))\r\n\r\nprint(\"ab>>>\", report.data_touched, \"\\n\")\r\n\r\nmval = delta_r2(a, b)\r\n\r\nprint(\"dr>>>>\", report.data_touched, \"\\n\")\r\n\r\nmmin = ak.argmin(mval, axis=2)\r\n\r\nprint(\"mmin>>\", report.data_touched, \"\\n\")\r\n\r\nak.firsts(b[mmin], axis=2).pt\r\n\r\nprint(\"pt>>>>\", report.data_touched, \"\\n\")\r\n```\r\n\r\nproduces the following output:\r\n\r\n```\r\nab>>> ['muon_list!', 'jet_list!', 'muon_crossref_index!', 'jet_crossref_index!'] \r\n\r\ndr>>>> ['muon_list!', 'jet_list!', 'muon_crossref_index!', 'jet_crossref_index!', 'muon_eta!', 'jet_eta!', 'muon_phi!', 'jet_phi!'] \r\n\r\nmmin>> ['muon_list!', 'jet_list!', 'muon_crossref_index!', 'jet_crossref_index!', 'muon_eta!', 'jet_eta!', 'muon_phi!', 'jet_phi!'] \r\n\r\npt>>>> ['muon_list!', 'jet_list!', 'muon_crossref_index!', 'jet_crossref_index!', 'muon_eta!', 'jet_eta!', 'muon_phi!', 'jet_phi!', 'jet_pt!', 'jet_crossref_content!', 'jet_thing1!'] \r\n```\r\n\r\nIt's a little mysterious (but not a major pain point) that the \"crossref indexes\" are touched by the `ak.cartesian` step, which ought to treat the muons and jets as opaque objects. A little mysterious, and maybe related.\r\n\r\nBut the real problem is represented by the \"jet thing\" in the final output. The \"jet thing\" should never have been accessed by any of these operations: `delta_r2`, `ak.argmin`, the `b[mmin]` slice, or `ak.firsts`. It's also a stand-in for dozens of large fields, so it's a real pain point that needs to be addressed.\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n__all__ = (\"unzip\",)\nimport awkward as ak\nfrom awkward._behavior import behavior_of\nfrom awkward._layout import wrap_layout\nfrom awkward._nplikes.numpylike import NumpyMetadata\n\nnp = NumpyMetadata.instance()\n\n\ndef unzip(array, *, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Array-like data (anything #ak.to_layout recognizes).\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n If the `array` contains tuples or records, this operation splits them\n into a Python tuple of arrays, one for each field.\n\n If the `array` does not contain tuples or records, the single `array`\n is placed in a length 1 Python tuple.\n\n For example,\n\n >>> array = ak.Array([{\"x\": 1.1, \"y\": [1]},\n ... {\"x\": 2.2, \"y\": [2, 2]},\n ... 
{\"x\": 3.3, \"y\": [3, 3, 3]}])\n >>> x, y = ak.unzip(array)\n >>> x\n <Array [1.1, 2.2, 3.3] type='3 * float64'>\n >>> y\n <Array [[1], [2, 2], [3, 3, 3]] type='3 * var * int64'>\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.unzip\",\n {\"array\": array, \"highlevel\": highlevel, \"behavior\": behavior},\n ):\n return _impl(array, highlevel, behavior)\n\n\ndef _impl(array, highlevel, behavior):\n behavior = behavior_of(array, behavior=behavior)\n layout = ak.operations.to_layout(array, allow_record=True, allow_other=False)\n fields = ak.operations.fields(layout)\n\n def check_for_union(layout, **kwargs):\n if isinstance(layout, (ak.contents.RecordArray, ak.Record)):\n pass # don't descend into nested records\n\n elif isinstance(layout, ak.contents.UnionArray):\n for content in layout.contents:\n if set(ak.operations.fields(content)) != set(fields):\n raise ak._errors.wrap_error(\n ValueError(\"union of different sets of fields, cannot ak.unzip\")\n )\n\n elif hasattr(layout, \"content\"):\n check_for_union(layout.content)\n\n ak._do.recursively_apply(layout, check_for_union, behavior, return_array=False)\n\n if len(fields) == 0:\n return (wrap_layout(layout, behavior, highlevel, allow_other=True),)\n else:\n return tuple(\n wrap_layout(layout[n], behavior, highlevel, allow_other=True)\n for n in fields\n )\n", "path": "src/awkward/operations/ak_unzip.py"}], "after_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n__all__ = (\"unzip\",)\nimport awkward as ak\nfrom awkward._behavior import behavior_of\nfrom awkward._layout import wrap_layout\nfrom awkward._nplikes.numpylike import NumpyMetadata\n\nnp = NumpyMetadata.instance()\n\n\ndef unzip(array, *, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Array-like data (anything #ak.to_layout recognizes).\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n If the `array` contains tuples or records, this operation splits them\n into a Python tuple of arrays, one for each field.\n\n If the `array` does not contain tuples or records, the single `array`\n is placed in a length 1 Python tuple.\n\n For example,\n\n >>> array = ak.Array([{\"x\": 1.1, \"y\": [1]},\n ... {\"x\": 2.2, \"y\": [2, 2]},\n ... 
{\"x\": 3.3, \"y\": [3, 3, 3]}])\n >>> x, y = ak.unzip(array)\n >>> x\n <Array [1.1, 2.2, 3.3] type='3 * float64'>\n >>> y\n <Array [[1], [2, 2], [3, 3, 3]] type='3 * var * int64'>\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.unzip\",\n {\"array\": array, \"highlevel\": highlevel, \"behavior\": behavior},\n ):\n return _impl(array, highlevel, behavior)\n\n\ndef _impl(array, highlevel, behavior):\n behavior = behavior_of(array, behavior=behavior)\n layout = ak.operations.to_layout(array, allow_record=True, allow_other=False)\n fields = ak.operations.fields(layout)\n\n def check_for_union(layout, **kwargs):\n if isinstance(layout, (ak.contents.RecordArray, ak.Record)):\n return layout # don't descend into nested records\n\n elif layout.is_union:\n for content in layout.contents:\n if set(ak.operations.fields(content)) != set(fields):\n raise ak._errors.wrap_error(\n ValueError(\"union of different sets of fields, cannot ak.unzip\")\n )\n\n ak._do.recursively_apply(layout, check_for_union, behavior, return_array=False)\n\n if len(fields) == 0:\n return (wrap_layout(layout, behavior, highlevel, allow_other=True),)\n else:\n return tuple(\n wrap_layout(layout[n], behavior, highlevel, allow_other=True)\n for n in fields\n )\n", "path": "src/awkward/operations/ak_unzip.py"}]} | 2,366 | 232 |
gh_patches_debug_13140 | rasdani/github-patches | git_diff | google__flax-2553 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DynamicScale goes to infinity
### System information
- OS Platform and Distribution: Linux 5.15.74-3-MANJARO
- Flax, jax, jaxlib versions: flax 0.6.0, jax 0.3.21, jaxlib 0.3.20+cuda11.cudnn82
- Python version: 3.10.4
- GPU/TPU model and memory: NVIDIA RTX 2060 Super 8GB
- CUDA version (if applicable): cuda11, cudnn82
### Problem you have encountered:
I adapted the gradient scaling code of the ImageNet example in order to use fp16 training for my model. During training, the `scale` parameter of `DynamicScale` became `inf`, because it kept doubling every 2000 steps without the gradients ever becoming `nan`. Once the `scale` hit `inf` it never went back down, since dividing `inf` by any finite number still returns `inf`.
### What you expected to happen:
Either have a `maximum_scale` parameter or add a check to see if `scale` is finite.
I think it's a one line change in the `DynamicScale` code, so I can make a pull request if you agree.
### Steps to reproduce:
```py
import jax.numpy as jnp
from flax.training import dynamic_scale
ds = dynamic_scale.DynamicScale(growth_factor=10.0, growth_interval=1)
x = jnp.float32(1)
for _ in range(70):
ds, is_fin, aux, grad = ds.value_and_grad(lambda p: p**2)(x)
x = x - 0.1 * grad
print(is_fin, ds.scale, x, grad)
```
### Code:
A check could be added here ([dynamic scale code](https://github.com/google/flax/blob/6b80cbb239b07e370fd0af6655b39ef40de061c0/flax/training/dynamic_scale.py#L139)) to verify that `fin_scale` is finite.
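
A self-contained sketch of the failure mode, plus one possible guard (the guard is an assumption about how a fix could look, not the actual patch):

```py
import jax.numpy as jnp

# Failure mode: repeated growth of a float32 scale overflows to inf, and the
# backoff can never recover because inf * 0.5 is still inf.
scale = jnp.float32(65536.0)
for _ in range(300):
    scale = scale * 2.0
print(scale, scale * 0.5)  # inf inf

# Possible guard: keep the previous scale whenever growth would overflow.
def grow_scale(scale, growth_factor=2.0):
    grown = scale * growth_factor
    return jnp.where(jnp.isfinite(grown), grown, scale)

print(grow_scale(jnp.float32(3e38)))  # stays ~3e38 instead of becoming inf
```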
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flax/training/dynamic_scale.py`
Content:
```
1 # Copyright 2022 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Dynamic loss scaling for mixed precision gradients.
16 """
17
18 import functools
19 from typing import Any, Callable, NamedTuple, Optional, Sequence, Union
20
21 from .. import struct
22
23 import jax
24 from jax import lax
25 import jax.numpy as jnp
26
27
28
29 Array = Any
30
31
32 class DynamicScaleResult(NamedTuple):
33 dynamic_scale: 'DynamicScale'
34 finite: Array
35 aux: Any
36 grad: Any
37
38
39 class DynamicScale(struct.PyTreeNode):
40 """Dynamic loss scaling for mixed precision gradients.
41
42 For many models gradient computations in float16 will result in numerical
43 issues because small/large gradients being flushed to zero/infinity.
44 Dynamic loss scaling is an algorithm that aims to find the largest scalar
45 multiple for which the gradient does not overflow. This way the risk of
46 underflow is minimized.
47
48 the `value_and_grad` method mimicks `jax.value_and_grad`. Beside the loss
49 and gradients it also ouputs and updated `DynamicScale` instance with the
50 current loss scale factor. This method also returns a boolean value indicating
51 whether the gradients are finite.
52
53 Example::
54
55 from flax.training.dynamic_scale import DynamicScale
56
57 def loss_fn(p):
58 return jnp.asarray(p, jnp.float16) ** 2
59 p = jnp.array(1., jnp.float32)
60
61 dyn_scale = DynamicScale(growth_interval=10)
62 compute_grad = jax.jit(lambda ds, p: ds.value_and_grad(loss_fn)(p))
63 for _ in range(100):
64 dyn_scale, is_fin, loss, grad = compute_grad(dyn_scale, p)
65 p += jnp.where(is_fin, 0.01 * grad, 0.)
66 print(loss)
67
68 Jax currently cannot execute conditionals efficiently on GPUs therefore we
69 selectifly ignore the gradient update using `jax.numpy.where` in case of
70 non-finite gradients.
71
72 Attributes:
73 growth_factor: how much to grow the scalar after a period of finite
74 gradients (default: 2.).
75 backoff_factor: how much to shrink the scalar after a non-finite gradient
76 (default: 0.5).
77 growth_interval: after how many steps of finite gradients the scale should
78 be increased (default: 2000).
79 fin_steps: indicates how many gradient steps in a row have been finite.
80 scale: the current scale by which the loss is multiplied.
81 """
82 growth_factor: float = struct.field(pytree_node=False, default=2.0)
83 backoff_factor: float = struct.field(pytree_node=False, default=0.5)
84 growth_interval: int = struct.field(pytree_node=False, default=2000)
85 fin_steps: Array = 0
86 scale: Array = 65536.0
87
88 def value_and_grad(self, fun: Callable[..., Any],
89 argnums: Union[int, Sequence[int]] = 0,
90 has_aux: bool = False,
91 axis_name: Optional[str] = None,
92 ) -> Callable[..., DynamicScaleResult]:
93 """Wrapper around `jax.value_and_grad`.
94
95 Args:
96 fun: Function to be differentiated. Its arguments at positions specified
97 by ``argnums`` should be arrays, scalars, or standard Python containers.
98 It should return a scalar (which includes arrays with shape ``()``
99 but not arrays with shape ``(1,)`` etc.)
100 argnums: Optional, integer or sequence of integers. Specifies which
101 positional argument(s) to differentiate with respect to (default 0).
102 has_aux: Optional, bool. Indicates whether ``fun`` returns a pair where
103 the first element is considered the output of the mathematical function
104 to be differentiated and the second element is auxiliary data.
105 Default False.
106 axis_name: If an axis is given the gradients will be averaged across
107 replicas (default: None).
108 Returns:
109 A function that takes the same arguments as `fun` and
110 returns a DynamicScaleResult
111 """
112 @functools.wraps(fun)
113 def loss_wrapper(*args):
114 aux = fun(*args)
115 if has_aux:
116 return (self.scale * aux[0], aux[1])
117 else:
118 return self.scale * aux
119
120 grad_fn = jax.value_and_grad(loss_wrapper, argnums, has_aux)
121 def grad_fn_wrapper(*args):
122 aux, grad = grad_fn(*args)
123 aux = (aux[0] / self.scale, aux[1]) if has_aux else aux / self.scale
124
125 grad = jax.tree_util.tree_map(
126 lambda g: jnp.asarray(g, jnp.float32) / self.scale, grad)
127 if axis_name is not None:
128 grad = lax.pmean(grad, axis_name)
129
130 finite = jnp.array(True)
131 for g in jax.tree_util.tree_leaves(grad):
132 finite &= jnp.all(lax.is_finite(g))
133
134 grow = self.fin_steps == self.growth_interval
135 fin_scale = jnp.where(grow & finite,
136 self.scale * self.growth_factor,
137 self.scale)
138 inf_scale = self.scale * self.backoff_factor
139 new_scale = jnp.where(finite, fin_scale, inf_scale)
140 new_fin_steps = jnp.where(grow | (~finite), 0, self.fin_steps + 1)
141
142 new_self = self.replace(fin_steps=new_fin_steps, scale=new_scale)
143 return DynamicScaleResult(new_self, finite, aux, grad)
144 return grad_fn_wrapper
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flax/training/dynamic_scale.py b/flax/training/dynamic_scale.py
--- a/flax/training/dynamic_scale.py
+++ b/flax/training/dynamic_scale.py
@@ -132,9 +132,10 @@
finite &= jnp.all(lax.is_finite(g))
grow = self.fin_steps == self.growth_interval
- fin_scale = jnp.where(grow & finite,
- self.scale * self.growth_factor,
- self.scale)
+ fin_scale = jnp.where(
+ grow & finite,
+ jnp.minimum(self.scale * self.growth_factor, jnp.finfo(jnp.float32).max),
+ self.scale)
inf_scale = self.scale * self.backoff_factor
new_scale = jnp.where(finite, fin_scale, inf_scale)
new_fin_steps = jnp.where(grow | (~finite), 0, self.fin_steps + 1)
| {"golden_diff": "diff --git a/flax/training/dynamic_scale.py b/flax/training/dynamic_scale.py\n--- a/flax/training/dynamic_scale.py\n+++ b/flax/training/dynamic_scale.py\n@@ -132,9 +132,10 @@\n finite &= jnp.all(lax.is_finite(g))\n \n grow = self.fin_steps == self.growth_interval\n- fin_scale = jnp.where(grow & finite,\n- self.scale * self.growth_factor,\n- self.scale)\n+ fin_scale = jnp.where(\n+ grow & finite,\n+ jnp.minimum(self.scale * self.growth_factor, jnp.finfo(jnp.float32).max),\n+ self.scale)\n inf_scale = self.scale * self.backoff_factor\n new_scale = jnp.where(finite, fin_scale, inf_scale)\n new_fin_steps = jnp.where(grow | (~finite), 0, self.fin_steps + 1)\n", "issue": "DynamicScale goes to infinity\n### System information\r\n- OS Platform and Distribution: Linux 5.15.74-3-MANJARO\r\n- Flax, jax, jaxlib versions: flax 0.6.0, jax 0.3.21, jaxlib 0.3.20+cuda11.cudnn82\r\n- Python version: 3.10.4\r\n- GPU/TPU model and memory: NVIDIA RTX 2060 Super 8GB\r\n- CUDA version (if applicable): cuda11, cudnn82\r\n\r\n\r\n### Problem you have encountered:\r\nI adapted the gradient scaling code of the imagenet example in order to use fp16 training for my model. During training, the `scale` parameter of `DynamicScaling` became `inf`, because it kept doubling every 2000 steps without the gradients ever becoming `nan`. Once the `scale` hit `inf` it never went down, since dividing `inf` by any number still returns `inf`.\r\n\r\n### What you expected to happen:\r\nEither have a `maximum_scale` parameter or add a check to see if `scale` is finite.\r\nI think it's a one line change in the `DynamicScale` code, so I can make a pull request if you agree.\r\n\r\n### Steps to reproduce:\r\n```py\r\nimport jax.numpy as jnp\r\nfrom flax.training import dynamic_scale\r\n\r\nds = dynamic_scale.DynamicScale(growth_factor=10.0, growth_interval=1)\r\nx = jnp.float32(1)\r\n\r\nfor _ in range(70):\r\n ds, is_fin, aux, grad = ds.value_and_grad(lambda p: p**2)(x)\r\n x = x - 0.1 * grad\r\n print(is_fin, ds.scale, x, grad)\r\n```\r\n\r\n### Code:\r\nCould check here ([dynamic scale code](https://github.com/google/flax/blob/6b80cbb239b07e370fd0af6655b39ef40de061c0/flax/training/dynamic_scale.py#L139)) if `fin_scale` is finite.\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Dynamic loss scaling for mixed precision gradients.\n\"\"\"\n\nimport functools\nfrom typing import Any, Callable, NamedTuple, Optional, Sequence, Union\n\nfrom .. 
import struct\n\nimport jax\nfrom jax import lax\nimport jax.numpy as jnp\n\n\n\nArray = Any\n\n\nclass DynamicScaleResult(NamedTuple):\n dynamic_scale: 'DynamicScale'\n finite: Array\n aux: Any\n grad: Any\n\n\nclass DynamicScale(struct.PyTreeNode):\n \"\"\"Dynamic loss scaling for mixed precision gradients.\n\n For many models gradient computations in float16 will result in numerical\n issues because small/large gradients being flushed to zero/infinity.\n Dynamic loss scaling is an algorithm that aims to find the largest scalar\n multiple for which the gradient does not overflow. This way the risk of\n underflow is minimized.\n\n the `value_and_grad` method mimicks `jax.value_and_grad`. Beside the loss\n and gradients it also ouputs and updated `DynamicScale` instance with the\n current loss scale factor. This method also returns a boolean value indicating\n whether the gradients are finite.\n\n Example::\n\n from flax.training.dynamic_scale import DynamicScale\n\n def loss_fn(p):\n return jnp.asarray(p, jnp.float16) ** 2\n p = jnp.array(1., jnp.float32)\n\n dyn_scale = DynamicScale(growth_interval=10)\n compute_grad = jax.jit(lambda ds, p: ds.value_and_grad(loss_fn)(p))\n for _ in range(100):\n dyn_scale, is_fin, loss, grad = compute_grad(dyn_scale, p)\n p += jnp.where(is_fin, 0.01 * grad, 0.)\n print(loss)\n\n Jax currently cannot execute conditionals efficiently on GPUs therefore we\n selectifly ignore the gradient update using `jax.numpy.where` in case of\n non-finite gradients.\n\n Attributes:\n growth_factor: how much to grow the scalar after a period of finite\n gradients (default: 2.).\n backoff_factor: how much to shrink the scalar after a non-finite gradient\n (default: 0.5).\n growth_interval: after how many steps of finite gradients the scale should\n be increased (default: 2000).\n fin_steps: indicates how many gradient steps in a row have been finite.\n scale: the current scale by which the loss is multiplied.\n \"\"\"\n growth_factor: float = struct.field(pytree_node=False, default=2.0)\n backoff_factor: float = struct.field(pytree_node=False, default=0.5)\n growth_interval: int = struct.field(pytree_node=False, default=2000)\n fin_steps: Array = 0\n scale: Array = 65536.0\n\n def value_and_grad(self, fun: Callable[..., Any],\n argnums: Union[int, Sequence[int]] = 0,\n has_aux: bool = False,\n axis_name: Optional[str] = None,\n ) -> Callable[..., DynamicScaleResult]:\n \"\"\"Wrapper around `jax.value_and_grad`.\n\n Args:\n fun: Function to be differentiated. Its arguments at positions specified\n by ``argnums`` should be arrays, scalars, or standard Python containers.\n It should return a scalar (which includes arrays with shape ``()``\n but not arrays with shape ``(1,)`` etc.)\n argnums: Optional, integer or sequence of integers. Specifies which\n positional argument(s) to differentiate with respect to (default 0).\n has_aux: Optional, bool. 
Indicates whether ``fun`` returns a pair where\n the first element is considered the output of the mathematical function\n to be differentiated and the second element is auxiliary data.\n Default False.\n axis_name: If an axis is given the gradients will be averaged across\n replicas (default: None).\n Returns:\n A function that takes the same arguments as `fun` and\n returns a DynamicScaleResult\n \"\"\"\n @functools.wraps(fun)\n def loss_wrapper(*args):\n aux = fun(*args)\n if has_aux:\n return (self.scale * aux[0], aux[1])\n else:\n return self.scale * aux\n\n grad_fn = jax.value_and_grad(loss_wrapper, argnums, has_aux)\n def grad_fn_wrapper(*args):\n aux, grad = grad_fn(*args)\n aux = (aux[0] / self.scale, aux[1]) if has_aux else aux / self.scale\n\n grad = jax.tree_util.tree_map(\n lambda g: jnp.asarray(g, jnp.float32) / self.scale, grad)\n if axis_name is not None:\n grad = lax.pmean(grad, axis_name)\n\n finite = jnp.array(True)\n for g in jax.tree_util.tree_leaves(grad):\n finite &= jnp.all(lax.is_finite(g))\n\n grow = self.fin_steps == self.growth_interval\n fin_scale = jnp.where(grow & finite,\n self.scale * self.growth_factor,\n self.scale)\n inf_scale = self.scale * self.backoff_factor\n new_scale = jnp.where(finite, fin_scale, inf_scale)\n new_fin_steps = jnp.where(grow | (~finite), 0, self.fin_steps + 1)\n\n new_self = self.replace(fin_steps=new_fin_steps, scale=new_scale)\n return DynamicScaleResult(new_self, finite, aux, grad)\n return grad_fn_wrapper\n", "path": "flax/training/dynamic_scale.py"}], "after_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Dynamic loss scaling for mixed precision gradients.\n\"\"\"\n\nimport functools\nfrom typing import Any, Callable, NamedTuple, Optional, Sequence, Union\n\nfrom .. import struct\n\nimport jax\nfrom jax import lax\nimport jax.numpy as jnp\n\n\n\nArray = Any\n\n\nclass DynamicScaleResult(NamedTuple):\n dynamic_scale: 'DynamicScale'\n finite: Array\n aux: Any\n grad: Any\n\n\nclass DynamicScale(struct.PyTreeNode):\n \"\"\"Dynamic loss scaling for mixed precision gradients.\n\n For many models gradient computations in float16 will result in numerical\n issues because small/large gradients being flushed to zero/infinity.\n Dynamic loss scaling is an algorithm that aims to find the largest scalar\n multiple for which the gradient does not overflow. This way the risk of\n underflow is minimized.\n\n the `value_and_grad` method mimicks `jax.value_and_grad`. Beside the loss\n and gradients it also ouputs and updated `DynamicScale` instance with the\n current loss scale factor. 
This method also returns a boolean value indicating\n whether the gradients are finite.\n\n Example::\n\n from flax.training.dynamic_scale import DynamicScale\n\n def loss_fn(p):\n return jnp.asarray(p, jnp.float16) ** 2\n p = jnp.array(1., jnp.float32)\n\n dyn_scale = DynamicScale(growth_interval=10)\n compute_grad = jax.jit(lambda ds, p: ds.value_and_grad(loss_fn)(p))\n for _ in range(100):\n dyn_scale, is_fin, loss, grad = compute_grad(dyn_scale, p)\n p += jnp.where(is_fin, 0.01 * grad, 0.)\n print(loss)\n\n Jax currently cannot execute conditionals efficiently on GPUs therefore we\n selectifly ignore the gradient update using `jax.numpy.where` in case of\n non-finite gradients.\n\n Attributes:\n growth_factor: how much to grow the scalar after a period of finite\n gradients (default: 2.).\n backoff_factor: how much to shrink the scalar after a non-finite gradient\n (default: 0.5).\n growth_interval: after how many steps of finite gradients the scale should\n be increased (default: 2000).\n fin_steps: indicates how many gradient steps in a row have been finite.\n scale: the current scale by which the loss is multiplied.\n \"\"\"\n growth_factor: float = struct.field(pytree_node=False, default=2.0)\n backoff_factor: float = struct.field(pytree_node=False, default=0.5)\n growth_interval: int = struct.field(pytree_node=False, default=2000)\n fin_steps: Array = 0\n scale: Array = 65536.0\n\n def value_and_grad(self, fun: Callable[..., Any],\n argnums: Union[int, Sequence[int]] = 0,\n has_aux: bool = False,\n axis_name: Optional[str] = None,\n ) -> Callable[..., DynamicScaleResult]:\n \"\"\"Wrapper around `jax.value_and_grad`.\n\n Args:\n fun: Function to be differentiated. Its arguments at positions specified\n by ``argnums`` should be arrays, scalars, or standard Python containers.\n It should return a scalar (which includes arrays with shape ``()``\n but not arrays with shape ``(1,)`` etc.)\n argnums: Optional, integer or sequence of integers. Specifies which\n positional argument(s) to differentiate with respect to (default 0).\n has_aux: Optional, bool. 
Indicates whether ``fun`` returns a pair where\n the first element is considered the output of the mathematical function\n to be differentiated and the second element is auxiliary data.\n Default False.\n axis_name: If an axis is given the gradients will be averaged across\n replicas (default: None).\n Returns:\n A function that takes the same arguments as `fun` and\n returns a DynamicScaleResult\n \"\"\"\n @functools.wraps(fun)\n def loss_wrapper(*args):\n aux = fun(*args)\n if has_aux:\n return (self.scale * aux[0], aux[1])\n else:\n return self.scale * aux\n\n grad_fn = jax.value_and_grad(loss_wrapper, argnums, has_aux)\n def grad_fn_wrapper(*args):\n aux, grad = grad_fn(*args)\n aux = (aux[0] / self.scale, aux[1]) if has_aux else aux / self.scale\n\n grad = jax.tree_util.tree_map(\n lambda g: jnp.asarray(g, jnp.float32) / self.scale, grad)\n if axis_name is not None:\n grad = lax.pmean(grad, axis_name)\n\n finite = jnp.array(True)\n for g in jax.tree_util.tree_leaves(grad):\n finite &= jnp.all(lax.is_finite(g))\n\n grow = self.fin_steps == self.growth_interval\n fin_scale = jnp.where(\n grow & finite,\n jnp.minimum(self.scale * self.growth_factor, jnp.finfo(jnp.float32).max),\n self.scale)\n inf_scale = self.scale * self.backoff_factor\n new_scale = jnp.where(finite, fin_scale, inf_scale)\n new_fin_steps = jnp.where(grow | (~finite), 0, self.fin_steps + 1)\n\n new_self = self.replace(fin_steps=new_fin_steps, scale=new_scale)\n return DynamicScaleResult(new_self, finite, aux, grad)\n return grad_fn_wrapper\n", "path": "flax/training/dynamic_scale.py"}]} | 2,395 | 210 |
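The golden diff above caps the grown scale at `jnp.finfo(jnp.float32).max`. As a quick illustration — a standalone sketch, not code from Flax or from this dataset, and the helper name `update_scale` is made up — applying the same update rule in isolation shows the clamp keeping the scale finite even when gradients never overflow:

```python
import jax.numpy as jnp

def update_scale(scale, finite, grow, growth_factor=2.0, backoff_factor=0.5):
    # Mirrors the patched update rule: growth is capped at float32 max, so the
    # scale can no longer overflow to inf after many finite-gradient steps.
    fin_scale = jnp.where(
        grow & finite,
        jnp.minimum(scale * growth_factor, jnp.finfo(jnp.float32).max),
        scale)
    inf_scale = scale * backoff_factor
    return jnp.where(finite, fin_scale, inf_scale)

scale = jnp.float32(65536.0)
for _ in range(200):  # far more growth steps than float32 can absorb unclamped
    scale = update_scale(scale, finite=True, grow=True)
print(scale, jnp.isfinite(scale))  # stays at ~3.4e38 (float32 max) instead of inf
```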
gh_patches_debug_9395 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-741 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix versioning on dependencies
Fix dependency package versions to be consistent with the rest of the libraries
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 REQUIRES = [
8 "applicationinsights>=0.11.9",
9 "botbuilder-schema>=4.4.0b1",
10 "botframework-connector>=4.4.0b1",
11 "botbuilder-core>=4.4.0b1",
12 "botbuilder-applicationinsights>=4.4.0b1",
13 ]
14 TESTS_REQUIRES = [
15 "aiounittest==1.3.0",
16 "aiohttp==3.5.4",
17 ]
18
19 root = os.path.abspath(os.path.dirname(__file__))
20
21 with open(
22 os.path.join(
23 root, "botbuilder", "integration", "applicationinsights", "aiohttp", "about.py"
24 )
25 ) as f:
26 package_info = {}
27 info = f.read()
28 exec(info, package_info)
29
30 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
31 long_description = f.read()
32
33 setup(
34 name=package_info["__title__"],
35 version=package_info["__version__"],
36 url=package_info["__uri__"],
37 author=package_info["__author__"],
38 description=package_info["__description__"],
39 keywords=[
40 "BotBuilderApplicationInsights",
41 "bots",
42 "ai",
43 "botframework",
44 "botbuilder",
45 "aiohttp",
46 ],
47 long_description=long_description,
48 long_description_content_type="text/x-rst",
49 license=package_info["__license__"],
50 packages=["botbuilder.integration.applicationinsights.aiohttp"],
51 install_requires=REQUIRES + TESTS_REQUIRES,
52 tests_require=TESTS_REQUIRES,
53 include_package_data=True,
54 classifiers=[
55 "Programming Language :: Python :: 3.7",
56 "Intended Audience :: Developers",
57 "License :: OSI Approved :: MIT License",
58 "Operating System :: OS Independent",
59 "Development Status :: 5 - Production/Stable",
60 "Topic :: Scientific/Engineering :: Artificial Intelligence",
61 ],
62 )
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py
--- a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py
+++ b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py
@@ -6,14 +6,14 @@
REQUIRES = [
"applicationinsights>=0.11.9",
- "botbuilder-schema>=4.4.0b1",
- "botframework-connector>=4.4.0b1",
- "botbuilder-core>=4.4.0b1",
- "botbuilder-applicationinsights>=4.4.0b1",
+ "aiohttp==3.6.2",
+ "botbuilder-schema>=4.7.1",
+ "botframework-connector>=4.7.1",
+ "botbuilder-core>=4.7.1",
+ "botbuilder-applicationinsights>=4.7.1",
]
TESTS_REQUIRES = [
"aiounittest==1.3.0",
- "aiohttp==3.5.4",
]
root = os.path.abspath(os.path.dirname(__file__))
| {"golden_diff": "diff --git a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py\n--- a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py\n+++ b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py\n@@ -6,14 +6,14 @@\n \n REQUIRES = [\n \"applicationinsights>=0.11.9\",\n- \"botbuilder-schema>=4.4.0b1\",\n- \"botframework-connector>=4.4.0b1\",\n- \"botbuilder-core>=4.4.0b1\",\n- \"botbuilder-applicationinsights>=4.4.0b1\",\n+ \"aiohttp==3.6.2\",\n+ \"botbuilder-schema>=4.7.1\",\n+ \"botframework-connector>=4.7.1\",\n+ \"botbuilder-core>=4.7.1\",\n+ \"botbuilder-applicationinsights>=4.7.1\",\n ]\n TESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n- \"aiohttp==3.5.4\",\n ]\n \n root = os.path.abspath(os.path.dirname(__file__))\n", "issue": "Fix versioning on dependencies\nFix dependency package versions to be consistent with the rest of the libraries\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"applicationinsights>=0.11.9\",\n \"botbuilder-schema>=4.4.0b1\",\n \"botframework-connector>=4.4.0b1\",\n \"botbuilder-core>=4.4.0b1\",\n \"botbuilder-applicationinsights>=4.4.0b1\",\n]\nTESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n \"aiohttp==3.5.4\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(\n os.path.join(\n root, \"botbuilder\", \"integration\", \"applicationinsights\", \"aiohttp\", \"about.py\"\n )\n) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderApplicationInsights\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n \"aiohttp\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\"botbuilder.integration.applicationinsights.aiohttp\"],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"applicationinsights>=0.11.9\",\n \"aiohttp==3.6.2\",\n \"botbuilder-schema>=4.7.1\",\n \"botframework-connector>=4.7.1\",\n \"botbuilder-core>=4.7.1\",\n \"botbuilder-applicationinsights>=4.7.1\",\n]\nTESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(\n os.path.join(\n root, \"botbuilder\", \"integration\", \"applicationinsights\", \"aiohttp\", \"about.py\"\n )\n) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderApplicationInsights\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n \"aiohttp\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\"botbuilder.integration.applicationinsights.aiohttp\"],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py"}]} | 865 | 278 |
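The fix for the entry above is simply pinning the `botbuilder-*`/`botframework-connector` requirements to the same `>=4.7.1` floor used by the sibling libraries. A small, hypothetical helper — not part of the Bot Builder repository; the file layout and regex are assumptions — could flag such mismatched pins across the `libraries/*/setup.py` files:

```python
import re
from pathlib import Path

# Matches entries like "botbuilder-core>=4.4.0b1" inside a setup.py REQUIRES list.
PATTERN = re.compile(r'"(botbuilder-[\w.-]+|botframework-connector)([<>=!~][^"]*)"')

def collect_pins(root="libraries"):
    pins = {}  # package name -> {constraint -> [library dirs using it]}
    for setup_py in Path(root).glob("*/setup.py"):
        for name, constraint in PATTERN.findall(setup_py.read_text(encoding="utf-8")):
            pins.setdefault(name, {}).setdefault(constraint, []).append(setup_py.parent.name)
    return pins

if __name__ == "__main__":
    for name, constraints in sorted(collect_pins().items()):
        if len(constraints) > 1:  # the same package is pinned differently somewhere
            print(f"inconsistent pin for {name}: {constraints}")
```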
gh_patches_debug_726 | rasdani/github-patches | git_diff | dotkom__onlineweb4-425 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"Startet studie" in Profile -> Medlemskap requires defined format without specifying it
"Started studie" is a datefield. The problem is that most browsers (like FF, Chrome) don't render these fields with any additional tools which makes filling them out a pain in the ass (Safari@iOS has that fancy datepicker-shit).
The field requires the format 'yyyy-mm-dd', but does not specify this anywhere. This should be fixed somehow.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/profiles/forms.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from django import forms
4 from django.utils.translation import ugettext as _
5
6 from apps.profiles.models import Privacy
7 from apps.authentication.models import OnlineUser, FIELD_OF_STUDY_CHOICES
8
9 class ProfileForm(forms.ModelForm):
10
11 class Meta:
12 model = OnlineUser
13
14 fields = ['nickname', 'website', 'phone_number', 'address', 'zip_code', 'allergies', 'mark_rules', ]
15 widgets = {
16 'allergies' : forms.Textarea(attrs={'id' : 'allergies'}),
17 }
18
19 def clean(self):
20 super(ProfileForm, self).clean()
21
22 cleaned_data = self.cleaned_data
23
24 # ZIP code digits only
25 zip_code = cleaned_data['zip_code']
26 if len(zip_code) != 0 and (len(zip_code) != 4 or not zip_code.isdigit()):
27 self._errors['zip_code'] = self.error_class([_(u"Postnummer må bestå av fire siffer.")])
28
29 return cleaned_data
30
31 class ImageForm(forms.ModelForm):
32
33 class Meta:
34 model = OnlineUser
35
36 fields = ['image']
37 widgets = {
38 'image': forms.FileInput(attrs={'class' : 'hidden-input', 'id' : 'image'}),
39 }
40
41 class PrivacyForm(forms.ModelForm):
42
43 class Meta:
44 model = Privacy
45 exclude = ['user']
46
47
48 class MailSettingsForm(forms.ModelForm):
49
50 class Meta:
51 model = OnlineUser
52 fields = ['infomail', ]
53
54
55 class MembershipSettingsForm(forms.ModelForm):
56
57 def __init__(self, *args, **kwargs):
58 super(MembershipSettingsForm, self).__init__(*args, **kwargs)
59 self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'
60
61 class Meta:
62 model = OnlineUser
63 fields = ['field_of_study', 'started_date', ]
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/profiles/forms.py b/apps/profiles/forms.py
--- a/apps/profiles/forms.py
+++ b/apps/profiles/forms.py
@@ -61,3 +61,7 @@
class Meta:
model = OnlineUser
fields = ['field_of_study', 'started_date', ]
+
+ widgets = {
+ 'started_date' : forms.TextInput(attrs={'placeholder' : 'YYYY-MM-DD'}),
+ }
| {"golden_diff": "diff --git a/apps/profiles/forms.py b/apps/profiles/forms.py\n--- a/apps/profiles/forms.py\n+++ b/apps/profiles/forms.py\n@@ -61,3 +61,7 @@\n class Meta:\n model = OnlineUser\n fields = ['field_of_study', 'started_date', ]\n+\n+ widgets = {\n+ 'started_date' : forms.TextInput(attrs={'placeholder' : 'YYYY-MM-DD'}),\n+ }\n", "issue": "\"Startet studie\" in Profile -> Medlemskap requires defined format without specifying it\n\"Started studie\" is a datefield. The problem is that most browsers (like FF, Chrome) don't render these fields with any additional tools which makes filling them out a pain in the ass (Safari@iOS has that fancy datepicker-shit).\n\nThe field requires the format 'yyyy-mm-dd', but does not specify this anywhere. This should be fixed somehow.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _\n\nfrom apps.profiles.models import Privacy\nfrom apps.authentication.models import OnlineUser, FIELD_OF_STUDY_CHOICES\n\nclass ProfileForm(forms.ModelForm):\n\n class Meta:\n model = OnlineUser\n\n fields = ['nickname', 'website', 'phone_number', 'address', 'zip_code', 'allergies', 'mark_rules', ]\n widgets = {\n 'allergies' : forms.Textarea(attrs={'id' : 'allergies'}),\n }\n\n def clean(self):\n super(ProfileForm, self).clean()\n\n cleaned_data = self.cleaned_data\n\n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 0 and (len(zip_code) != 4 or not zip_code.isdigit()):\n self._errors['zip_code'] = self.error_class([_(u\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n\n return cleaned_data\n\nclass ImageForm(forms.ModelForm):\n\n class Meta:\n model = OnlineUser\n\n fields = ['image']\n widgets = {\n 'image': forms.FileInput(attrs={'class' : 'hidden-input', 'id' : 'image'}),\n }\n\nclass PrivacyForm(forms.ModelForm):\n\n class Meta:\n model = Privacy\n exclude = ['user']\n\n\nclass MailSettingsForm(forms.ModelForm):\n\n class Meta:\n model = OnlineUser\n fields = ['infomail', ]\n\n\nclass MembershipSettingsForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(MembershipSettingsForm, self).__init__(*args, **kwargs)\n self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'\n\n class Meta:\n model = OnlineUser\n fields = ['field_of_study', 'started_date', ]\n", "path": "apps/profiles/forms.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _\n\nfrom apps.profiles.models import Privacy\nfrom apps.authentication.models import OnlineUser, FIELD_OF_STUDY_CHOICES\n\nclass ProfileForm(forms.ModelForm):\n\n class Meta:\n model = OnlineUser\n\n fields = ['nickname', 'website', 'phone_number', 'address', 'zip_code', 'allergies', 'mark_rules', ]\n widgets = {\n 'allergies' : forms.Textarea(attrs={'id' : 'allergies'}),\n }\n\n def clean(self):\n super(ProfileForm, self).clean()\n\n cleaned_data = self.cleaned_data\n\n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 0 and (len(zip_code) != 4 or not zip_code.isdigit()):\n self._errors['zip_code'] = self.error_class([_(u\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n\n return cleaned_data\n\nclass ImageForm(forms.ModelForm):\n\n class Meta:\n model = OnlineUser\n\n fields = ['image']\n widgets = {\n 'image': forms.FileInput(attrs={'class' : 'hidden-input', 'id' : 'image'}),\n }\n\nclass PrivacyForm(forms.ModelForm):\n\n class Meta:\n model = 
Privacy\n exclude = ['user']\n\n\nclass MailSettingsForm(forms.ModelForm):\n\n class Meta:\n model = OnlineUser\n fields = ['infomail', ]\n\n\nclass MembershipSettingsForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(MembershipSettingsForm, self).__init__(*args, **kwargs)\n self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'\n\n class Meta:\n model = OnlineUser\n fields = ['field_of_study', 'started_date', ]\n\n widgets = {\n 'started_date' : forms.TextInput(attrs={'placeholder' : 'YYYY-MM-DD'}),\n }\n", "path": "apps/profiles/forms.py"}]} | 882 | 95 |
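The merged patch above documents the expected format with a `TextInput` placeholder. A possible alternative — shown here only as a sketch, not what onlineweb4 actually adopted — is to render the field as an HTML5 date input so capable browsers show their native picker, while the placeholder still spells out `YYYY-MM-DD` for the rest:

```python
from django import forms
from apps.authentication.models import OnlineUser  # same import as in apps/profiles/forms.py

class MembershipSettingsForm(forms.ModelForm):
    def __init__(self, *args, **kwargs):
        super(MembershipSettingsForm, self).__init__(*args, **kwargs)
        self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'

    class Meta:
        model = OnlineUser
        fields = ['field_of_study', 'started_date']
        widgets = {
            # 'type': 'date' overrides DateInput's default text rendering and asks the
            # browser for a native date picker; the placeholder covers browsers without one.
            'started_date': forms.DateInput(
                attrs={'type': 'date', 'placeholder': 'YYYY-MM-DD'}),
        }
```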
gh_patches_debug_19174 | rasdani/github-patches | git_diff | translate__pootle-4496 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
calculate_checks requires running refresh_stats afterwards
If you run `calculate_checks`, you also have to run `refresh_stats` afterwards for the detected critical errors to show up in the stats. This is necessary because, if the stats report no errors, no links are rendered that let you jump directly to the errors in the editor.
Documentation says that `calculate_checks` **will flush existing caches and update the quality checks cache**, so probably the right fix is to align `calculate_checks` behavior with that statement.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/core/checks/checker.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import logging
10 import time
11
12 from django.conf import settings
13 from django.utils import timezone
14 from django.utils.functional import cached_property
15 from django.utils.lru_cache import lru_cache
16
17 from pootle_misc.checks import run_given_filters
18 from pootle_misc.util import import_func
19 from pootle_store.models import QualityCheck, Store, Unit
20 from pootle_store.unit import UnitProxy
21 from pootle_store.util import OBSOLETE
22 from pootle_translationproject.models import TranslationProject
23
24
25 logger = logging.getLogger(__name__)
26
27
28 class CheckableUnit(UnitProxy):
29 """CheckableUnit wraps a `Unit` values dictionary to provide a `Unit` like
30 instance that can be used by UnitQualityCheck
31
32 At a minimum the dict should contain source_f, target_f, store__id, and
33 store__translation_project__id
34 """
35
36 @property
37 def store(self):
38 return self.store__id
39
40 @property
41 def tp(self):
42 return self.store__translation_project__id
43
44
45 class UnitQualityCheck(object):
46
47 def __init__(self, unit, checker, original_checks,
48 check_names, keep_false_positives=True):
49 """Refreshes QualityChecks for a Unit
50
51 As this class can work with either `Unit` or `CheckableUnit` it only
52 uses a minimum of `Unit` attributes from `self.unit`.
53
54 :param unit: an instance of Unit or CheckableUnit
55 :param checker: a Checker for this Unit.
56 :param original_checks: current QualityChecks for this Unit
57 :param check_names: limit checks to given list of quality check names.
58 :param keep_false_positives: when set to `False`, it will unmute any
59 existing false positive checks.
60 """
61 self.checker = checker
62 self.unit = unit
63 self.original_checks = original_checks
64 self.check_names = check_names
65 self.keep_false_positives = keep_false_positives
66 self.unmute_list = []
67
68 @cached_property
69 def check_failures(self):
70 """Current QualityCheck failure for the Unit
71 """
72 if self.check_names is None:
73 return self.checker.run_filters(
74 self.unit, categorised=True)
75 return run_given_filters(
76 self.checker, self.unit, self.check_names)
77
78 @cached_property
79 def checks_qs(self):
80 """QualityCheck queryset for the Unit
81 """
82 return QualityCheck.objects.filter(unit=self.unit.id)
83
84 def delete_checks(self, checks):
85 """Delete checks that are no longer used.
86 """
87 to_delete = self.checks_qs.filter(name__in=checks)
88 if to_delete.exists():
89 to_delete.delete()
90 return True
91 return False
92
93 def unmute_checks(self, checks):
94 """Unmute checks that should no longer be muted
95 """
96 to_unmute = self.checks_qs.filter(
97 name__in=checks, false_positive=True)
98 if to_unmute.exists():
99 to_unmute.update(false_positive=False)
100 return True
101 return False
102
103 def update(self):
104 """Update QualityChecks for a Unit, deleting and unmuting as appropriate.
105 """
106 # update the checks for this unit
107 updated = self.update_checks()
108
109 # delete any remaining checks that were only in the original list
110 deleted = (
111 self.original_checks and self.delete_checks(self.original_checks))
112
113 # unmute any checks that have been marked for unmuting
114 unmuted = (
115 self.unmute_list and self.unmute_checks(self.unmute_list))
116
117 return (updated or deleted or unmuted)
118
119 def update_checks(self):
120 """Compare self.original_checks to the Units calculated QualityCheck failures.
121
122 Removes members of self.original_checks as they have been compared.
123 """
124 updated = False
125 for name in self.check_failures.iterkeys():
126 if name in self.original_checks:
127 # keep false-positive checks if check is active
128 unmute = (
129 self.original_checks[name]['false_positive']
130 and not self.keep_false_positives)
131 if unmute:
132 self.unmute_list.append(name)
133 # if the check is valid remove from the list and continue
134 del self.original_checks[name]
135 continue
136
137 # the check didnt exist previously - so create it
138 self.checks_qs.create(
139 unit_id=self.unit.id,
140 name=name,
141 message=self.check_failures[name]['message'],
142 category=self.check_failures[name]['category'])
143 updated = True
144
145 return updated
146
147
148 class QualityCheckUpdater(object):
149
150 def __init__(self, check_names=None, translation_project=None,
151 keep_false_positives=True):
152 """Refreshes QualityChecks for Units
153
154 :param check_names: limit checks to given list of quality check names.
155 :param translation_project: an instance of `TranslationProject` to
156 restrict the update to.
157 :param keep_false_positives: when set to `False`, it will unmute any
158 existing false positive checks.
159 """
160
161 self.check_names = check_names
162 self.translation_project = translation_project
163 self.keep_false_positives = keep_false_positives
164 self.stores = set()
165 self._store_to_expire = None
166
167 @cached_property
168 def checks(self):
169 """Existing checks in the database for all units
170 """
171 checks = self.checks_qs
172 check_keys = (
173 'id', 'name', 'unit_id',
174 'category', 'false_positive')
175
176 if self.check_names is not None:
177 checks = checks.filter(name__in=self.check_names)
178
179 all_units_checks = {}
180 for check in checks.values(*check_keys):
181 all_units_checks.setdefault(
182 check['unit_id'], {})[check['name']] = check
183 return all_units_checks
184
185 @cached_property
186 def checks_qs(self):
187 """QualityCheck queryset for all units, restricted to TP if set
188 """
189 checks_qs = QualityCheck.objects.all()
190
191 if self.translation_project is not None:
192 tp_pk = self.translation_project.pk
193 checks_qs = checks_qs.filter(
194 unit__store__translation_project__pk=tp_pk)
195 return checks_qs
196
197 @cached_property
198 def units(self):
199 """Result set of Units, restricted to TP if set
200 """
201 units = Unit.simple_objects.all()
202 if self.translation_project is not None:
203 units = units.filter(
204 store__translation_project=self.translation_project)
205 return units
206
207 def clear_checks(self):
208 QualityCheck.delete_unknown_checks()
209
210 @lru_cache(maxsize=None)
211 def get_checker(self, tp_pk):
212 """Return the site QualityChecker or the QualityCheck associated with
213 the a Unit's TP otherwise.
214 """
215 if settings.POOTLE_QUALITY_CHECKER:
216 return import_func(settings.POOTLE_QUALITY_CHECKER)()
217 try:
218 return TranslationProject.objects.get(id=tp_pk).checker
219 except TranslationProject.DoesNotExist:
220 # There seems to be a risk of dangling Stores with no TP
221 return None
222
223 def expire_store_cache(self, store_pk=None):
224 """Whenever a store_pk is found it is queued for cache expiry
225
226 if a new store_pk is called the old one has its cache expired,
227 and the new store_pk is saved
228
229 call with None to expire the current Store's cache
230 """
231 if self._store_to_expire is None:
232 # there is no Store set - queue it for expiry
233 self._store_to_expire = store_pk
234 return
235 if store_pk == self._store_to_expire:
236 # its the same Store that we saw last time
237 return
238 # there was a _store_to_expire set and its changed - expire the cache
239 self.update_store_caches([self._store_to_expire])
240
241 # remember the new store_pk
242 self._store_to_expire = store_pk
243
244 def update(self):
245 """Update/purge all QualityChecks for Units, and expire Store caches.
246 """
247 start = time.time()
248 logger.debug("Clearing unknown checks...")
249 self.clear_checks()
250 logger.debug(
251 "Cleared unknown checks in %s seconds"
252 % (time.time() - start))
253
254 start = time.time()
255 logger.debug("Deleting checks for untranslated units...")
256 untrans = self.update_untranslated()
257 logger.debug(
258 "Deleted %s checks for untranslated units in %s seconds"
259 % (untrans, (time.time() - start)))
260
261 start = time.time()
262 logger.debug("Updating checks - this may take some time...")
263 trans = self.update_translated()
264 logger.debug(
265 "Updated checks for %s units in %s seconds"
266 % (trans, (time.time() - start)))
267
268 def update_store_caches(self, stores):
269 """After completing QualityCheck updates expire caches for affected Stores.
270 """
271 for store in Store.objects.filter(pk__in=stores):
272 store.update_dirty_cache()
273
274 def update_translated_unit(self, unit, checker=None):
275 """Update checks for a translated Unit
276 """
277 unit = CheckableUnit(unit)
278 checker = UnitQualityCheck(
279 unit,
280 checker,
281 self.checks.get(unit.id, {}),
282 self.check_names,
283 self.keep_false_positives)
284 if checker.update():
285 self.expire_store_cache(unit.store)
286 self.units.filter(id=unit.id).update(mtime=timezone.now())
287 return True
288 return False
289
290 def update_translated(self):
291 """Update checks for translated Units
292 """
293 unit_fields = [
294 "id", "source_f", "target_f", "locations", "store__id"]
295
296 tp_key = "store__translation_project__id"
297 if self.translation_project is None:
298 unit_fields.append(tp_key)
299
300 checker = None
301 if self.translation_project is not None:
302 # we only need to get the checker once if TP is set
303 checker = self.get_checker(self.translation_project.id)
304
305 translated = (
306 self.units.filter(state__gte=OBSOLETE)
307 .order_by("store", "index"))
308 updated_count = 0
309 for unit in translated.values(*unit_fields).iterator():
310 if self.translation_project is not None:
311 # if TP is set then manually add TP.id to the Unit value dict
312 unit[tp_key] = self.translation_project.id
313 if checker is None:
314 checker = self.get_checker(unit[tp_key])
315 if checker and self.update_translated_unit(unit, checker=checker):
316 updated_count += 1
317 # clear the cache of the remaining Store
318 self.expire_store_cache()
319 return updated_count
320
321 def update_untranslated(self):
322 """Delete QualityChecks for untranslated Units
323 """
324 checks_qs = self.checks_qs.exclude(unit__state__gte=OBSOLETE)
325 self.update_store_caches(
326 set(checks_qs.values_list("unit__store__pk", flat=True).distinct()))
327 deleted = checks_qs.count()
328 checks_qs.delete()
329 return deleted
330
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/core/checks/checker.py b/pootle/core/checks/checker.py
--- a/pootle/core/checks/checker.py
+++ b/pootle/core/checks/checker.py
@@ -14,6 +14,7 @@
from django.utils.functional import cached_property
from django.utils.lru_cache import lru_cache
+from pootle.core.mixins.treeitem import CachedMethods
from pootle_misc.checks import run_given_filters
from pootle_misc.util import import_func
from pootle_store.models import QualityCheck, Store, Unit
@@ -269,6 +270,7 @@
"""After completing QualityCheck updates expire caches for affected Stores.
"""
for store in Store.objects.filter(pk__in=stores):
+ store.mark_dirty(CachedMethods.CHECKS, CachedMethods.MTIME)
store.update_dirty_cache()
def update_translated_unit(self, unit, checker=None):
| {"golden_diff": "diff --git a/pootle/core/checks/checker.py b/pootle/core/checks/checker.py\n--- a/pootle/core/checks/checker.py\n+++ b/pootle/core/checks/checker.py\n@@ -14,6 +14,7 @@\n from django.utils.functional import cached_property\n from django.utils.lru_cache import lru_cache\n \n+from pootle.core.mixins.treeitem import CachedMethods\n from pootle_misc.checks import run_given_filters\n from pootle_misc.util import import_func\n from pootle_store.models import QualityCheck, Store, Unit\n@@ -269,6 +270,7 @@\n \"\"\"After completing QualityCheck updates expire caches for affected Stores.\n \"\"\"\n for store in Store.objects.filter(pk__in=stores):\n+ store.mark_dirty(CachedMethods.CHECKS, CachedMethods.MTIME)\n store.update_dirty_cache()\n \n def update_translated_unit(self, unit, checker=None):\n", "issue": "calculate_checks requires running refresh_stats afterwards\nIf you run `calculate_checks` you will have to also run `refresh_stats` afterwards in order for the detected critical errors to be shown on the stats. This is necessary since if no errors are detected in the stats no links are properly rendered to directly see the errors in the editor.\n\nDocumentation says that `calculate_checks` **will flush existing caches and update the quality checks cache**, so probably the right fix is to align `calculate_checks` behavior with that statement.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\nimport time\n\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.lru_cache import lru_cache\n\nfrom pootle_misc.checks import run_given_filters\nfrom pootle_misc.util import import_func\nfrom pootle_store.models import QualityCheck, Store, Unit\nfrom pootle_store.unit import UnitProxy\nfrom pootle_store.util import OBSOLETE\nfrom pootle_translationproject.models import TranslationProject\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CheckableUnit(UnitProxy):\n \"\"\"CheckableUnit wraps a `Unit` values dictionary to provide a `Unit` like\n instance that can be used by UnitQualityCheck\n\n At a minimum the dict should contain source_f, target_f, store__id, and\n store__translation_project__id\n \"\"\"\n\n @property\n def store(self):\n return self.store__id\n\n @property\n def tp(self):\n return self.store__translation_project__id\n\n\nclass UnitQualityCheck(object):\n\n def __init__(self, unit, checker, original_checks,\n check_names, keep_false_positives=True):\n \"\"\"Refreshes QualityChecks for a Unit\n\n As this class can work with either `Unit` or `CheckableUnit` it only\n uses a minimum of `Unit` attributes from `self.unit`.\n\n :param unit: an instance of Unit or CheckableUnit\n :param checker: a Checker for this Unit.\n :param original_checks: current QualityChecks for this Unit\n :param check_names: limit checks to given list of quality check names.\n :param keep_false_positives: when set to `False`, it will unmute any\n existing false positive checks.\n \"\"\"\n self.checker = checker\n self.unit = unit\n self.original_checks = original_checks\n self.check_names = check_names\n self.keep_false_positives = keep_false_positives\n self.unmute_list = []\n\n @cached_property\n def check_failures(self):\n 
\"\"\"Current QualityCheck failure for the Unit\n \"\"\"\n if self.check_names is None:\n return self.checker.run_filters(\n self.unit, categorised=True)\n return run_given_filters(\n self.checker, self.unit, self.check_names)\n\n @cached_property\n def checks_qs(self):\n \"\"\"QualityCheck queryset for the Unit\n \"\"\"\n return QualityCheck.objects.filter(unit=self.unit.id)\n\n def delete_checks(self, checks):\n \"\"\"Delete checks that are no longer used.\n \"\"\"\n to_delete = self.checks_qs.filter(name__in=checks)\n if to_delete.exists():\n to_delete.delete()\n return True\n return False\n\n def unmute_checks(self, checks):\n \"\"\"Unmute checks that should no longer be muted\n \"\"\"\n to_unmute = self.checks_qs.filter(\n name__in=checks, false_positive=True)\n if to_unmute.exists():\n to_unmute.update(false_positive=False)\n return True\n return False\n\n def update(self):\n \"\"\"Update QualityChecks for a Unit, deleting and unmuting as appropriate.\n \"\"\"\n # update the checks for this unit\n updated = self.update_checks()\n\n # delete any remaining checks that were only in the original list\n deleted = (\n self.original_checks and self.delete_checks(self.original_checks))\n\n # unmute any checks that have been marked for unmuting\n unmuted = (\n self.unmute_list and self.unmute_checks(self.unmute_list))\n\n return (updated or deleted or unmuted)\n\n def update_checks(self):\n \"\"\"Compare self.original_checks to the Units calculated QualityCheck failures.\n\n Removes members of self.original_checks as they have been compared.\n \"\"\"\n updated = False\n for name in self.check_failures.iterkeys():\n if name in self.original_checks:\n # keep false-positive checks if check is active\n unmute = (\n self.original_checks[name]['false_positive']\n and not self.keep_false_positives)\n if unmute:\n self.unmute_list.append(name)\n # if the check is valid remove from the list and continue\n del self.original_checks[name]\n continue\n\n # the check didnt exist previously - so create it\n self.checks_qs.create(\n unit_id=self.unit.id,\n name=name,\n message=self.check_failures[name]['message'],\n category=self.check_failures[name]['category'])\n updated = True\n\n return updated\n\n\nclass QualityCheckUpdater(object):\n\n def __init__(self, check_names=None, translation_project=None,\n keep_false_positives=True):\n \"\"\"Refreshes QualityChecks for Units\n\n :param check_names: limit checks to given list of quality check names.\n :param translation_project: an instance of `TranslationProject` to\n restrict the update to.\n :param keep_false_positives: when set to `False`, it will unmute any\n existing false positive checks.\n \"\"\"\n\n self.check_names = check_names\n self.translation_project = translation_project\n self.keep_false_positives = keep_false_positives\n self.stores = set()\n self._store_to_expire = None\n\n @cached_property\n def checks(self):\n \"\"\"Existing checks in the database for all units\n \"\"\"\n checks = self.checks_qs\n check_keys = (\n 'id', 'name', 'unit_id',\n 'category', 'false_positive')\n\n if self.check_names is not None:\n checks = checks.filter(name__in=self.check_names)\n\n all_units_checks = {}\n for check in checks.values(*check_keys):\n all_units_checks.setdefault(\n check['unit_id'], {})[check['name']] = check\n return all_units_checks\n\n @cached_property\n def checks_qs(self):\n \"\"\"QualityCheck queryset for all units, restricted to TP if set\n \"\"\"\n checks_qs = QualityCheck.objects.all()\n\n if self.translation_project is not None:\n tp_pk 
= self.translation_project.pk\n checks_qs = checks_qs.filter(\n unit__store__translation_project__pk=tp_pk)\n return checks_qs\n\n @cached_property\n def units(self):\n \"\"\"Result set of Units, restricted to TP if set\n \"\"\"\n units = Unit.simple_objects.all()\n if self.translation_project is not None:\n units = units.filter(\n store__translation_project=self.translation_project)\n return units\n\n def clear_checks(self):\n QualityCheck.delete_unknown_checks()\n\n @lru_cache(maxsize=None)\n def get_checker(self, tp_pk):\n \"\"\"Return the site QualityChecker or the QualityCheck associated with\n the a Unit's TP otherwise.\n \"\"\"\n if settings.POOTLE_QUALITY_CHECKER:\n return import_func(settings.POOTLE_QUALITY_CHECKER)()\n try:\n return TranslationProject.objects.get(id=tp_pk).checker\n except TranslationProject.DoesNotExist:\n # There seems to be a risk of dangling Stores with no TP\n return None\n\n def expire_store_cache(self, store_pk=None):\n \"\"\"Whenever a store_pk is found it is queued for cache expiry\n\n if a new store_pk is called the old one has its cache expired,\n and the new store_pk is saved\n\n call with None to expire the current Store's cache\n \"\"\"\n if self._store_to_expire is None:\n # there is no Store set - queue it for expiry\n self._store_to_expire = store_pk\n return\n if store_pk == self._store_to_expire:\n # its the same Store that we saw last time\n return\n # there was a _store_to_expire set and its changed - expire the cache\n self.update_store_caches([self._store_to_expire])\n\n # remember the new store_pk\n self._store_to_expire = store_pk\n\n def update(self):\n \"\"\"Update/purge all QualityChecks for Units, and expire Store caches.\n \"\"\"\n start = time.time()\n logger.debug(\"Clearing unknown checks...\")\n self.clear_checks()\n logger.debug(\n \"Cleared unknown checks in %s seconds\"\n % (time.time() - start))\n\n start = time.time()\n logger.debug(\"Deleting checks for untranslated units...\")\n untrans = self.update_untranslated()\n logger.debug(\n \"Deleted %s checks for untranslated units in %s seconds\"\n % (untrans, (time.time() - start)))\n\n start = time.time()\n logger.debug(\"Updating checks - this may take some time...\")\n trans = self.update_translated()\n logger.debug(\n \"Updated checks for %s units in %s seconds\"\n % (trans, (time.time() - start)))\n\n def update_store_caches(self, stores):\n \"\"\"After completing QualityCheck updates expire caches for affected Stores.\n \"\"\"\n for store in Store.objects.filter(pk__in=stores):\n store.update_dirty_cache()\n\n def update_translated_unit(self, unit, checker=None):\n \"\"\"Update checks for a translated Unit\n \"\"\"\n unit = CheckableUnit(unit)\n checker = UnitQualityCheck(\n unit,\n checker,\n self.checks.get(unit.id, {}),\n self.check_names,\n self.keep_false_positives)\n if checker.update():\n self.expire_store_cache(unit.store)\n self.units.filter(id=unit.id).update(mtime=timezone.now())\n return True\n return False\n\n def update_translated(self):\n \"\"\"Update checks for translated Units\n \"\"\"\n unit_fields = [\n \"id\", \"source_f\", \"target_f\", \"locations\", \"store__id\"]\n\n tp_key = \"store__translation_project__id\"\n if self.translation_project is None:\n unit_fields.append(tp_key)\n\n checker = None\n if self.translation_project is not None:\n # we only need to get the checker once if TP is set\n checker = self.get_checker(self.translation_project.id)\n\n translated = (\n self.units.filter(state__gte=OBSOLETE)\n .order_by(\"store\", \"index\"))\n 
updated_count = 0\n for unit in translated.values(*unit_fields).iterator():\n if self.translation_project is not None:\n # if TP is set then manually add TP.id to the Unit value dict\n unit[tp_key] = self.translation_project.id\n if checker is None:\n checker = self.get_checker(unit[tp_key])\n if checker and self.update_translated_unit(unit, checker=checker):\n updated_count += 1\n # clear the cache of the remaining Store\n self.expire_store_cache()\n return updated_count\n\n def update_untranslated(self):\n \"\"\"Delete QualityChecks for untranslated Units\n \"\"\"\n checks_qs = self.checks_qs.exclude(unit__state__gte=OBSOLETE)\n self.update_store_caches(\n set(checks_qs.values_list(\"unit__store__pk\", flat=True).distinct()))\n deleted = checks_qs.count()\n checks_qs.delete()\n return deleted\n", "path": "pootle/core/checks/checker.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\nimport time\n\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.lru_cache import lru_cache\n\nfrom pootle.core.mixins.treeitem import CachedMethods\nfrom pootle_misc.checks import run_given_filters\nfrom pootle_misc.util import import_func\nfrom pootle_store.models import QualityCheck, Store, Unit\nfrom pootle_store.unit import UnitProxy\nfrom pootle_store.util import OBSOLETE\nfrom pootle_translationproject.models import TranslationProject\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CheckableUnit(UnitProxy):\n \"\"\"CheckableUnit wraps a `Unit` values dictionary to provide a `Unit` like\n instance that can be used by UnitQualityCheck\n\n At a minimum the dict should contain source_f, target_f, store__id, and\n store__translation_project__id\n \"\"\"\n\n @property\n def store(self):\n return self.store__id\n\n @property\n def tp(self):\n return self.store__translation_project__id\n\n\nclass UnitQualityCheck(object):\n\n def __init__(self, unit, checker, original_checks,\n check_names, keep_false_positives=True):\n \"\"\"Refreshes QualityChecks for a Unit\n\n As this class can work with either `Unit` or `CheckableUnit` it only\n uses a minimum of `Unit` attributes from `self.unit`.\n\n :param unit: an instance of Unit or CheckableUnit\n :param checker: a Checker for this Unit.\n :param original_checks: current QualityChecks for this Unit\n :param check_names: limit checks to given list of quality check names.\n :param keep_false_positives: when set to `False`, it will unmute any\n existing false positive checks.\n \"\"\"\n self.checker = checker\n self.unit = unit\n self.original_checks = original_checks\n self.check_names = check_names\n self.keep_false_positives = keep_false_positives\n self.unmute_list = []\n\n @cached_property\n def check_failures(self):\n \"\"\"Current QualityCheck failure for the Unit\n \"\"\"\n if self.check_names is None:\n return self.checker.run_filters(\n self.unit, categorised=True)\n return run_given_filters(\n self.checker, self.unit, self.check_names)\n\n @cached_property\n def checks_qs(self):\n \"\"\"QualityCheck queryset for the Unit\n \"\"\"\n return QualityCheck.objects.filter(unit=self.unit.id)\n\n def delete_checks(self, checks):\n \"\"\"Delete checks that are no longer used.\n \"\"\"\n 
to_delete = self.checks_qs.filter(name__in=checks)\n if to_delete.exists():\n to_delete.delete()\n return True\n return False\n\n def unmute_checks(self, checks):\n \"\"\"Unmute checks that should no longer be muted\n \"\"\"\n to_unmute = self.checks_qs.filter(\n name__in=checks, false_positive=True)\n if to_unmute.exists():\n to_unmute.update(false_positive=False)\n return True\n return False\n\n def update(self):\n \"\"\"Update QualityChecks for a Unit, deleting and unmuting as appropriate.\n \"\"\"\n # update the checks for this unit\n updated = self.update_checks()\n\n # delete any remaining checks that were only in the original list\n deleted = (\n self.original_checks and self.delete_checks(self.original_checks))\n\n # unmute any checks that have been marked for unmuting\n unmuted = (\n self.unmute_list and self.unmute_checks(self.unmute_list))\n\n return (updated or deleted or unmuted)\n\n def update_checks(self):\n \"\"\"Compare self.original_checks to the Units calculated QualityCheck failures.\n\n Removes members of self.original_checks as they have been compared.\n \"\"\"\n updated = False\n for name in self.check_failures.iterkeys():\n if name in self.original_checks:\n # keep false-positive checks if check is active\n unmute = (\n self.original_checks[name]['false_positive']\n and not self.keep_false_positives)\n if unmute:\n self.unmute_list.append(name)\n # if the check is valid remove from the list and continue\n del self.original_checks[name]\n continue\n\n # the check didnt exist previously - so create it\n self.checks_qs.create(\n unit_id=self.unit.id,\n name=name,\n message=self.check_failures[name]['message'],\n category=self.check_failures[name]['category'])\n updated = True\n\n return updated\n\n\nclass QualityCheckUpdater(object):\n\n def __init__(self, check_names=None, translation_project=None,\n keep_false_positives=True):\n \"\"\"Refreshes QualityChecks for Units\n\n :param check_names: limit checks to given list of quality check names.\n :param translation_project: an instance of `TranslationProject` to\n restrict the update to.\n :param keep_false_positives: when set to `False`, it will unmute any\n existing false positive checks.\n \"\"\"\n\n self.check_names = check_names\n self.translation_project = translation_project\n self.keep_false_positives = keep_false_positives\n self.stores = set()\n self._store_to_expire = None\n\n @cached_property\n def checks(self):\n \"\"\"Existing checks in the database for all units\n \"\"\"\n checks = self.checks_qs\n check_keys = (\n 'id', 'name', 'unit_id',\n 'category', 'false_positive')\n\n if self.check_names is not None:\n checks = checks.filter(name__in=self.check_names)\n\n all_units_checks = {}\n for check in checks.values(*check_keys):\n all_units_checks.setdefault(\n check['unit_id'], {})[check['name']] = check\n return all_units_checks\n\n @cached_property\n def checks_qs(self):\n \"\"\"QualityCheck queryset for all units, restricted to TP if set\n \"\"\"\n checks_qs = QualityCheck.objects.all()\n\n if self.translation_project is not None:\n tp_pk = self.translation_project.pk\n checks_qs = checks_qs.filter(\n unit__store__translation_project__pk=tp_pk)\n return checks_qs\n\n @cached_property\n def units(self):\n \"\"\"Result set of Units, restricted to TP if set\n \"\"\"\n units = Unit.simple_objects.all()\n if self.translation_project is not None:\n units = units.filter(\n store__translation_project=self.translation_project)\n return units\n\n def clear_checks(self):\n QualityCheck.delete_unknown_checks()\n\n 
@lru_cache(maxsize=None)\n def get_checker(self, tp_pk):\n \"\"\"Return the site QualityChecker or the QualityCheck associated with\n the a Unit's TP otherwise.\n \"\"\"\n if settings.POOTLE_QUALITY_CHECKER:\n return import_func(settings.POOTLE_QUALITY_CHECKER)()\n try:\n return TranslationProject.objects.get(id=tp_pk).checker\n except TranslationProject.DoesNotExist:\n # There seems to be a risk of dangling Stores with no TP\n return None\n\n def expire_store_cache(self, store_pk=None):\n \"\"\"Whenever a store_pk is found it is queued for cache expiry\n\n if a new store_pk is called the old one has its cache expired,\n and the new store_pk is saved\n\n call with None to expire the current Store's cache\n \"\"\"\n if self._store_to_expire is None:\n # there is no Store set - queue it for expiry\n self._store_to_expire = store_pk\n return\n if store_pk == self._store_to_expire:\n # its the same Store that we saw last time\n return\n # there was a _store_to_expire set and its changed - expire the cache\n self.update_store_caches([self._store_to_expire])\n\n # remember the new store_pk\n self._store_to_expire = store_pk\n\n def update(self):\n \"\"\"Update/purge all QualityChecks for Units, and expire Store caches.\n \"\"\"\n start = time.time()\n logger.debug(\"Clearing unknown checks...\")\n self.clear_checks()\n logger.debug(\n \"Cleared unknown checks in %s seconds\"\n % (time.time() - start))\n\n start = time.time()\n logger.debug(\"Deleting checks for untranslated units...\")\n untrans = self.update_untranslated()\n logger.debug(\n \"Deleted %s checks for untranslated units in %s seconds\"\n % (untrans, (time.time() - start)))\n\n start = time.time()\n logger.debug(\"Updating checks - this may take some time...\")\n trans = self.update_translated()\n logger.debug(\n \"Updated checks for %s units in %s seconds\"\n % (trans, (time.time() - start)))\n\n def update_store_caches(self, stores):\n \"\"\"After completing QualityCheck updates expire caches for affected Stores.\n \"\"\"\n for store in Store.objects.filter(pk__in=stores):\n store.mark_dirty(CachedMethods.CHECKS, CachedMethods.MTIME)\n store.update_dirty_cache()\n\n def update_translated_unit(self, unit, checker=None):\n \"\"\"Update checks for a translated Unit\n \"\"\"\n unit = CheckableUnit(unit)\n checker = UnitQualityCheck(\n unit,\n checker,\n self.checks.get(unit.id, {}),\n self.check_names,\n self.keep_false_positives)\n if checker.update():\n self.expire_store_cache(unit.store)\n self.units.filter(id=unit.id).update(mtime=timezone.now())\n return True\n return False\n\n def update_translated(self):\n \"\"\"Update checks for translated Units\n \"\"\"\n unit_fields = [\n \"id\", \"source_f\", \"target_f\", \"locations\", \"store__id\"]\n\n tp_key = \"store__translation_project__id\"\n if self.translation_project is None:\n unit_fields.append(tp_key)\n\n checker = None\n if self.translation_project is not None:\n # we only need to get the checker once if TP is set\n checker = self.get_checker(self.translation_project.id)\n\n translated = (\n self.units.filter(state__gte=OBSOLETE)\n .order_by(\"store\", \"index\"))\n updated_count = 0\n for unit in translated.values(*unit_fields).iterator():\n if self.translation_project is not None:\n # if TP is set then manually add TP.id to the Unit value dict\n unit[tp_key] = self.translation_project.id\n if checker is None:\n checker = self.get_checker(unit[tp_key])\n if checker and self.update_translated_unit(unit, checker=checker):\n updated_count += 1\n # clear the cache of the 
remaining Store\n self.expire_store_cache()\n return updated_count\n\n def update_untranslated(self):\n \"\"\"Delete QualityChecks for untranslated Units\n \"\"\"\n checks_qs = self.checks_qs.exclude(unit__state__gte=OBSOLETE)\n self.update_store_caches(\n set(checks_qs.values_list(\"unit__store__pk\", flat=True).distinct()))\n deleted = checks_qs.count()\n checks_qs.delete()\n return deleted\n", "path": "pootle/core/checks/checker.py"}]} | 3,680 | 208 |
gh_patches_debug_24530 | rasdani/github-patches | git_diff | piskvorky__gensim-2390 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
loading fastText model trained with pretrained_vectors still fails (see: #2350)
## Description
Loading a pretrained `fasttext_model.bin` with `gensim.models.fasttext.FastText.load_fasttext_format('wiki-news-300d-1M-subword.bin')` fails with `AssertionError: unexpected number of vectors`, despite the fix for #2350.
## Steps/Code/Corpus to Reproduce
first install the `develop` branch with: `pip install --upgrade git+git://github.com/RaRe-Technologies/gensim@develop`, then:
```python
#dependencies
import requests, zipfile, io
from gensim.models.fasttext import FastText
#download model
ft_url = 'https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki-news-300d-1M-subword.bin.zip'
r = requests.get(ft_url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall()
#attempt load
mod = FastText.load_fasttext_format('wiki-news-300d-1M-subword.bin')
```
#### Expected Results
Loaded model.
#### Actual Results
```
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-29-a054256d6f88> in <module>
1 #load model
2 from gensim.models.fasttext import FastText
----> 3 mod = FastText.load_fasttext_format('wiki-news-300d-1M-subword.bin')
4 # from gensim.models import KeyedVectors
5 # wv = KeyedVectors.load_word2vec_format('wiki-news-300d-1M-subword.vec')
/anaconda3/envs/tensor_env/lib/python3.7/site-packages/gensim/models/fasttext.py in load_fasttext_format(cls, model_file, encoding, full_model)
1012
1013 """
-> 1014 return _load_fasttext_format(model_file, encoding=encoding, full_model=full_model)
1015
1016 def load_binary_data(self, encoding='utf8'):
/anaconda3/envs/tensor_env/lib/python3.7/site-packages/gensim/models/fasttext.py in _load_fasttext_format(model_file, encoding, full_model)
1270 #
1271 # We explicitly set min_count=1 regardless of the model's parameters to
-> 1272 # ignore the trim rule when building the vocabulary. We do this in order
1273 # to support loading native models that were trained with pretrained vectors.
1274 # Such models will contain vectors for _all_ encountered words, not only
/anaconda3/envs/tensor_env/lib/python3.7/site-packages/gensim/models/keyedvectors.py in init_post_load(self, vectors, match_gensim)
2205 """
2206 vocab_words = len(self.vocab)
-> 2207 assert vectors.shape[0] == vocab_words + self.bucket, 'unexpected number of vectors'
2208 assert vectors.shape[1] == self.vector_size, 'unexpected vector dimensionality'
2209
AssertionError: unexpected number of vectors
```
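
The assertion requires the rebuilt vocabulary to contain exactly `vectors.shape[0] - bucket` entries, so the counts stop lining up as soon as even one vocabulary entry is lost or merged while the binary is read. A toy illustration of the failing check, using made-up sizes rather than anything read from `wiki-news-300d-1M-subword.bin`:

```python
import numpy as np

# Toy sizes (hypothetical): the fastText matrix stores one row per vocabulary word
# plus one row per hash bucket.
nwords, bucket, dim = 10, 20, 5
vectors = np.zeros((nwords + bucket, dim), dtype=np.float32)

# If the vocabulary gensim rebuilds ends up one word short, the check from the
# traceback above no longer holds.
vocab_words = nwords - 1
print(vectors.shape[0] == vocab_words + bucket)  # False -> "unexpected number of vectors"
```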
## Versions
Darwin-18.2.0-x86_64-i386-64bit
Python 3.7.2 (default, Dec 29 2018, 00:00:04)
[Clang 4.0.1 (tags/RELEASE_401/final)]
NumPy 1.16.1
SciPy 1.2.0
gensim 3.7.1
FAST_VERSION 1
#### thanks for your work!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gensim/models/_fasttext_bin.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Load models from the native binary format released by Facebook.
3
4 The main entry point is the :func:`~gensim.models._fasttext_bin.load` function.
5 It returns a :class:`~gensim.models._fasttext_bin.Model` namedtuple containing everything loaded from the binary.
6
7 Examples
8 --------
9
10 Load a model from a binary file:
11
12 .. sourcecode:: pycon
13
14 >>> from gensim.test.utils import datapath
15 >>> from gensim.models.fasttext_bin import load
16 >>> with open(datapath('crime-and-punishment.bin'), 'rb') as fin:
17 ... model = load(fin)
18 >>> model.nwords
19 291
20 >>> model.vectors_ngrams.shape
21 (391, 5)
22 >>> sorted(model.raw_vocab, key=lambda w: len(w), reverse=True)[:5]
23 ['останавливаться', 'изворачиваться,', 'раздражительном', 'exceptionally', 'проскользнуть']
24
25 See Also
26 --------
27
28 `FB Implementation <https://github.com/facebookresearch/fastText/blob/master/src/matrix.cc>`_.
29
30 """
31
32 import collections
33 import logging
34 import struct
35
36 import numpy as np
37
38 logger = logging.getLogger(__name__)
39
40 _FASTTEXT_FILEFORMAT_MAGIC = 793712314
41
42 _NEW_HEADER_FORMAT = [
43 ('dim', 'i'),
44 ('ws', 'i'),
45 ('epoch', 'i'),
46 ('min_count', 'i'),
47 ('neg', 'i'),
48 ('_', 'i'),
49 ('loss', 'i'),
50 ('model', 'i'),
51 ('bucket', 'i'),
52 ('minn', 'i'),
53 ('maxn', 'i'),
54 ('_', 'i'),
55 ('t', 'd'),
56 ]
57
58 _OLD_HEADER_FORMAT = [
59 ('epoch', 'i'),
60 ('min_count', 'i'),
61 ('neg', 'i'),
62 ('_', 'i'),
63 ('loss', 'i'),
64 ('model', 'i'),
65 ('bucket', 'i'),
66 ('minn', 'i'),
67 ('maxn', 'i'),
68 ('_', 'i'),
69 ('t', 'd'),
70 ]
71
72
73 def _yield_field_names():
74 for name, _ in _OLD_HEADER_FORMAT + _NEW_HEADER_FORMAT:
75 if not name.startswith('_'):
76 yield name
77 yield 'raw_vocab'
78 yield 'vocab_size'
79 yield 'nwords'
80 yield 'vectors_ngrams'
81 yield 'hidden_output'
82
83
84 _FIELD_NAMES = sorted(set(_yield_field_names()))
85 Model = collections.namedtuple('Model', _FIELD_NAMES)
86 """Holds data loaded from the Facebook binary.
87
88 Parameters
89 ----------
90 dim : int
91 The dimensionality of the vectors.
92 ws : int
93 The window size.
94 epoch : int
95 The number of training epochs.
96 neg : int
97 If non-zero, indicates that the model uses negative sampling.
98 loss : int
99 If equal to 1, indicates that the model uses hierarchical sampling.
100 model : int
101 If equal to 2, indicates that the model uses skip-grams.
102 bucket : int
103 The number of buckets.
104 min_count : int
105 The threshold below which the model ignores terms.
106 t : float
107 The sample threshold.
108 minn : int
109 The minimum ngram length.
110 maxn : int
111 The maximum ngram length.
112 raw_vocab : collections.OrderedDict
113 A map from words (str) to their frequency (int). The order in the dict
114 corresponds to the order of the words in the Facebook binary.
115 nwords : int
116 The number of words.
117 vocab_size : int
118 The size of the vocabulary.
119 vectors_ngrams : numpy.array
120 This is a matrix that contains vectors learned by the model.
121 Each row corresponds to a vector.
122 The number of vectors is equal to the number of words plus the number of buckets.
123 The number of columns is equal to the vector dimensionality.
124 hidden_output : numpy.array
125 This is a matrix that contains the shallow neural network output.
126 This array has the same dimensions as vectors_ngrams.
127 May be None - in that case, it is impossible to continue training the model.
128 """
129
130
131 def _struct_unpack(fin, fmt):
132 num_bytes = struct.calcsize(fmt)
133 return struct.unpack(fmt, fin.read(num_bytes))
134
135
136 def _load_vocab(fin, new_format, encoding='utf-8'):
137 """Load a vocabulary from a FB binary.
138
139 Before the vocab is ready for use, call the prepare_vocab function and pass
140 in the relevant parameters from the model.
141
142 Parameters
143 ----------
144 fin : file
145 An open file pointer to the binary.
146 new_format: boolean
147 True if the binary is of the newer format.
148 encoding : str
149 The encoding to use when decoding binary data into words.
150
151 Returns
152 -------
153 tuple
154 The loaded vocabulary. Keys are words, values are counts.
155 The vocabulary size.
156 The number of words.
157 """
158 vocab_size, nwords, nlabels = _struct_unpack(fin, '@3i')
159
160 # Vocab stored by [Dictionary::save](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)
161 if nlabels > 0:
162 raise NotImplementedError("Supervised fastText models are not supported")
163 logger.info("loading %s words for fastText model from %s", vocab_size, fin.name)
164
165 _struct_unpack(fin, '@1q') # number of tokens
166 if new_format:
167 pruneidx_size, = _struct_unpack(fin, '@q')
168
169 raw_vocab = collections.OrderedDict()
170 for i in range(vocab_size):
171 word_bytes = b''
172 char_byte = fin.read(1)
173 # Read vocab word
174 while char_byte != b'\x00':
175 word_bytes += char_byte
176 char_byte = fin.read(1)
177 word = word_bytes.decode(encoding)
178 count, _ = _struct_unpack(fin, '@qb')
179 raw_vocab[word] = count
180
181 if new_format:
182 for j in range(pruneidx_size):
183 _struct_unpack(fin, '@2i')
184
185 return raw_vocab, vocab_size, nwords
186
187
188 def _load_matrix(fin, new_format=True):
189 """Load a matrix from fastText native format.
190
191 Interprets the matrix dimensions and type from the file stream.
192
193 Parameters
194 ----------
195 fin : file
196 A file handle opened for reading.
197 new_format : bool, optional
198 True if the quant_input variable precedes
199 the matrix declaration. Should be True for newer versions of fastText.
200
201 Returns
202 -------
203 :class:`numpy.array`
204 The vectors as an array.
205 Each vector will be a row in the array.
206 The number of columns of the array will correspond to the vector size.
207
208 """
209 if new_format:
210 _struct_unpack(fin, '@?') # bool quant_input in fasttext.cc
211
212 num_vectors, dim = _struct_unpack(fin, '@2q')
213
214 float_size = struct.calcsize('@f')
215 if float_size == 4:
216 dtype = np.dtype(np.float32)
217 elif float_size == 8:
218 dtype = np.dtype(np.float64)
219 else:
220 raise ValueError("Incompatible float size: %r" % float_size)
221
222 matrix = np.fromfile(fin, dtype=dtype, count=num_vectors * dim)
223 matrix = matrix.reshape((num_vectors, dim))
224 return matrix
225
226
227 def load(fin, encoding='utf-8', full_model=True):
228 """Load a model from a binary stream.
229
230 Parameters
231 ----------
232 fin : file
233 The readable binary stream.
234 encoding : str, optional
235 The encoding to use for decoding text
236 full_model : boolean, optional
237 If False, skips loading the hidden output matrix. This saves a fair bit
238 of CPU time and RAM, but prevents training continuation.
239
240 Returns
241 -------
242 :class:`~gensim.models._fasttext_bin.Model`
243 The loaded model.
244
245 """
246 if isinstance(fin, str):
247 fin = open(fin, 'rb')
248
249 magic, version = _struct_unpack(fin, '@2i')
250 new_format = magic == _FASTTEXT_FILEFORMAT_MAGIC
251
252 header_spec = _NEW_HEADER_FORMAT if new_format else _OLD_HEADER_FORMAT
253 model = {name: _struct_unpack(fin, fmt)[0] for (name, fmt) in header_spec}
254 if not new_format:
255 model.update(dim=magic, ws=version)
256
257 raw_vocab, vocab_size, nwords = _load_vocab(fin, new_format, encoding=encoding)
258 model.update(raw_vocab=raw_vocab, vocab_size=vocab_size, nwords=nwords)
259
260 vectors_ngrams = _load_matrix(fin, new_format=new_format)
261
262 if not full_model:
263 hidden_output = None
264 else:
265 hidden_output = _load_matrix(fin, new_format=new_format)
266 assert fin.read() == b'', 'expected to reach EOF'
267
268 model.update(vectors_ngrams=vectors_ngrams, hidden_output=hidden_output)
269 model = {k: v for k, v in model.items() if k in _FIELD_NAMES}
270 return Model(**model)
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gensim/models/_fasttext_bin.py b/gensim/models/_fasttext_bin.py
--- a/gensim/models/_fasttext_bin.py
+++ b/gensim/models/_fasttext_bin.py
@@ -30,11 +30,14 @@
"""
import collections
+import io
import logging
import struct
import numpy as np
+_END_OF_WORD_MARKER = b'\x00'
+
logger = logging.getLogger(__name__)
_FASTTEXT_FILEFORMAT_MAGIC = 793712314
@@ -168,13 +171,22 @@
raw_vocab = collections.OrderedDict()
for i in range(vocab_size):
- word_bytes = b''
+ word_bytes = io.BytesIO()
char_byte = fin.read(1)
- # Read vocab word
- while char_byte != b'\x00':
- word_bytes += char_byte
+
+ while char_byte != _END_OF_WORD_MARKER:
+ word_bytes.write(char_byte)
char_byte = fin.read(1)
- word = word_bytes.decode(encoding)
+
+ word_bytes = word_bytes.getvalue()
+ try:
+ word = word_bytes.decode(encoding)
+ except UnicodeDecodeError:
+ word = word_bytes.decode(encoding, errors='ignore')
+ logger.error(
+ 'failed to decode invalid unicode bytes %r; ignoring invalid characters, using %r',
+ word_bytes, word
+ )
count, _ = _struct_unpack(fin, '@qb')
raw_vocab[word] = count
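
Read on its own, the loop introduced by the patch accumulates bytes into an `io.BytesIO` buffer up to the NUL terminator and falls back to lossy decoding when the bytes are not valid UTF-8. A minimal standalone sketch of that behaviour, fed hypothetical sample bytes through `io.BytesIO` instead of a real model file:

```python
import io

_END_OF_WORD_MARKER = b'\x00'

def read_word(fin, encoding='utf-8'):
    # Accumulate bytes until the NUL terminator, as in the patched _load_vocab.
    buf = io.BytesIO()
    char_byte = fin.read(1)
    while char_byte != _END_OF_WORD_MARKER:
        buf.write(char_byte)
        char_byte = fin.read(1)
    word_bytes = buf.getvalue()
    try:
        return word_bytes.decode(encoding)
    except UnicodeDecodeError:
        # Tolerate malformed entries instead of aborting the whole load.
        return word_bytes.decode(encoding, errors='ignore')

# Hypothetical stream: a valid word followed by one containing an invalid UTF-8 byte.
stream = io.BytesIO(b'hello\x00wor\xffld\x00')
print(read_word(stream))  # 'hello'
print(read_word(stream))  # 'world' (the invalid byte is dropped)
```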
| {"golden_diff": "diff --git a/gensim/models/_fasttext_bin.py b/gensim/models/_fasttext_bin.py\n--- a/gensim/models/_fasttext_bin.py\n+++ b/gensim/models/_fasttext_bin.py\n@@ -30,11 +30,14 @@\n \"\"\"\n \n import collections\n+import io\n import logging\n import struct\n \n import numpy as np\n \n+_END_OF_WORD_MARKER = b'\\x00'\n+\n logger = logging.getLogger(__name__)\n \n _FASTTEXT_FILEFORMAT_MAGIC = 793712314\n@@ -168,13 +171,22 @@\n \n raw_vocab = collections.OrderedDict()\n for i in range(vocab_size):\n- word_bytes = b''\n+ word_bytes = io.BytesIO()\n char_byte = fin.read(1)\n- # Read vocab word\n- while char_byte != b'\\x00':\n- word_bytes += char_byte\n+\n+ while char_byte != _END_OF_WORD_MARKER:\n+ word_bytes.write(char_byte)\n char_byte = fin.read(1)\n- word = word_bytes.decode(encoding)\n+\n+ word_bytes = word_bytes.getvalue()\n+ try:\n+ word = word_bytes.decode(encoding)\n+ except UnicodeDecodeError:\n+ word = word_bytes.decode(encoding, errors='ignore')\n+ logger.error(\n+ 'failed to decode invalid unicode bytes %r; ignoring invalid characters, using %r',\n+ word_bytes, word\n+ )\n count, _ = _struct_unpack(fin, '@qb')\n raw_vocab[word] = count\n", "issue": "loading fastText model trained with pretrained_vectors still fails (see: #2350)\n## Description\r\nLoading pretrained `fastext_model.bin` with `gensim.models.fasttext.FastText.load_fasttext_format('wiki-news-300d-1M-subword.bin')` fails with `AssertionError: unexpected number of vectors` despite fix for #2350.\r\n\r\n## Steps/Code/Corpus to Reproduce\r\nfirst install `develop` branch with: `pip install --upgrade git+git://github.com/RaRe-Technologies/gensim@develop`, then:\r\n```python\r\n#dependencies \r\nimport requests, zipfile, io\r\nfrom gensim.models.fasttext import FastText\r\n\r\n#download model\r\nft_url = 'https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki-news-300d-1M-subword.bin.zip'\r\nr = requests.get(ft_url)\r\nz = zipfile.ZipFile(io.BytesIO(r.content))\r\nz.extractall()\r\n\r\n#attempt load\r\nmod = FastText.load_fasttext_format('wiki-news-300d-1M-subword.bin')\r\n```\r\n\r\n#### Expected Results\r\nLoaded model.\r\n\r\n#### Actual Results\r\n```\r\n---------------------------------------------------------------------------\r\nAssertionError Traceback (most recent call last)\r\n<ipython-input-29-a054256d6f88> in <module>\r\n 1 #load model\r\n 2 from gensim.models.fasttext import FastText\r\n----> 3 mod = FastText.load_fasttext_format('wiki-news-300d-1M-subword.bin')\r\n 4 # from gensim.models import KeyedVectors\r\n 5 # wv = KeyedVectors.load_word2vec_format('wiki-news-300d-1M-subword.vec')\r\n\r\n/anaconda3/envs/tensor_env/lib/python3.7/site-packages/gensim/models/fasttext.py in load_fasttext_format(cls, model_file, encoding, full_model)\r\n 1012 \r\n 1013 \"\"\"\r\n-> 1014 return _load_fasttext_format(model_file, encoding=encoding, full_model=full_model)\r\n 1015 \r\n 1016 def load_binary_data(self, encoding='utf8'):\r\n\r\n/anaconda3/envs/tensor_env/lib/python3.7/site-packages/gensim/models/fasttext.py in _load_fasttext_format(model_file, encoding, full_model)\r\n 1270 #\r\n 1271 # We explicitly set min_count=1 regardless of the model's parameters to\r\n-> 1272 # ignore the trim rule when building the vocabulary. 
We do this in order\r\n 1273 # to support loading native models that were trained with pretrained vectors.\r\n 1274 # Such models will contain vectors for _all_ encountered words, not only\r\n\r\n/anaconda3/envs/tensor_env/lib/python3.7/site-packages/gensim/models/keyedvectors.py in init_post_load(self, vectors, match_gensim)\r\n 2205 \"\"\"\r\n 2206 vocab_words = len(self.vocab)\r\n-> 2207 assert vectors.shape[0] == vocab_words + self.bucket, 'unexpected number of vectors'\r\n 2208 assert vectors.shape[1] == self.vector_size, 'unexpected vector dimensionality'\r\n 2209 \r\n\r\nAssertionError: unexpected number of vectors\r\n```\r\n\r\n## Versions\r\nDarwin-18.2.0-x86_64-i386-64bit\r\nPython 3.7.2 (default, Dec 29 2018, 00:00:04) \r\n[Clang 4.0.1 (tags/RELEASE_401/final)]\r\nNumPy 1.16.1\r\nSciPy 1.2.0\r\ngensim 3.7.1\r\nFAST_VERSION 1\r\n\r\n#### thanks for your work! \r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Load models from the native binary format released by Facebook.\n\nThe main entry point is the :func:`~gensim.models._fasttext_bin.load` function.\nIt returns a :class:`~gensim.models._fasttext_bin.Model` namedtuple containing everything loaded from the binary.\n\nExamples\n--------\n\nLoad a model from a binary file:\n\n.. sourcecode:: pycon\n\n >>> from gensim.test.utils import datapath\n >>> from gensim.models.fasttext_bin import load\n >>> with open(datapath('crime-and-punishment.bin'), 'rb') as fin:\n ... model = load(fin)\n >>> model.nwords\n 291\n >>> model.vectors_ngrams.shape\n (391, 5)\n >>> sorted(model.raw_vocab, key=lambda w: len(w), reverse=True)[:5]\n ['\u043e\u0441\u0442\u0430\u043d\u0430\u0432\u043b\u0438\u0432\u0430\u0442\u044c\u0441\u044f', '\u0438\u0437\u0432\u043e\u0440\u0430\u0447\u0438\u0432\u0430\u0442\u044c\u0441\u044f,', '\u0440\u0430\u0437\u0434\u0440\u0430\u0436\u0438\u0442\u0435\u043b\u044c\u043d\u043e\u043c', 'exceptionally', '\u043f\u0440\u043e\u0441\u043a\u043e\u043b\u044c\u0437\u043d\u0443\u0442\u044c']\n\nSee Also\n--------\n\n`FB Implementation <https://github.com/facebookresearch/fastText/blob/master/src/matrix.cc>`_.\n\n\"\"\"\n\nimport collections\nimport logging\nimport struct\n\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\n_FASTTEXT_FILEFORMAT_MAGIC = 793712314\n\n_NEW_HEADER_FORMAT = [\n ('dim', 'i'),\n ('ws', 'i'),\n ('epoch', 'i'),\n ('min_count', 'i'),\n ('neg', 'i'),\n ('_', 'i'),\n ('loss', 'i'),\n ('model', 'i'),\n ('bucket', 'i'),\n ('minn', 'i'),\n ('maxn', 'i'),\n ('_', 'i'),\n ('t', 'd'),\n]\n\n_OLD_HEADER_FORMAT = [\n ('epoch', 'i'),\n ('min_count', 'i'),\n ('neg', 'i'),\n ('_', 'i'),\n ('loss', 'i'),\n ('model', 'i'),\n ('bucket', 'i'),\n ('minn', 'i'),\n ('maxn', 'i'),\n ('_', 'i'),\n ('t', 'd'),\n]\n\n\ndef _yield_field_names():\n for name, _ in _OLD_HEADER_FORMAT + _NEW_HEADER_FORMAT:\n if not name.startswith('_'):\n yield name\n yield 'raw_vocab'\n yield 'vocab_size'\n yield 'nwords'\n yield 'vectors_ngrams'\n yield 'hidden_output'\n\n\n_FIELD_NAMES = sorted(set(_yield_field_names()))\nModel = collections.namedtuple('Model', _FIELD_NAMES)\n\"\"\"Holds data loaded from the Facebook binary.\n\nParameters\n----------\ndim : int\n The dimensionality of the vectors.\nws : int\n The window size.\nepoch : int\n The number of training epochs.\nneg : int\n If non-zero, indicates that the model uses negative sampling.\nloss : int\n If equal to 1, indicates that the model uses hierarchical sampling.\nmodel : int\n If equal to 2, indicates that the model uses skip-grams.\nbucket : int\n The 
number of buckets.\nmin_count : int\n The threshold below which the model ignores terms.\nt : float\n The sample threshold.\nminn : int\n The minimum ngram length.\nmaxn : int\n The maximum ngram length.\nraw_vocab : collections.OrderedDict\n A map from words (str) to their frequency (int). The order in the dict\n corresponds to the order of the words in the Facebook binary.\nnwords : int\n The number of words.\nvocab_size : int\n The size of the vocabulary.\nvectors_ngrams : numpy.array\n This is a matrix that contains vectors learned by the model.\n Each row corresponds to a vector.\n The number of vectors is equal to the number of words plus the number of buckets.\n The number of columns is equal to the vector dimensionality.\nhidden_output : numpy.array\n This is a matrix that contains the shallow neural network output.\n This array has the same dimensions as vectors_ngrams.\n May be None - in that case, it is impossible to continue training the model.\n\"\"\"\n\n\ndef _struct_unpack(fin, fmt):\n num_bytes = struct.calcsize(fmt)\n return struct.unpack(fmt, fin.read(num_bytes))\n\n\ndef _load_vocab(fin, new_format, encoding='utf-8'):\n \"\"\"Load a vocabulary from a FB binary.\n\n Before the vocab is ready for use, call the prepare_vocab function and pass\n in the relevant parameters from the model.\n\n Parameters\n ----------\n fin : file\n An open file pointer to the binary.\n new_format: boolean\n True if the binary is of the newer format.\n encoding : str\n The encoding to use when decoding binary data into words.\n\n Returns\n -------\n tuple\n The loaded vocabulary. Keys are words, values are counts.\n The vocabulary size.\n The number of words.\n \"\"\"\n vocab_size, nwords, nlabels = _struct_unpack(fin, '@3i')\n\n # Vocab stored by [Dictionary::save](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)\n if nlabels > 0:\n raise NotImplementedError(\"Supervised fastText models are not supported\")\n logger.info(\"loading %s words for fastText model from %s\", vocab_size, fin.name)\n\n _struct_unpack(fin, '@1q') # number of tokens\n if new_format:\n pruneidx_size, = _struct_unpack(fin, '@q')\n\n raw_vocab = collections.OrderedDict()\n for i in range(vocab_size):\n word_bytes = b''\n char_byte = fin.read(1)\n # Read vocab word\n while char_byte != b'\\x00':\n word_bytes += char_byte\n char_byte = fin.read(1)\n word = word_bytes.decode(encoding)\n count, _ = _struct_unpack(fin, '@qb')\n raw_vocab[word] = count\n\n if new_format:\n for j in range(pruneidx_size):\n _struct_unpack(fin, '@2i')\n\n return raw_vocab, vocab_size, nwords\n\n\ndef _load_matrix(fin, new_format=True):\n \"\"\"Load a matrix from fastText native format.\n\n Interprets the matrix dimensions and type from the file stream.\n\n Parameters\n ----------\n fin : file\n A file handle opened for reading.\n new_format : bool, optional\n True if the quant_input variable precedes\n the matrix declaration. 
Should be True for newer versions of fastText.\n\n Returns\n -------\n :class:`numpy.array`\n The vectors as an array.\n Each vector will be a row in the array.\n The number of columns of the array will correspond to the vector size.\n\n \"\"\"\n if new_format:\n _struct_unpack(fin, '@?') # bool quant_input in fasttext.cc\n\n num_vectors, dim = _struct_unpack(fin, '@2q')\n\n float_size = struct.calcsize('@f')\n if float_size == 4:\n dtype = np.dtype(np.float32)\n elif float_size == 8:\n dtype = np.dtype(np.float64)\n else:\n raise ValueError(\"Incompatible float size: %r\" % float_size)\n\n matrix = np.fromfile(fin, dtype=dtype, count=num_vectors * dim)\n matrix = matrix.reshape((num_vectors, dim))\n return matrix\n\n\ndef load(fin, encoding='utf-8', full_model=True):\n \"\"\"Load a model from a binary stream.\n\n Parameters\n ----------\n fin : file\n The readable binary stream.\n encoding : str, optional\n The encoding to use for decoding text\n full_model : boolean, optional\n If False, skips loading the hidden output matrix. This saves a fair bit\n of CPU time and RAM, but prevents training continuation.\n\n Returns\n -------\n :class:`~gensim.models._fasttext_bin.Model`\n The loaded model.\n\n \"\"\"\n if isinstance(fin, str):\n fin = open(fin, 'rb')\n\n magic, version = _struct_unpack(fin, '@2i')\n new_format = magic == _FASTTEXT_FILEFORMAT_MAGIC\n\n header_spec = _NEW_HEADER_FORMAT if new_format else _OLD_HEADER_FORMAT\n model = {name: _struct_unpack(fin, fmt)[0] for (name, fmt) in header_spec}\n if not new_format:\n model.update(dim=magic, ws=version)\n\n raw_vocab, vocab_size, nwords = _load_vocab(fin, new_format, encoding=encoding)\n model.update(raw_vocab=raw_vocab, vocab_size=vocab_size, nwords=nwords)\n\n vectors_ngrams = _load_matrix(fin, new_format=new_format)\n\n if not full_model:\n hidden_output = None\n else:\n hidden_output = _load_matrix(fin, new_format=new_format)\n assert fin.read() == b'', 'expected to reach EOF'\n\n model.update(vectors_ngrams=vectors_ngrams, hidden_output=hidden_output)\n model = {k: v for k, v in model.items() if k in _FIELD_NAMES}\n return Model(**model)\n", "path": "gensim/models/_fasttext_bin.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Load models from the native binary format released by Facebook.\n\nThe main entry point is the :func:`~gensim.models._fasttext_bin.load` function.\nIt returns a :class:`~gensim.models._fasttext_bin.Model` namedtuple containing everything loaded from the binary.\n\nExamples\n--------\n\nLoad a model from a binary file:\n\n.. sourcecode:: pycon\n\n >>> from gensim.test.utils import datapath\n >>> from gensim.models.fasttext_bin import load\n >>> with open(datapath('crime-and-punishment.bin'), 'rb') as fin:\n ... 
model = load(fin)\n >>> model.nwords\n 291\n >>> model.vectors_ngrams.shape\n (391, 5)\n >>> sorted(model.raw_vocab, key=lambda w: len(w), reverse=True)[:5]\n ['\u043e\u0441\u0442\u0430\u043d\u0430\u0432\u043b\u0438\u0432\u0430\u0442\u044c\u0441\u044f', '\u0438\u0437\u0432\u043e\u0440\u0430\u0447\u0438\u0432\u0430\u0442\u044c\u0441\u044f,', '\u0440\u0430\u0437\u0434\u0440\u0430\u0436\u0438\u0442\u0435\u043b\u044c\u043d\u043e\u043c', 'exceptionally', '\u043f\u0440\u043e\u0441\u043a\u043e\u043b\u044c\u0437\u043d\u0443\u0442\u044c']\n\nSee Also\n--------\n\n`FB Implementation <https://github.com/facebookresearch/fastText/blob/master/src/matrix.cc>`_.\n\n\"\"\"\n\nimport collections\nimport io\nimport logging\nimport struct\n\nimport numpy as np\n\n_END_OF_WORD_MARKER = b'\\x00'\n\nlogger = logging.getLogger(__name__)\n\n_FASTTEXT_FILEFORMAT_MAGIC = 793712314\n\n_NEW_HEADER_FORMAT = [\n ('dim', 'i'),\n ('ws', 'i'),\n ('epoch', 'i'),\n ('min_count', 'i'),\n ('neg', 'i'),\n ('_', 'i'),\n ('loss', 'i'),\n ('model', 'i'),\n ('bucket', 'i'),\n ('minn', 'i'),\n ('maxn', 'i'),\n ('_', 'i'),\n ('t', 'd'),\n]\n\n_OLD_HEADER_FORMAT = [\n ('epoch', 'i'),\n ('min_count', 'i'),\n ('neg', 'i'),\n ('_', 'i'),\n ('loss', 'i'),\n ('model', 'i'),\n ('bucket', 'i'),\n ('minn', 'i'),\n ('maxn', 'i'),\n ('_', 'i'),\n ('t', 'd'),\n]\n\n\ndef _yield_field_names():\n for name, _ in _OLD_HEADER_FORMAT + _NEW_HEADER_FORMAT:\n if not name.startswith('_'):\n yield name\n yield 'raw_vocab'\n yield 'vocab_size'\n yield 'nwords'\n yield 'vectors_ngrams'\n yield 'hidden_output'\n\n\n_FIELD_NAMES = sorted(set(_yield_field_names()))\nModel = collections.namedtuple('Model', _FIELD_NAMES)\n\"\"\"Holds data loaded from the Facebook binary.\n\nParameters\n----------\ndim : int\n The dimensionality of the vectors.\nws : int\n The window size.\nepoch : int\n The number of training epochs.\nneg : int\n If non-zero, indicates that the model uses negative sampling.\nloss : int\n If equal to 1, indicates that the model uses hierarchical sampling.\nmodel : int\n If equal to 2, indicates that the model uses skip-grams.\nbucket : int\n The number of buckets.\nmin_count : int\n The threshold below which the model ignores terms.\nt : float\n The sample threshold.\nminn : int\n The minimum ngram length.\nmaxn : int\n The maximum ngram length.\nraw_vocab : collections.OrderedDict\n A map from words (str) to their frequency (int). 
The order in the dict\n corresponds to the order of the words in the Facebook binary.\nnwords : int\n The number of words.\nvocab_size : int\n The size of the vocabulary.\nvectors_ngrams : numpy.array\n This is a matrix that contains vectors learned by the model.\n Each row corresponds to a vector.\n The number of vectors is equal to the number of words plus the number of buckets.\n The number of columns is equal to the vector dimensionality.\nhidden_output : numpy.array\n This is a matrix that contains the shallow neural network output.\n This array has the same dimensions as vectors_ngrams.\n May be None - in that case, it is impossible to continue training the model.\n\"\"\"\n\n\ndef _struct_unpack(fin, fmt):\n num_bytes = struct.calcsize(fmt)\n return struct.unpack(fmt, fin.read(num_bytes))\n\n\ndef _load_vocab(fin, new_format, encoding='utf-8'):\n \"\"\"Load a vocabulary from a FB binary.\n\n Before the vocab is ready for use, call the prepare_vocab function and pass\n in the relevant parameters from the model.\n\n Parameters\n ----------\n fin : file\n An open file pointer to the binary.\n new_format: boolean\n True if the binary is of the newer format.\n encoding : str\n The encoding to use when decoding binary data into words.\n\n Returns\n -------\n tuple\n The loaded vocabulary. Keys are words, values are counts.\n The vocabulary size.\n The number of words.\n \"\"\"\n vocab_size, nwords, nlabels = _struct_unpack(fin, '@3i')\n\n # Vocab stored by [Dictionary::save](https://github.com/facebookresearch/fastText/blob/master/src/dictionary.cc)\n if nlabels > 0:\n raise NotImplementedError(\"Supervised fastText models are not supported\")\n logger.info(\"loading %s words for fastText model from %s\", vocab_size, fin.name)\n\n _struct_unpack(fin, '@1q') # number of tokens\n if new_format:\n pruneidx_size, = _struct_unpack(fin, '@q')\n\n raw_vocab = collections.OrderedDict()\n for i in range(vocab_size):\n word_bytes = io.BytesIO()\n char_byte = fin.read(1)\n\n while char_byte != _END_OF_WORD_MARKER:\n word_bytes.write(char_byte)\n char_byte = fin.read(1)\n\n word_bytes = word_bytes.getvalue()\n try:\n word = word_bytes.decode(encoding)\n except UnicodeDecodeError:\n word = word_bytes.decode(encoding, errors='ignore')\n logger.error(\n 'failed to decode invalid unicode bytes %r; ignoring invalid characters, using %r',\n word_bytes, word\n )\n count, _ = _struct_unpack(fin, '@qb')\n raw_vocab[word] = count\n\n if new_format:\n for j in range(pruneidx_size):\n _struct_unpack(fin, '@2i')\n\n return raw_vocab, vocab_size, nwords\n\n\ndef _load_matrix(fin, new_format=True):\n \"\"\"Load a matrix from fastText native format.\n\n Interprets the matrix dimensions and type from the file stream.\n\n Parameters\n ----------\n fin : file\n A file handle opened for reading.\n new_format : bool, optional\n True if the quant_input variable precedes\n the matrix declaration. 
Should be True for newer versions of fastText.\n\n Returns\n -------\n :class:`numpy.array`\n The vectors as an array.\n Each vector will be a row in the array.\n The number of columns of the array will correspond to the vector size.\n\n \"\"\"\n if new_format:\n _struct_unpack(fin, '@?') # bool quant_input in fasttext.cc\n\n num_vectors, dim = _struct_unpack(fin, '@2q')\n\n float_size = struct.calcsize('@f')\n if float_size == 4:\n dtype = np.dtype(np.float32)\n elif float_size == 8:\n dtype = np.dtype(np.float64)\n else:\n raise ValueError(\"Incompatible float size: %r\" % float_size)\n\n matrix = np.fromfile(fin, dtype=dtype, count=num_vectors * dim)\n matrix = matrix.reshape((num_vectors, dim))\n return matrix\n\n\ndef load(fin, encoding='utf-8', full_model=True):\n \"\"\"Load a model from a binary stream.\n\n Parameters\n ----------\n fin : file\n The readable binary stream.\n encoding : str, optional\n The encoding to use for decoding text\n full_model : boolean, optional\n If False, skips loading the hidden output matrix. This saves a fair bit\n of CPU time and RAM, but prevents training continuation.\n\n Returns\n -------\n :class:`~gensim.models._fasttext_bin.Model`\n The loaded model.\n\n \"\"\"\n if isinstance(fin, str):\n fin = open(fin, 'rb')\n\n magic, version = _struct_unpack(fin, '@2i')\n new_format = magic == _FASTTEXT_FILEFORMAT_MAGIC\n\n header_spec = _NEW_HEADER_FORMAT if new_format else _OLD_HEADER_FORMAT\n model = {name: _struct_unpack(fin, fmt)[0] for (name, fmt) in header_spec}\n if not new_format:\n model.update(dim=magic, ws=version)\n\n raw_vocab, vocab_size, nwords = _load_vocab(fin, new_format, encoding=encoding)\n model.update(raw_vocab=raw_vocab, vocab_size=vocab_size, nwords=nwords)\n\n vectors_ngrams = _load_matrix(fin, new_format=new_format)\n\n if not full_model:\n hidden_output = None\n else:\n hidden_output = _load_matrix(fin, new_format=new_format)\n assert fin.read() == b'', 'expected to reach EOF'\n\n model.update(vectors_ngrams=vectors_ngrams, hidden_output=hidden_output)\n model = {k: v for k, v in model.items() if k in _FIELD_NAMES}\n return Model(**model)\n", "path": "gensim/models/_fasttext_bin.py"}]} | 3,823 | 346 |
gh_patches_debug_15567 | rasdani/github-patches | git_diff | arviz-devs__arviz-1761 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adding plot_forest example for multiple datasets
I was thinking that we could add an example of how to use `plot_forest` with multiple datasets. It would help newcomers, and the example would also let us demonstrate the legend feature, which is only available when plotting multiple datasets.
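
A minimal sketch of the kind of example being proposed, assuming ArviZ's bundled `centered_eight` and `non_centered_eight` example datasets, could look like this (the `model_names` entries feed the legend):

```python
import arviz as az

# Two fits of the same eight-schools data; passing a list of datasets enables the legend.
centered = az.load_arviz_data("centered_eight")
non_centered = az.load_arviz_data("non_centered_eight")

axes = az.plot_forest(
    [non_centered, centered],
    model_names=["non centered eight", "centered eight"],
    var_names=["theta"],
    combined=True,
    figsize=(9, 7),
)
axes[0].set_title("Estimated theta for the 8 schools models")
```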
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `arviz/plots/forestplot.py`
Content:
```
1 """Forest plot."""
2 from ..data import convert_to_dataset
3 from ..labels import BaseLabeller, NoModelLabeller
4 from ..rcparams import rcParams
5 from ..utils import _var_names, get_coords
6 from .plot_utils import get_plotting_function
7
8
9 def plot_forest(
10 data,
11 kind="forestplot",
12 model_names=None,
13 var_names=None,
14 filter_vars=None,
15 transform=None,
16 coords=None,
17 combined=False,
18 hdi_prob=None,
19 rope=None,
20 quartiles=True,
21 ess=False,
22 r_hat=False,
23 colors="cycle",
24 textsize=None,
25 linewidth=None,
26 markersize=None,
27 legend=True,
28 labeller=None,
29 ridgeplot_alpha=None,
30 ridgeplot_overlap=2,
31 ridgeplot_kind="auto",
32 ridgeplot_truncate=True,
33 ridgeplot_quantiles=None,
34 figsize=None,
35 ax=None,
36 backend=None,
37 backend_config=None,
38 backend_kwargs=None,
39 show=None,
40 ):
41 """Forest plot to compare HDI intervals from a number of distributions.
42
43 Generates a forest plot of 100*(hdi_prob)% HDI intervals from a trace or list of traces.
44
45 Parameters
46 ----------
47 data: obj or list[obj]
48 Any object that can be converted to an az.InferenceData object
49 Refer to documentation of az.convert_to_dataset for details
50 kind: str
51 Choose kind of plot for main axis. Supports "forestplot" or "ridgeplot"
52 model_names: list[str], optional
53 List with names for the models in the list of data. Useful when plotting more that one
54 dataset
55 var_names: list[str], optional
56 List of variables to plot (defaults to None, which results in all variables plotted)
57 Prefix the variables by `~` when you want to exclude them from the plot.
58 filter_vars: {None, "like", "regex"}, optional, default=None
59 If `None` (default), interpret var_names as the real variables names. If "like", interpret
60 var_names as substrings of the real variables names. If "regex", interpret var_names as
61 regular expressions on the real variables names. A la `pandas.filter`.
62 transform: callable
63 Function to transform data (defaults to None i.e.the identity function)
64 coords: dict, optional
65 Coordinates of var_names to be plotted. Passed to `Dataset.sel`
66 combined: bool
67 Flag for combining multiple chains into a single chain. If False (default), chains will be
68 plotted separately.
69 hdi_prob: float, optional
70 Plots highest posterior density interval for chosen percentage of density. Defaults to 0.94.
71 rope: tuple or dictionary of tuples
72 Lower and upper values of the Region Of Practical Equivalence. If a list with one interval
73 only is provided, the ROPE will be displayed across the y-axis. If more than one interval is
74 provided the length of the list should match the number of variables.
75 quartiles: bool, optional
76 Flag for plotting the interquartile range, in addition to the hdi_prob intervals.
77 Defaults to True
78 r_hat: bool, optional
79 Flag for plotting Split R-hat statistics. Requires 2 or more chains. Defaults to False
80 ess: bool, optional
81 Flag for plotting the effective sample size. Defaults to False
82 colors: list or string, optional
83 list with valid matplotlib colors, one color per model. Alternative a string can be passed.
84 If the string is `cycle`, it will automatically chose a color per model from the matplotlibs
85 cycle. If a single color is passed, eg 'k', 'C2', 'red' this color will be used for all
86 models. Defaults to 'cycle'.
87 textsize: float
88 Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
89 on figsize.
90 linewidth: int
91 Line width throughout. If None it will be autoscaled based on figsize.
92 markersize: int
93 Markersize throughout. If None it will be autoscaled based on figsize.
94 legend : bool, optional
95 Show a legend with the color encoded model information.
96 Defaults to true if there are multiple models
97 labeller : labeller instance, optional
98 Class providing the method `make_model_label` to generate the labels in the plot.
99 Read the :ref:`label_guide` for more details and usage examples.
100 ridgeplot_alpha: float
101 Transparency for ridgeplot fill. If 0, border is colored by model, otherwise
102 a black outline is used.
103 ridgeplot_overlap: float
104 Overlap height for ridgeplots.
105 ridgeplot_kind: string
106 By default ("auto") continuous variables are plotted using KDEs and discrete ones using
107 histograms. To override this use "hist" to plot histograms and "density" for KDEs
108 ridgeplot_truncate: bool
109 Whether to truncate densities according to the value of hdi_prop. Defaults to True
110 ridgeplot_quantiles: list
111 Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.
112 Defaults to None.
113 figsize: tuple
114 Figure size. If None it will be defined automatically.
115 ax: axes, optional
116 Matplotlib axes or bokeh figures.
117 backend: str, optional
118 Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
119 backend_config: dict, optional
120 Currently specifies the bounds to use for bokeh axes. Defaults to value set in rcParams.
121 backend_kwargs: bool, optional
122 These are kwargs specific to the backend being used. For additional documentation
123 check the plotting method of the backend.
124 show: bool, optional
125 Call backend show function.
126
127 Returns
128 -------
129 gridspec: matplotlib GridSpec or bokeh figures
130
131 Examples
132 --------
133 Forestpĺot
134
135 .. plot::
136 :context: close-figs
137
138 >>> import arviz as az
139 >>> non_centered_data = az.load_arviz_data('non_centered_eight')
140 >>> axes = az.plot_forest(non_centered_data,
141 >>> kind='forestplot',
142 >>> var_names=["^the"],
143 >>> filter_vars="regex",
144 >>> combined=True,
145 >>> figsize=(9, 7))
146 >>> axes[0].set_title('Estimated theta for 8 schools model')
147
148 Forestpĺot with ropes
149
150 .. plot::
151 :context: close-figs
152
153 >>> rope = {'theta': [{'school': 'Choate', 'rope': (2, 4)}], 'mu': [{'rope': (-2, 2)}]}
154 >>> axes = az.plot_forest(non_centered_data,
155 >>> rope=rope,
156 >>> var_names='~tau',
157 >>> combined=True,
158 >>> figsize=(9, 7))
159 >>> axes[0].set_title('Estimated theta for 8 schools model')
160
161
162 Ridgeplot
163
164 .. plot::
165 :context: close-figs
166
167 >>> axes = az.plot_forest(non_centered_data,
168 >>> kind='ridgeplot',
169 >>> var_names=['theta'],
170 >>> combined=True,
171 >>> ridgeplot_overlap=3,
172 >>> colors='white',
173 >>> figsize=(9, 7))
174 >>> axes[0].set_title('Estimated theta for 8 schools model')
175
176 Ridgeplot non-truncated and with quantiles
177
178 .. plot::
179 :context: close-figs
180
181 >>> axes = az.plot_forest(non_centered_data,
182 >>> kind='ridgeplot',
183 >>> var_names=['theta'],
184 >>> combined=True,
185 >>> ridgeplot_truncate=False,
186 >>> ridgeplot_quantiles=[.25, .5, .75],
187 >>> ridgeplot_overlap=0.7,
188 >>> colors='white',
189 >>> figsize=(9, 7))
190 >>> axes[0].set_title('Estimated theta for 8 schools model')
191 """
192 if not isinstance(data, (list, tuple)):
193 data = [data]
194 if len(data) == 1:
195 legend = False
196
197 if coords is None:
198 coords = {}
199
200 if labeller is None:
201 labeller = NoModelLabeller() if legend else BaseLabeller()
202
203 datasets = [convert_to_dataset(datum) for datum in reversed(data)]
204 if transform is not None:
205 datasets = [transform(dataset) for dataset in datasets]
206 datasets = get_coords(
207 datasets, list(reversed(coords)) if isinstance(coords, (list, tuple)) else coords
208 )
209
210 var_names = _var_names(var_names, datasets, filter_vars)
211
212 ncols, width_ratios = 1, [3]
213
214 if ess:
215 ncols += 1
216 width_ratios.append(1)
217
218 if r_hat:
219 ncols += 1
220 width_ratios.append(1)
221
222 if hdi_prob is None:
223 hdi_prob = rcParams["stats.hdi_prob"]
224 else:
225 if not 1 >= hdi_prob > 0:
226 raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
227
228 plot_forest_kwargs = dict(
229 ax=ax,
230 datasets=datasets,
231 var_names=var_names,
232 model_names=model_names,
233 combined=combined,
234 colors=colors,
235 figsize=figsize,
236 width_ratios=width_ratios,
237 linewidth=linewidth,
238 markersize=markersize,
239 kind=kind,
240 ncols=ncols,
241 hdi_prob=hdi_prob,
242 quartiles=quartiles,
243 rope=rope,
244 ridgeplot_overlap=ridgeplot_overlap,
245 ridgeplot_alpha=ridgeplot_alpha,
246 ridgeplot_kind=ridgeplot_kind,
247 ridgeplot_truncate=ridgeplot_truncate,
248 ridgeplot_quantiles=ridgeplot_quantiles,
249 textsize=textsize,
250 legend=legend,
251 labeller=labeller,
252 ess=ess,
253 r_hat=r_hat,
254 backend_kwargs=backend_kwargs,
255 backend_config=backend_config,
256 show=show,
257 )
258
259 if backend is None:
260 backend = rcParams["plot.backend"]
261 backend = backend.lower()
262
263 # TODO: Add backend kwargs
264 plot = get_plotting_function("plot_forest", "forestplot", backend)
265 axes = plot(**plot_forest_kwargs)
266 return axes
267
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/arviz/plots/forestplot.py b/arviz/plots/forestplot.py
--- a/arviz/plots/forestplot.py
+++ b/arviz/plots/forestplot.py
@@ -145,6 +145,21 @@
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
+ Forestplot with multiple datasets
+
+ .. plot::
+ :context: close-figs
+
+ >>> centered_data = az.load_arviz_data('centered_eight')
+ >>> axes = az.plot_forest([non_centered_data, centered_data],
+ >>> model_names = ["non centered eight", "centered eight"],
+ >>> kind='forestplot',
+ >>> var_names=["^the"],
+ >>> filter_vars="regex",
+ >>> combined=True,
+ >>> figsize=(9, 7))
+ >>> axes[0].set_title('Estimated theta for 8 schools models')
+
Forestpĺot with ropes
.. plot::
| {"golden_diff": "diff --git a/arviz/plots/forestplot.py b/arviz/plots/forestplot.py\n--- a/arviz/plots/forestplot.py\n+++ b/arviz/plots/forestplot.py\n@@ -145,6 +145,21 @@\n >>> figsize=(9, 7))\n >>> axes[0].set_title('Estimated theta for 8 schools model')\n \n+ Forestplot with multiple datasets\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> centered_data = az.load_arviz_data('centered_eight')\n+ >>> axes = az.plot_forest([non_centered_data, centered_data],\n+ >>> model_names = [\"non centered eight\", \"centered eight\"],\n+ >>> kind='forestplot',\n+ >>> var_names=[\"^the\"],\n+ >>> filter_vars=\"regex\",\n+ >>> combined=True,\n+ >>> figsize=(9, 7))\n+ >>> axes[0].set_title('Estimated theta for 8 schools models')\n+\n Forestp\u013aot with ropes\n \n .. plot::\n", "issue": "Adding plot_forest example for multiple datasets\nI was thinking that we could add an example of how to use `plot_forest` for multiple datasets. It would help newcomers and also the example would help us demonstrate the feature of legends as it is only valid for multiple datasets\n", "before_files": [{"content": "\"\"\"Forest plot.\"\"\"\nfrom ..data import convert_to_dataset\nfrom ..labels import BaseLabeller, NoModelLabeller\nfrom ..rcparams import rcParams\nfrom ..utils import _var_names, get_coords\nfrom .plot_utils import get_plotting_function\n\n\ndef plot_forest(\n data,\n kind=\"forestplot\",\n model_names=None,\n var_names=None,\n filter_vars=None,\n transform=None,\n coords=None,\n combined=False,\n hdi_prob=None,\n rope=None,\n quartiles=True,\n ess=False,\n r_hat=False,\n colors=\"cycle\",\n textsize=None,\n linewidth=None,\n markersize=None,\n legend=True,\n labeller=None,\n ridgeplot_alpha=None,\n ridgeplot_overlap=2,\n ridgeplot_kind=\"auto\",\n ridgeplot_truncate=True,\n ridgeplot_quantiles=None,\n figsize=None,\n ax=None,\n backend=None,\n backend_config=None,\n backend_kwargs=None,\n show=None,\n):\n \"\"\"Forest plot to compare HDI intervals from a number of distributions.\n\n Generates a forest plot of 100*(hdi_prob)% HDI intervals from a trace or list of traces.\n\n Parameters\n ----------\n data: obj or list[obj]\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n kind: str\n Choose kind of plot for main axis. Supports \"forestplot\" or \"ridgeplot\"\n model_names: list[str], optional\n List with names for the models in the list of data. Useful when plotting more that one\n dataset\n var_names: list[str], optional\n List of variables to plot (defaults to None, which results in all variables plotted)\n Prefix the variables by `~` when you want to exclude them from the plot.\n filter_vars: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret var_names as the real variables names. If \"like\", interpret\n var_names as substrings of the real variables names. If \"regex\", interpret var_names as\n regular expressions on the real variables names. A la `pandas.filter`.\n transform: callable\n Function to transform data (defaults to None i.e.the identity function)\n coords: dict, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n combined: bool\n Flag for combining multiple chains into a single chain. If False (default), chains will be\n plotted separately.\n hdi_prob: float, optional\n Plots highest posterior density interval for chosen percentage of density. 
Defaults to 0.94.\n rope: tuple or dictionary of tuples\n Lower and upper values of the Region Of Practical Equivalence. If a list with one interval\n only is provided, the ROPE will be displayed across the y-axis. If more than one interval is\n provided the length of the list should match the number of variables.\n quartiles: bool, optional\n Flag for plotting the interquartile range, in addition to the hdi_prob intervals.\n Defaults to True\n r_hat: bool, optional\n Flag for plotting Split R-hat statistics. Requires 2 or more chains. Defaults to False\n ess: bool, optional\n Flag for plotting the effective sample size. Defaults to False\n colors: list or string, optional\n list with valid matplotlib colors, one color per model. Alternative a string can be passed.\n If the string is `cycle`, it will automatically chose a color per model from the matplotlibs\n cycle. If a single color is passed, eg 'k', 'C2', 'red' this color will be used for all\n models. Defaults to 'cycle'.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n linewidth: int\n Line width throughout. If None it will be autoscaled based on figsize.\n markersize: int\n Markersize throughout. If None it will be autoscaled based on figsize.\n legend : bool, optional\n Show a legend with the color encoded model information.\n Defaults to true if there are multiple models\n labeller : labeller instance, optional\n Class providing the method `make_model_label` to generate the labels in the plot.\n Read the :ref:`label_guide` for more details and usage examples.\n ridgeplot_alpha: float\n Transparency for ridgeplot fill. If 0, border is colored by model, otherwise\n a black outline is used.\n ridgeplot_overlap: float\n Overlap height for ridgeplots.\n ridgeplot_kind: string\n By default (\"auto\") continuous variables are plotted using KDEs and discrete ones using\n histograms. To override this use \"hist\" to plot histograms and \"density\" for KDEs\n ridgeplot_truncate: bool\n Whether to truncate densities according to the value of hdi_prop. Defaults to True\n ridgeplot_quantiles: list\n Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.\n Defaults to None.\n figsize: tuple\n Figure size. If None it will be defined automatically.\n ax: axes, optional\n Matplotlib axes or bokeh figures.\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_config: dict, optional\n Currently specifies the bounds to use for bokeh axes. Defaults to value set in rcParams.\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n show: bool, optional\n Call backend show function.\n\n Returns\n -------\n gridspec: matplotlib GridSpec or bokeh figures\n\n Examples\n --------\n Forestp\u013aot\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> non_centered_data = az.load_arviz_data('non_centered_eight')\n >>> axes = az.plot_forest(non_centered_data,\n >>> kind='forestplot',\n >>> var_names=[\"^the\"],\n >>> filter_vars=\"regex\",\n >>> combined=True,\n >>> figsize=(9, 7))\n >>> axes[0].set_title('Estimated theta for 8 schools model')\n\n Forestp\u013aot with ropes\n\n .. 
plot::\n :context: close-figs\n\n >>> rope = {'theta': [{'school': 'Choate', 'rope': (2, 4)}], 'mu': [{'rope': (-2, 2)}]}\n >>> axes = az.plot_forest(non_centered_data,\n >>> rope=rope,\n >>> var_names='~tau',\n >>> combined=True,\n >>> figsize=(9, 7))\n >>> axes[0].set_title('Estimated theta for 8 schools model')\n\n\n Ridgeplot\n\n .. plot::\n :context: close-figs\n\n >>> axes = az.plot_forest(non_centered_data,\n >>> kind='ridgeplot',\n >>> var_names=['theta'],\n >>> combined=True,\n >>> ridgeplot_overlap=3,\n >>> colors='white',\n >>> figsize=(9, 7))\n >>> axes[0].set_title('Estimated theta for 8 schools model')\n\n Ridgeplot non-truncated and with quantiles\n\n .. plot::\n :context: close-figs\n\n >>> axes = az.plot_forest(non_centered_data,\n >>> kind='ridgeplot',\n >>> var_names=['theta'],\n >>> combined=True,\n >>> ridgeplot_truncate=False,\n >>> ridgeplot_quantiles=[.25, .5, .75],\n >>> ridgeplot_overlap=0.7,\n >>> colors='white',\n >>> figsize=(9, 7))\n >>> axes[0].set_title('Estimated theta for 8 schools model')\n \"\"\"\n if not isinstance(data, (list, tuple)):\n data = [data]\n if len(data) == 1:\n legend = False\n\n if coords is None:\n coords = {}\n\n if labeller is None:\n labeller = NoModelLabeller() if legend else BaseLabeller()\n\n datasets = [convert_to_dataset(datum) for datum in reversed(data)]\n if transform is not None:\n datasets = [transform(dataset) for dataset in datasets]\n datasets = get_coords(\n datasets, list(reversed(coords)) if isinstance(coords, (list, tuple)) else coords\n )\n\n var_names = _var_names(var_names, datasets, filter_vars)\n\n ncols, width_ratios = 1, [3]\n\n if ess:\n ncols += 1\n width_ratios.append(1)\n\n if r_hat:\n ncols += 1\n width_ratios.append(1)\n\n if hdi_prob is None:\n hdi_prob = rcParams[\"stats.hdi_prob\"]\n else:\n if not 1 >= hdi_prob > 0:\n raise ValueError(\"The value of hdi_prob should be in the interval (0, 1]\")\n\n plot_forest_kwargs = dict(\n ax=ax,\n datasets=datasets,\n var_names=var_names,\n model_names=model_names,\n combined=combined,\n colors=colors,\n figsize=figsize,\n width_ratios=width_ratios,\n linewidth=linewidth,\n markersize=markersize,\n kind=kind,\n ncols=ncols,\n hdi_prob=hdi_prob,\n quartiles=quartiles,\n rope=rope,\n ridgeplot_overlap=ridgeplot_overlap,\n ridgeplot_alpha=ridgeplot_alpha,\n ridgeplot_kind=ridgeplot_kind,\n ridgeplot_truncate=ridgeplot_truncate,\n ridgeplot_quantiles=ridgeplot_quantiles,\n textsize=textsize,\n legend=legend,\n labeller=labeller,\n ess=ess,\n r_hat=r_hat,\n backend_kwargs=backend_kwargs,\n backend_config=backend_config,\n show=show,\n )\n\n if backend is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_forest\", \"forestplot\", backend)\n axes = plot(**plot_forest_kwargs)\n return axes\n", "path": "arviz/plots/forestplot.py"}], "after_files": [{"content": "\"\"\"Forest plot.\"\"\"\nfrom ..data import convert_to_dataset\nfrom ..labels import BaseLabeller, NoModelLabeller\nfrom ..rcparams import rcParams\nfrom ..utils import _var_names, get_coords\nfrom .plot_utils import get_plotting_function\n\n\ndef plot_forest(\n data,\n kind=\"forestplot\",\n model_names=None,\n var_names=None,\n filter_vars=None,\n transform=None,\n coords=None,\n combined=False,\n hdi_prob=None,\n rope=None,\n quartiles=True,\n ess=False,\n r_hat=False,\n colors=\"cycle\",\n textsize=None,\n linewidth=None,\n markersize=None,\n legend=True,\n labeller=None,\n ridgeplot_alpha=None,\n ridgeplot_overlap=2,\n 
ridgeplot_kind=\"auto\",\n ridgeplot_truncate=True,\n ridgeplot_quantiles=None,\n figsize=None,\n ax=None,\n backend=None,\n backend_config=None,\n backend_kwargs=None,\n show=None,\n):\n \"\"\"Forest plot to compare HDI intervals from a number of distributions.\n\n Generates a forest plot of 100*(hdi_prob)% HDI intervals from a trace or list of traces.\n\n Parameters\n ----------\n data: obj or list[obj]\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n kind: str\n Choose kind of plot for main axis. Supports \"forestplot\" or \"ridgeplot\"\n model_names: list[str], optional\n List with names for the models in the list of data. Useful when plotting more that one\n dataset\n var_names: list[str], optional\n List of variables to plot (defaults to None, which results in all variables plotted)\n Prefix the variables by `~` when you want to exclude them from the plot.\n filter_vars: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret var_names as the real variables names. If \"like\", interpret\n var_names as substrings of the real variables names. If \"regex\", interpret var_names as\n regular expressions on the real variables names. A la `pandas.filter`.\n transform: callable\n Function to transform data (defaults to None i.e.the identity function)\n coords: dict, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n combined: bool\n Flag for combining multiple chains into a single chain. If False (default), chains will be\n plotted separately.\n hdi_prob: float, optional\n Plots highest posterior density interval for chosen percentage of density. Defaults to 0.94.\n rope: tuple or dictionary of tuples\n Lower and upper values of the Region Of Practical Equivalence. If a list with one interval\n only is provided, the ROPE will be displayed across the y-axis. If more than one interval is\n provided the length of the list should match the number of variables.\n quartiles: bool, optional\n Flag for plotting the interquartile range, in addition to the hdi_prob intervals.\n Defaults to True\n r_hat: bool, optional\n Flag for plotting Split R-hat statistics. Requires 2 or more chains. Defaults to False\n ess: bool, optional\n Flag for plotting the effective sample size. Defaults to False\n colors: list or string, optional\n list with valid matplotlib colors, one color per model. Alternative a string can be passed.\n If the string is `cycle`, it will automatically chose a color per model from the matplotlibs\n cycle. If a single color is passed, eg 'k', 'C2', 'red' this color will be used for all\n models. Defaults to 'cycle'.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n linewidth: int\n Line width throughout. If None it will be autoscaled based on figsize.\n markersize: int\n Markersize throughout. If None it will be autoscaled based on figsize.\n legend : bool, optional\n Show a legend with the color encoded model information.\n Defaults to true if there are multiple models\n labeller : labeller instance, optional\n Class providing the method `make_model_label` to generate the labels in the plot.\n Read the :ref:`label_guide` for more details and usage examples.\n ridgeplot_alpha: float\n Transparency for ridgeplot fill. 
If 0, border is colored by model, otherwise\n a black outline is used.\n ridgeplot_overlap: float\n Overlap height for ridgeplots.\n ridgeplot_kind: string\n By default (\"auto\") continuous variables are plotted using KDEs and discrete ones using\n histograms. To override this use \"hist\" to plot histograms and \"density\" for KDEs\n ridgeplot_truncate: bool\n Whether to truncate densities according to the value of hdi_prop. Defaults to True\n ridgeplot_quantiles: list\n Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.\n Defaults to None.\n figsize: tuple\n Figure size. If None it will be defined automatically.\n ax: axes, optional\n Matplotlib axes or bokeh figures.\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_config: dict, optional\n Currently specifies the bounds to use for bokeh axes. Defaults to value set in rcParams.\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n show: bool, optional\n Call backend show function.\n\n Returns\n -------\n gridspec: matplotlib GridSpec or bokeh figures\n\n Examples\n --------\n Forestp\u013aot\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> non_centered_data = az.load_arviz_data('non_centered_eight')\n >>> axes = az.plot_forest(non_centered_data,\n >>> kind='forestplot',\n >>> var_names=[\"^the\"],\n >>> filter_vars=\"regex\",\n >>> combined=True,\n >>> figsize=(9, 7))\n >>> axes[0].set_title('Estimated theta for 8 schools model')\n\n Forestplot with multiple datasets\n\n .. plot::\n :context: close-figs\n\n >>> centered_data = az.load_arviz_data('centered_eight')\n >>> axes = az.plot_forest([non_centered_data, centered_data],\n >>> model_names = [\"non centered eight\", \"centered eight\"],\n >>> kind='forestplot',\n >>> var_names=[\"^the\"],\n >>> filter_vars=\"regex\",\n >>> combined=True,\n >>> figsize=(9, 7))\n >>> axes[0].set_title('Estimated theta for 8 schools models')\n\n Forestp\u013aot with ropes\n\n .. plot::\n :context: close-figs\n\n >>> rope = {'theta': [{'school': 'Choate', 'rope': (2, 4)}], 'mu': [{'rope': (-2, 2)}]}\n >>> axes = az.plot_forest(non_centered_data,\n >>> rope=rope,\n >>> var_names='~tau',\n >>> combined=True,\n >>> figsize=(9, 7))\n >>> axes[0].set_title('Estimated theta for 8 schools model')\n\n\n Ridgeplot\n\n .. plot::\n :context: close-figs\n\n >>> axes = az.plot_forest(non_centered_data,\n >>> kind='ridgeplot',\n >>> var_names=['theta'],\n >>> combined=True,\n >>> ridgeplot_overlap=3,\n >>> colors='white',\n >>> figsize=(9, 7))\n >>> axes[0].set_title('Estimated theta for 8 schools model')\n\n Ridgeplot non-truncated and with quantiles\n\n .. 
plot::\n :context: close-figs\n\n >>> axes = az.plot_forest(non_centered_data,\n >>> kind='ridgeplot',\n >>> var_names=['theta'],\n >>> combined=True,\n >>> ridgeplot_truncate=False,\n >>> ridgeplot_quantiles=[.25, .5, .75],\n >>> ridgeplot_overlap=0.7,\n >>> colors='white',\n >>> figsize=(9, 7))\n >>> axes[0].set_title('Estimated theta for 8 schools model')\n \"\"\"\n if not isinstance(data, (list, tuple)):\n data = [data]\n if len(data) == 1:\n legend = False\n\n if coords is None:\n coords = {}\n\n if labeller is None:\n labeller = NoModelLabeller() if legend else BaseLabeller()\n\n datasets = [convert_to_dataset(datum) for datum in reversed(data)]\n if transform is not None:\n datasets = [transform(dataset) for dataset in datasets]\n datasets = get_coords(\n datasets, list(reversed(coords)) if isinstance(coords, (list, tuple)) else coords\n )\n\n var_names = _var_names(var_names, datasets, filter_vars)\n\n ncols, width_ratios = 1, [3]\n\n if ess:\n ncols += 1\n width_ratios.append(1)\n\n if r_hat:\n ncols += 1\n width_ratios.append(1)\n\n if hdi_prob is None:\n hdi_prob = rcParams[\"stats.hdi_prob\"]\n else:\n if not 1 >= hdi_prob > 0:\n raise ValueError(\"The value of hdi_prob should be in the interval (0, 1]\")\n\n plot_forest_kwargs = dict(\n ax=ax,\n datasets=datasets,\n var_names=var_names,\n model_names=model_names,\n combined=combined,\n colors=colors,\n figsize=figsize,\n width_ratios=width_ratios,\n linewidth=linewidth,\n markersize=markersize,\n kind=kind,\n ncols=ncols,\n hdi_prob=hdi_prob,\n quartiles=quartiles,\n rope=rope,\n ridgeplot_overlap=ridgeplot_overlap,\n ridgeplot_alpha=ridgeplot_alpha,\n ridgeplot_kind=ridgeplot_kind,\n ridgeplot_truncate=ridgeplot_truncate,\n ridgeplot_quantiles=ridgeplot_quantiles,\n textsize=textsize,\n legend=legend,\n labeller=labeller,\n ess=ess,\n r_hat=r_hat,\n backend_kwargs=backend_kwargs,\n backend_config=backend_config,\n show=show,\n )\n\n if backend is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_forest\", \"forestplot\", backend)\n axes = plot(**plot_forest_kwargs)\n return axes\n", "path": "arviz/plots/forestplot.py"}]} | 3,281 | 241 |
gh_patches_debug_24469 | rasdani/github-patches | git_diff | sanic-org__sanic-2606 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HTTP 1 request headers decoded using default encoding instead of ISO-8859-1
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
headers are decoded here without specifying their encoding:
https://github.com/sanic-org/sanic/blob/ad4e526c775fc3ce950503d6476d9d344492b0dd/sanic/http/http1.py#L205
On my system (osx using python 3.10.8 installed via homebrew) this causes bytes that are valid characters in ISO-8859-1 but not in UTF-8 to be decoded as surrogate escape characters, e.g. `b"\x80"` becomes `"\udf80"` instead of `"\x80"`
### Code snippet
_No response_
### Expected Behavior
Headers encoded as ISO-8859-1 with no MIME type should be decoded correctly, without using UTF-8 surrogate escape characters.
### How do you run Sanic?
As a script (`app.run` or `Sanic.serve`)
### Operating System
linux
### Sanic Version
22.9.1
### Additional context
this used to work as expected in Sanic<=20.12.7
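A minimal sketch of the decoding difference described above (illustrative only, not Sanic code; the lone-surrogate value is what Python's default UTF-8 codec produces under the `surrogateescape` error handler):

```python
raw = b"\x80"  # a valid ISO-8859-1 character, but not valid UTF-8

as_latin1 = raw.decode("latin-1")                  # '\x80', what HTTP/1 headers expect
as_default = raw.decode(errors="surrogateescape")  # '\udc80', a lone surrogate

assert as_latin1 == "\x80"
assert as_latin1 != as_default
```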
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sanic/asgi.py`
Content:
```
1 from __future__ import annotations
2
3 import warnings
4
5 from typing import TYPE_CHECKING, Optional
6 from urllib.parse import quote
7
8 from sanic.compat import Header
9 from sanic.exceptions import ServerError
10 from sanic.helpers import Default
11 from sanic.http import Stage
12 from sanic.log import error_logger, logger
13 from sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport
14 from sanic.request import Request
15 from sanic.response import BaseHTTPResponse
16 from sanic.server import ConnInfo
17 from sanic.server.websockets.connection import WebSocketConnection
18
19
20 if TYPE_CHECKING:
21 from sanic import Sanic
22
23
24 class Lifespan:
25 def __init__(
26 self, sanic_app, scope: ASGIScope, receive: ASGIReceive, send: ASGISend
27 ) -> None:
28 self.sanic_app = sanic_app
29 self.scope = scope
30 self.receive = receive
31 self.send = send
32
33 if "server.init.before" in self.sanic_app.signal_router.name_index:
34 logger.debug(
35 'You have set a listener for "before_server_start" '
36 "in ASGI mode. "
37 "It will be executed as early as possible, but not before "
38 "the ASGI server is started.",
39 extra={"verbosity": 1},
40 )
41 if "server.shutdown.after" in self.sanic_app.signal_router.name_index:
42 logger.debug(
43 'You have set a listener for "after_server_stop" '
44 "in ASGI mode. "
45 "It will be executed as late as possible, but not after "
46 "the ASGI server is stopped.",
47 extra={"verbosity": 1},
48 )
49
50 async def startup(self) -> None:
51 """
52 Gather the listeners to fire on server start.
53 Because we are using a third-party server and not Sanic server, we do
54 not have access to fire anything BEFORE the server starts.
55 Therefore, we fire before_server_start and after_server_start
56 in sequence since the ASGI lifespan protocol only supports a single
57 startup event.
58 """
59 await self.sanic_app._startup()
60 await self.sanic_app._server_event("init", "before")
61 await self.sanic_app._server_event("init", "after")
62
63 if not isinstance(self.sanic_app.config.USE_UVLOOP, Default):
64 warnings.warn(
65 "You have set the USE_UVLOOP configuration option, but Sanic "
66 "cannot control the event loop when running in ASGI mode."
67 "This option will be ignored."
68 )
69
70 async def shutdown(self) -> None:
71 """
72 Gather the listeners to fire on server stop.
73 Because we are using a third-party server and not Sanic server, we do
74 not have access to fire anything AFTER the server stops.
75 Therefore, we fire before_server_stop and after_server_stop
76 in sequence since the ASGI lifespan protocol only supports a single
77 shutdown event.
78 """
79 await self.sanic_app._server_event("shutdown", "before")
80 await self.sanic_app._server_event("shutdown", "after")
81
82 async def __call__(self) -> None:
83 while True:
84 message = await self.receive()
85 if message["type"] == "lifespan.startup":
86 try:
87 await self.startup()
88 except Exception as e:
89 error_logger.exception(e)
90 await self.send(
91 {"type": "lifespan.startup.failed", "message": str(e)}
92 )
93 else:
94 await self.send({"type": "lifespan.startup.complete"})
95 elif message["type"] == "lifespan.shutdown":
96 try:
97 await self.shutdown()
98 except Exception as e:
99 error_logger.exception(e)
100 await self.send(
101 {"type": "lifespan.shutdown.failed", "message": str(e)}
102 )
103 else:
104 await self.send({"type": "lifespan.shutdown.complete"})
105 return
106
107
108 class ASGIApp:
109 sanic_app: Sanic
110 request: Request
111 transport: MockTransport
112 lifespan: Lifespan
113 ws: Optional[WebSocketConnection]
114 stage: Stage
115 response: Optional[BaseHTTPResponse]
116
117 @classmethod
118 async def create(
119 cls,
120 sanic_app: Sanic,
121 scope: ASGIScope,
122 receive: ASGIReceive,
123 send: ASGISend,
124 ) -> ASGIApp:
125 instance = cls()
126 instance.ws = None
127 instance.sanic_app = sanic_app
128 instance.transport = MockTransport(scope, receive, send)
129 instance.transport.loop = sanic_app.loop
130 instance.stage = Stage.IDLE
131 instance.response = None
132 instance.sanic_app.state.is_started = True
133 setattr(instance.transport, "add_task", sanic_app.loop.create_task)
134
135 headers = Header(
136 [
137 (key.decode("latin-1"), value.decode("latin-1"))
138 for key, value in scope.get("headers", [])
139 ]
140 )
141 path = (
142 scope["path"][1:]
143 if scope["path"].startswith("/")
144 else scope["path"]
145 )
146 url = "/".join([scope.get("root_path", ""), quote(path)])
147 url_bytes = url.encode("latin-1")
148 url_bytes += b"?" + scope["query_string"]
149
150 if scope["type"] == "http":
151 version = scope["http_version"]
152 method = scope["method"]
153 elif scope["type"] == "websocket":
154 version = "1.1"
155 method = "GET"
156
157 instance.ws = instance.transport.create_websocket_connection(
158 send, receive
159 )
160 else:
161 raise ServerError("Received unknown ASGI scope")
162
163 request_class = sanic_app.request_class or Request
164 instance.request = request_class(
165 url_bytes,
166 headers,
167 version,
168 method,
169 instance.transport,
170 sanic_app,
171 )
172 instance.request.stream = instance # type: ignore
173 instance.request_body = True
174 instance.request.conn_info = ConnInfo(instance.transport)
175
176 await instance.sanic_app.dispatch(
177 "http.lifecycle.request",
178 inline=True,
179 context={"request": instance.request},
180 fail_not_found=False,
181 )
182
183 return instance
184
185 async def read(self) -> Optional[bytes]:
186 """
187 Read and stream the body in chunks from an incoming ASGI message.
188 """
189 if self.stage is Stage.IDLE:
190 self.stage = Stage.REQUEST
191 message = await self.transport.receive()
192 body = message.get("body", b"")
193 if not message.get("more_body", False):
194 self.request_body = False
195 if not body:
196 return None
197 return body
198
199 async def __aiter__(self):
200 while self.request_body:
201 data = await self.read()
202 if data:
203 yield data
204
205 def respond(self, response: BaseHTTPResponse):
206 if self.stage is not Stage.HANDLER:
207 self.stage = Stage.FAILED
208 raise RuntimeError("Response already started")
209 if self.response is not None:
210 self.response.stream = None
211 response.stream, self.response = self, response
212 return response
213
214 async def send(self, data, end_stream):
215 self.stage = Stage.IDLE if end_stream else Stage.RESPONSE
216 if self.response:
217 response, self.response = self.response, None
218 await self.transport.send(
219 {
220 "type": "http.response.start",
221 "status": response.status,
222 "headers": response.processed_headers,
223 }
224 )
225 response_body = getattr(response, "body", None)
226 if response_body:
227 data = response_body + data if data else response_body
228 await self.transport.send(
229 {
230 "type": "http.response.body",
231 "body": data.encode() if hasattr(data, "encode") else data,
232 "more_body": not end_stream,
233 }
234 )
235
236 _asgi_single_callable = True # We conform to ASGI 3.0 single-callable
237
238 async def __call__(self) -> None:
239 """
240 Handle the incoming request.
241 """
242 try:
243 self.stage = Stage.HANDLER
244 await self.sanic_app.handle_request(self.request)
245 except Exception as e:
246 try:
247 await self.sanic_app.handle_exception(self.request, e)
248 except Exception as exc:
249 await self.sanic_app.handle_exception(self.request, exc, False)
250
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sanic/asgi.py b/sanic/asgi.py
--- a/sanic/asgi.py
+++ b/sanic/asgi.py
@@ -6,7 +6,7 @@
from urllib.parse import quote
from sanic.compat import Header
-from sanic.exceptions import ServerError
+from sanic.exceptions import BadRequest, ServerError
from sanic.helpers import Default
from sanic.http import Stage
from sanic.log import error_logger, logger
@@ -132,12 +132,20 @@
instance.sanic_app.state.is_started = True
setattr(instance.transport, "add_task", sanic_app.loop.create_task)
- headers = Header(
- [
- (key.decode("latin-1"), value.decode("latin-1"))
- for key, value in scope.get("headers", [])
- ]
- )
+ try:
+ headers = Header(
+ [
+ (
+ key.decode("ASCII"),
+ value.decode(errors="surrogateescape"),
+ )
+ for key, value in scope.get("headers", [])
+ ]
+ )
+ except UnicodeDecodeError:
+ raise BadRequest(
+ "Header names can only contain US-ASCII characters"
+ )
path = (
scope["path"][1:]
if scope["path"].startswith("/")
| {"golden_diff": "diff --git a/sanic/asgi.py b/sanic/asgi.py\n--- a/sanic/asgi.py\n+++ b/sanic/asgi.py\n@@ -6,7 +6,7 @@\n from urllib.parse import quote\n \n from sanic.compat import Header\n-from sanic.exceptions import ServerError\n+from sanic.exceptions import BadRequest, ServerError\n from sanic.helpers import Default\n from sanic.http import Stage\n from sanic.log import error_logger, logger\n@@ -132,12 +132,20 @@\n instance.sanic_app.state.is_started = True\n setattr(instance.transport, \"add_task\", sanic_app.loop.create_task)\n \n- headers = Header(\n- [\n- (key.decode(\"latin-1\"), value.decode(\"latin-1\"))\n- for key, value in scope.get(\"headers\", [])\n- ]\n- )\n+ try:\n+ headers = Header(\n+ [\n+ (\n+ key.decode(\"ASCII\"),\n+ value.decode(errors=\"surrogateescape\"),\n+ )\n+ for key, value in scope.get(\"headers\", [])\n+ ]\n+ )\n+ except UnicodeDecodeError:\n+ raise BadRequest(\n+ \"Header names can only contain US-ASCII characters\"\n+ )\n path = (\n scope[\"path\"][1:]\n if scope[\"path\"].startswith(\"/\")\n", "issue": "HTTP 1 request headers decoded using default encoding instead of ISO-8859-1\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Describe the bug\n\nheaders are decoded here without specifying their encoding:\r\n\r\nhttps://github.com/sanic-org/sanic/blob/ad4e526c775fc3ce950503d6476d9d344492b0dd/sanic/http/http1.py#L205\r\n\r\nOn my system (osx using python 3.10.8 installed via homebrew) this causes bytes that are valid characters in ISO-8859-1 but not in UTF-8 to be decoded as surrogate escape characters, e.g. `b\"\\x80\"` becomes `\"\\udf80\"` instead of `\"\\x80\"`\n\n### Code snippet\n\n_No response_\n\n### Expected Behavior\n\nheaders encoded as ISO-8859-1 with no MIME type to be decoded correctly without using UTF-8 surrogate escape characters.\n\n### How do you run Sanic?\n\nAs a script (`app.run` or `Sanic.serve`)\n\n### Operating System\n\nlinux\n\n### Sanic Version\n\n22.9.1\n\n### Additional context\n\nthis used to work as expected in Sanic<=20.12.7\n", "before_files": [{"content": "from __future__ import annotations\n\nimport warnings\n\nfrom typing import TYPE_CHECKING, Optional\nfrom urllib.parse import quote\n\nfrom sanic.compat import Header\nfrom sanic.exceptions import ServerError\nfrom sanic.helpers import Default\nfrom sanic.http import Stage\nfrom sanic.log import error_logger, logger\nfrom sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport\nfrom sanic.request import Request\nfrom sanic.response import BaseHTTPResponse\nfrom sanic.server import ConnInfo\nfrom sanic.server.websockets.connection import WebSocketConnection\n\n\nif TYPE_CHECKING:\n from sanic import Sanic\n\n\nclass Lifespan:\n def __init__(\n self, sanic_app, scope: ASGIScope, receive: ASGIReceive, send: ASGISend\n ) -> None:\n self.sanic_app = sanic_app\n self.scope = scope\n self.receive = receive\n self.send = send\n\n if \"server.init.before\" in self.sanic_app.signal_router.name_index:\n logger.debug(\n 'You have set a listener for \"before_server_start\" '\n \"in ASGI mode. \"\n \"It will be executed as early as possible, but not before \"\n \"the ASGI server is started.\",\n extra={\"verbosity\": 1},\n )\n if \"server.shutdown.after\" in self.sanic_app.signal_router.name_index:\n logger.debug(\n 'You have set a listener for \"after_server_stop\" '\n \"in ASGI mode. 
\"\n \"It will be executed as late as possible, but not after \"\n \"the ASGI server is stopped.\",\n extra={\"verbosity\": 1},\n )\n\n async def startup(self) -> None:\n \"\"\"\n Gather the listeners to fire on server start.\n Because we are using a third-party server and not Sanic server, we do\n not have access to fire anything BEFORE the server starts.\n Therefore, we fire before_server_start and after_server_start\n in sequence since the ASGI lifespan protocol only supports a single\n startup event.\n \"\"\"\n await self.sanic_app._startup()\n await self.sanic_app._server_event(\"init\", \"before\")\n await self.sanic_app._server_event(\"init\", \"after\")\n\n if not isinstance(self.sanic_app.config.USE_UVLOOP, Default):\n warnings.warn(\n \"You have set the USE_UVLOOP configuration option, but Sanic \"\n \"cannot control the event loop when running in ASGI mode.\"\n \"This option will be ignored.\"\n )\n\n async def shutdown(self) -> None:\n \"\"\"\n Gather the listeners to fire on server stop.\n Because we are using a third-party server and not Sanic server, we do\n not have access to fire anything AFTER the server stops.\n Therefore, we fire before_server_stop and after_server_stop\n in sequence since the ASGI lifespan protocol only supports a single\n shutdown event.\n \"\"\"\n await self.sanic_app._server_event(\"shutdown\", \"before\")\n await self.sanic_app._server_event(\"shutdown\", \"after\")\n\n async def __call__(self) -> None:\n while True:\n message = await self.receive()\n if message[\"type\"] == \"lifespan.startup\":\n try:\n await self.startup()\n except Exception as e:\n error_logger.exception(e)\n await self.send(\n {\"type\": \"lifespan.startup.failed\", \"message\": str(e)}\n )\n else:\n await self.send({\"type\": \"lifespan.startup.complete\"})\n elif message[\"type\"] == \"lifespan.shutdown\":\n try:\n await self.shutdown()\n except Exception as e:\n error_logger.exception(e)\n await self.send(\n {\"type\": \"lifespan.shutdown.failed\", \"message\": str(e)}\n )\n else:\n await self.send({\"type\": \"lifespan.shutdown.complete\"})\n return\n\n\nclass ASGIApp:\n sanic_app: Sanic\n request: Request\n transport: MockTransport\n lifespan: Lifespan\n ws: Optional[WebSocketConnection]\n stage: Stage\n response: Optional[BaseHTTPResponse]\n\n @classmethod\n async def create(\n cls,\n sanic_app: Sanic,\n scope: ASGIScope,\n receive: ASGIReceive,\n send: ASGISend,\n ) -> ASGIApp:\n instance = cls()\n instance.ws = None\n instance.sanic_app = sanic_app\n instance.transport = MockTransport(scope, receive, send)\n instance.transport.loop = sanic_app.loop\n instance.stage = Stage.IDLE\n instance.response = None\n instance.sanic_app.state.is_started = True\n setattr(instance.transport, \"add_task\", sanic_app.loop.create_task)\n\n headers = Header(\n [\n (key.decode(\"latin-1\"), value.decode(\"latin-1\"))\n for key, value in scope.get(\"headers\", [])\n ]\n )\n path = (\n scope[\"path\"][1:]\n if scope[\"path\"].startswith(\"/\")\n else scope[\"path\"]\n )\n url = \"/\".join([scope.get(\"root_path\", \"\"), quote(path)])\n url_bytes = url.encode(\"latin-1\")\n url_bytes += b\"?\" + scope[\"query_string\"]\n\n if scope[\"type\"] == \"http\":\n version = scope[\"http_version\"]\n method = scope[\"method\"]\n elif scope[\"type\"] == \"websocket\":\n version = \"1.1\"\n method = \"GET\"\n\n instance.ws = instance.transport.create_websocket_connection(\n send, receive\n )\n else:\n raise ServerError(\"Received unknown ASGI scope\")\n\n request_class = sanic_app.request_class or 
Request\n instance.request = request_class(\n url_bytes,\n headers,\n version,\n method,\n instance.transport,\n sanic_app,\n )\n instance.request.stream = instance # type: ignore\n instance.request_body = True\n instance.request.conn_info = ConnInfo(instance.transport)\n\n await instance.sanic_app.dispatch(\n \"http.lifecycle.request\",\n inline=True,\n context={\"request\": instance.request},\n fail_not_found=False,\n )\n\n return instance\n\n async def read(self) -> Optional[bytes]:\n \"\"\"\n Read and stream the body in chunks from an incoming ASGI message.\n \"\"\"\n if self.stage is Stage.IDLE:\n self.stage = Stage.REQUEST\n message = await self.transport.receive()\n body = message.get(\"body\", b\"\")\n if not message.get(\"more_body\", False):\n self.request_body = False\n if not body:\n return None\n return body\n\n async def __aiter__(self):\n while self.request_body:\n data = await self.read()\n if data:\n yield data\n\n def respond(self, response: BaseHTTPResponse):\n if self.stage is not Stage.HANDLER:\n self.stage = Stage.FAILED\n raise RuntimeError(\"Response already started\")\n if self.response is not None:\n self.response.stream = None\n response.stream, self.response = self, response\n return response\n\n async def send(self, data, end_stream):\n self.stage = Stage.IDLE if end_stream else Stage.RESPONSE\n if self.response:\n response, self.response = self.response, None\n await self.transport.send(\n {\n \"type\": \"http.response.start\",\n \"status\": response.status,\n \"headers\": response.processed_headers,\n }\n )\n response_body = getattr(response, \"body\", None)\n if response_body:\n data = response_body + data if data else response_body\n await self.transport.send(\n {\n \"type\": \"http.response.body\",\n \"body\": data.encode() if hasattr(data, \"encode\") else data,\n \"more_body\": not end_stream,\n }\n )\n\n _asgi_single_callable = True # We conform to ASGI 3.0 single-callable\n\n async def __call__(self) -> None:\n \"\"\"\n Handle the incoming request.\n \"\"\"\n try:\n self.stage = Stage.HANDLER\n await self.sanic_app.handle_request(self.request)\n except Exception as e:\n try:\n await self.sanic_app.handle_exception(self.request, e)\n except Exception as exc:\n await self.sanic_app.handle_exception(self.request, exc, False)\n", "path": "sanic/asgi.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport warnings\n\nfrom typing import TYPE_CHECKING, Optional\nfrom urllib.parse import quote\n\nfrom sanic.compat import Header\nfrom sanic.exceptions import BadRequest, ServerError\nfrom sanic.helpers import Default\nfrom sanic.http import Stage\nfrom sanic.log import error_logger, logger\nfrom sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport\nfrom sanic.request import Request\nfrom sanic.response import BaseHTTPResponse\nfrom sanic.server import ConnInfo\nfrom sanic.server.websockets.connection import WebSocketConnection\n\n\nif TYPE_CHECKING:\n from sanic import Sanic\n\n\nclass Lifespan:\n def __init__(\n self, sanic_app, scope: ASGIScope, receive: ASGIReceive, send: ASGISend\n ) -> None:\n self.sanic_app = sanic_app\n self.scope = scope\n self.receive = receive\n self.send = send\n\n if \"server.init.before\" in self.sanic_app.signal_router.name_index:\n logger.debug(\n 'You have set a listener for \"before_server_start\" '\n \"in ASGI mode. 
\"\n \"It will be executed as early as possible, but not before \"\n \"the ASGI server is started.\",\n extra={\"verbosity\": 1},\n )\n if \"server.shutdown.after\" in self.sanic_app.signal_router.name_index:\n logger.debug(\n 'You have set a listener for \"after_server_stop\" '\n \"in ASGI mode. \"\n \"It will be executed as late as possible, but not after \"\n \"the ASGI server is stopped.\",\n extra={\"verbosity\": 1},\n )\n\n async def startup(self) -> None:\n \"\"\"\n Gather the listeners to fire on server start.\n Because we are using a third-party server and not Sanic server, we do\n not have access to fire anything BEFORE the server starts.\n Therefore, we fire before_server_start and after_server_start\n in sequence since the ASGI lifespan protocol only supports a single\n startup event.\n \"\"\"\n await self.sanic_app._startup()\n await self.sanic_app._server_event(\"init\", \"before\")\n await self.sanic_app._server_event(\"init\", \"after\")\n\n if not isinstance(self.sanic_app.config.USE_UVLOOP, Default):\n warnings.warn(\n \"You have set the USE_UVLOOP configuration option, but Sanic \"\n \"cannot control the event loop when running in ASGI mode.\"\n \"This option will be ignored.\"\n )\n\n async def shutdown(self) -> None:\n \"\"\"\n Gather the listeners to fire on server stop.\n Because we are using a third-party server and not Sanic server, we do\n not have access to fire anything AFTER the server stops.\n Therefore, we fire before_server_stop and after_server_stop\n in sequence since the ASGI lifespan protocol only supports a single\n shutdown event.\n \"\"\"\n await self.sanic_app._server_event(\"shutdown\", \"before\")\n await self.sanic_app._server_event(\"shutdown\", \"after\")\n\n async def __call__(self) -> None:\n while True:\n message = await self.receive()\n if message[\"type\"] == \"lifespan.startup\":\n try:\n await self.startup()\n except Exception as e:\n error_logger.exception(e)\n await self.send(\n {\"type\": \"lifespan.startup.failed\", \"message\": str(e)}\n )\n else:\n await self.send({\"type\": \"lifespan.startup.complete\"})\n elif message[\"type\"] == \"lifespan.shutdown\":\n try:\n await self.shutdown()\n except Exception as e:\n error_logger.exception(e)\n await self.send(\n {\"type\": \"lifespan.shutdown.failed\", \"message\": str(e)}\n )\n else:\n await self.send({\"type\": \"lifespan.shutdown.complete\"})\n return\n\n\nclass ASGIApp:\n sanic_app: Sanic\n request: Request\n transport: MockTransport\n lifespan: Lifespan\n ws: Optional[WebSocketConnection]\n stage: Stage\n response: Optional[BaseHTTPResponse]\n\n @classmethod\n async def create(\n cls,\n sanic_app: Sanic,\n scope: ASGIScope,\n receive: ASGIReceive,\n send: ASGISend,\n ) -> ASGIApp:\n instance = cls()\n instance.ws = None\n instance.sanic_app = sanic_app\n instance.transport = MockTransport(scope, receive, send)\n instance.transport.loop = sanic_app.loop\n instance.stage = Stage.IDLE\n instance.response = None\n instance.sanic_app.state.is_started = True\n setattr(instance.transport, \"add_task\", sanic_app.loop.create_task)\n\n try:\n headers = Header(\n [\n (\n key.decode(\"ASCII\"),\n value.decode(errors=\"surrogateescape\"),\n )\n for key, value in scope.get(\"headers\", [])\n ]\n )\n except UnicodeDecodeError:\n raise BadRequest(\n \"Header names can only contain US-ASCII characters\"\n )\n path = (\n scope[\"path\"][1:]\n if scope[\"path\"].startswith(\"/\")\n else scope[\"path\"]\n )\n url = \"/\".join([scope.get(\"root_path\", \"\"), quote(path)])\n url_bytes = 
url.encode(\"latin-1\")\n url_bytes += b\"?\" + scope[\"query_string\"]\n\n if scope[\"type\"] == \"http\":\n version = scope[\"http_version\"]\n method = scope[\"method\"]\n elif scope[\"type\"] == \"websocket\":\n version = \"1.1\"\n method = \"GET\"\n\n instance.ws = instance.transport.create_websocket_connection(\n send, receive\n )\n else:\n raise ServerError(\"Received unknown ASGI scope\")\n\n request_class = sanic_app.request_class or Request\n instance.request = request_class(\n url_bytes,\n headers,\n version,\n method,\n instance.transport,\n sanic_app,\n )\n instance.request.stream = instance # type: ignore\n instance.request_body = True\n instance.request.conn_info = ConnInfo(instance.transport)\n\n await instance.sanic_app.dispatch(\n \"http.lifecycle.request\",\n inline=True,\n context={\"request\": instance.request},\n fail_not_found=False,\n )\n\n return instance\n\n async def read(self) -> Optional[bytes]:\n \"\"\"\n Read and stream the body in chunks from an incoming ASGI message.\n \"\"\"\n if self.stage is Stage.IDLE:\n self.stage = Stage.REQUEST\n message = await self.transport.receive()\n body = message.get(\"body\", b\"\")\n if not message.get(\"more_body\", False):\n self.request_body = False\n if not body:\n return None\n return body\n\n async def __aiter__(self):\n while self.request_body:\n data = await self.read()\n if data:\n yield data\n\n def respond(self, response: BaseHTTPResponse):\n if self.stage is not Stage.HANDLER:\n self.stage = Stage.FAILED\n raise RuntimeError(\"Response already started\")\n if self.response is not None:\n self.response.stream = None\n response.stream, self.response = self, response\n return response\n\n async def send(self, data, end_stream):\n self.stage = Stage.IDLE if end_stream else Stage.RESPONSE\n if self.response:\n response, self.response = self.response, None\n await self.transport.send(\n {\n \"type\": \"http.response.start\",\n \"status\": response.status,\n \"headers\": response.processed_headers,\n }\n )\n response_body = getattr(response, \"body\", None)\n if response_body:\n data = response_body + data if data else response_body\n await self.transport.send(\n {\n \"type\": \"http.response.body\",\n \"body\": data.encode() if hasattr(data, \"encode\") else data,\n \"more_body\": not end_stream,\n }\n )\n\n _asgi_single_callable = True # We conform to ASGI 3.0 single-callable\n\n async def __call__(self) -> None:\n \"\"\"\n Handle the incoming request.\n \"\"\"\n try:\n self.stage = Stage.HANDLER\n await self.sanic_app.handle_request(self.request)\n except Exception as e:\n try:\n await self.sanic_app.handle_exception(self.request, e)\n except Exception as exc:\n await self.sanic_app.handle_exception(self.request, exc, False)\n", "path": "sanic/asgi.py"}]} | 2,996 | 289 |
gh_patches_debug_35056 | rasdani/github-patches | git_diff | opsdroid__opsdroid-142 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make crontab parser timezone aware
The crontab matcher should take a timezone as a kwarg. It should also be possible to set a global timezone in the config. Default should be UTC.
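A rough sketch of the intended usage (the kwarg and config key names here are assumptions, not a settled API):

```python
from opsdroid.matchers import match_crontab

# Per-skill override via the proposed kwarg (name assumed: `timezone`).
@match_crontab('0 9 * * *', timezone='Europe/London')
async def morning_report(opsdroid, config, message):
    pass  # would fire at 09:00 London time instead of 09:00 UTC

# Assumed global fallback in the configuration file, with UTC as the default:
#
#   timezone: Europe/London
```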
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/matchers.py`
Content:
```
1 """Decorator functions to use when creating skill modules."""
2
3 import logging
4
5 from opsdroid.helper import get_opsdroid
6 from opsdroid.web import Web
7
8
9 _LOGGER = logging.getLogger(__name__)
10
11
12 def match_regex(regex):
13 """Return regex match decorator."""
14 def matcher(func):
15 """Add decorated function to skills list for regex matching."""
16 opsdroid = get_opsdroid()
17 opsdroid.skills.append({"regex": regex, "skill": func,
18 "config":
19 opsdroid.loader.current_import_config})
20 return func
21 return matcher
22
23
24 def match_apiai_action(action):
25 """Return apiai action match decorator."""
26 def matcher(func):
27 """Add decorated function to skills list for apiai matching."""
28 opsdroid = get_opsdroid()
29 opsdroid.skills.append({"apiai_action": action, "skill": func,
30 "config":
31 opsdroid.loader.current_import_config})
32 return func
33 return matcher
34
35
36 def match_apiai_intent(intent):
37 """Return apiai intent match decorator."""
38 def matcher(func):
39 """Add decorated function to skills list for apiai matching."""
40 opsdroid = get_opsdroid()
41 opsdroid.skills.append({"apiai_intent": intent, "skill": func,
42 "config":
43 opsdroid.loader.current_import_config})
44 return func
45 return matcher
46
47
48 def match_crontab(crontab):
49 """Return crontab match decorator."""
50 def matcher(func):
51 """Add decorated function to skills list for crontab matching."""
52 opsdroid = get_opsdroid()
53 opsdroid.skills.append({"crontab": crontab, "skill": func,
54 "config":
55 opsdroid.loader.current_import_config})
56 return func
57 return matcher
58
59
60 def match_webhook(webhook):
61 """Return webhook match decorator."""
62 def matcher(func):
63 """Add decorated function to skills list for webhook matching."""
64 opsdroid = get_opsdroid()
65 config = opsdroid.loader.current_import_config
66 opsdroid.skills.append({"webhook": webhook, "skill": func,
67 "config": config})
68
69 async def wrapper(req, opsdroid=opsdroid, config=config):
70 """Wrap up the aiohttp handler."""
71 _LOGGER.info("Running skill %s via webhook", webhook)
72 opsdroid.stats["webhooks_called"] = \
73 opsdroid.stats["webhooks_called"] + 1
74 await func(opsdroid, config, req)
75 return Web.build_response(200, {"called_skill": webhook})
76
77 opsdroid.web_server.web_app.router.add_post(
78 "/skill/{}/{}".format(config["name"], webhook), wrapper)
79 opsdroid.web_server.web_app.router.add_post(
80 "/skill/{}/{}/".format(config["name"], webhook), wrapper)
81
82 return func
83 return matcher
84
```
Path: `opsdroid/parsers/crontab.py`
Content:
```
1 """A helper function for parsing and executing crontab skills."""
2
3 import logging
4 import asyncio
5 from datetime import datetime
6
7 import pycron
8
9
10 _LOGGER = logging.getLogger(__name__)
11
12
13 async def parse_crontab(opsdroid):
14 """Parse all crontab skills against the current time."""
15 # pylint: disable=broad-except
16 # We want to catch all exceptions coming from a skill module and not
17 # halt the application. If a skill throws an exception it just doesn't
18 # give a response to the user, so an error response should be given.
19 while opsdroid.eventloop.is_running():
20 await asyncio.sleep(60 - datetime.now().time().second)
21 _LOGGER.debug("Running crontab skills")
22 for skill in opsdroid.skills:
23 if "crontab" in skill and pycron.is_now(skill["crontab"]):
24 try:
25 await skill["skill"](opsdroid, skill["config"], None)
26 except Exception:
27 _LOGGER.exception("Exception when executing cron skill.")
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/matchers.py b/opsdroid/matchers.py
--- a/opsdroid/matchers.py
+++ b/opsdroid/matchers.py
@@ -45,14 +45,14 @@
return matcher
-def match_crontab(crontab):
+def match_crontab(crontab, timezone=None):
"""Return crontab match decorator."""
def matcher(func):
"""Add decorated function to skills list for crontab matching."""
opsdroid = get_opsdroid()
+ config = opsdroid.loader.current_import_config
opsdroid.skills.append({"crontab": crontab, "skill": func,
- "config":
- opsdroid.loader.current_import_config})
+ "config": config, "timezone": timezone})
return func
return matcher
diff --git a/opsdroid/parsers/crontab.py b/opsdroid/parsers/crontab.py
--- a/opsdroid/parsers/crontab.py
+++ b/opsdroid/parsers/crontab.py
@@ -1,9 +1,9 @@
"""A helper function for parsing and executing crontab skills."""
-import logging
import asyncio
-from datetime import datetime
+import logging
+import arrow
import pycron
@@ -17,11 +17,17 @@
# halt the application. If a skill throws an exception it just doesn't
# give a response to the user, so an error response should be given.
while opsdroid.eventloop.is_running():
- await asyncio.sleep(60 - datetime.now().time().second)
+ await asyncio.sleep(60 - arrow.now().time().second)
_LOGGER.debug("Running crontab skills")
for skill in opsdroid.skills:
- if "crontab" in skill and pycron.is_now(skill["crontab"]):
- try:
- await skill["skill"](opsdroid, skill["config"], None)
- except Exception:
- _LOGGER.exception("Exception when executing cron skill.")
+ if "crontab" in skill:
+ if skill["timezone"] is not None:
+ timezone = skill["timezone"]
+ else:
+ timezone = opsdroid.config.get("timezone", "UTC")
+ if pycron.is_now(skill["crontab"], arrow.now(tz=timezone)):
+ try:
+ await skill["skill"](opsdroid, skill["config"], None)
+ except Exception:
+ _LOGGER.exception(
+ "Exception when executing cron skill.")
| {"golden_diff": "diff --git a/opsdroid/matchers.py b/opsdroid/matchers.py\n--- a/opsdroid/matchers.py\n+++ b/opsdroid/matchers.py\n@@ -45,14 +45,14 @@\n return matcher\n \n \n-def match_crontab(crontab):\n+def match_crontab(crontab, timezone=None):\n \"\"\"Return crontab match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for crontab matching.\"\"\"\n opsdroid = get_opsdroid()\n+ config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"crontab\": crontab, \"skill\": func,\n- \"config\":\n- opsdroid.loader.current_import_config})\n+ \"config\": config, \"timezone\": timezone})\n return func\n return matcher\n \ndiff --git a/opsdroid/parsers/crontab.py b/opsdroid/parsers/crontab.py\n--- a/opsdroid/parsers/crontab.py\n+++ b/opsdroid/parsers/crontab.py\n@@ -1,9 +1,9 @@\n \"\"\"A helper function for parsing and executing crontab skills.\"\"\"\n \n-import logging\n import asyncio\n-from datetime import datetime\n+import logging\n \n+import arrow\n import pycron\n \n \n@@ -17,11 +17,17 @@\n # halt the application. If a skill throws an exception it just doesn't\n # give a response to the user, so an error response should be given.\n while opsdroid.eventloop.is_running():\n- await asyncio.sleep(60 - datetime.now().time().second)\n+ await asyncio.sleep(60 - arrow.now().time().second)\n _LOGGER.debug(\"Running crontab skills\")\n for skill in opsdroid.skills:\n- if \"crontab\" in skill and pycron.is_now(skill[\"crontab\"]):\n- try:\n- await skill[\"skill\"](opsdroid, skill[\"config\"], None)\n- except Exception:\n- _LOGGER.exception(\"Exception when executing cron skill.\")\n+ if \"crontab\" in skill:\n+ if skill[\"timezone\"] is not None:\n+ timezone = skill[\"timezone\"]\n+ else:\n+ timezone = opsdroid.config.get(\"timezone\", \"UTC\")\n+ if pycron.is_now(skill[\"crontab\"], arrow.now(tz=timezone)):\n+ try:\n+ await skill[\"skill\"](opsdroid, skill[\"config\"], None)\n+ except Exception:\n+ _LOGGER.exception(\n+ \"Exception when executing cron skill.\")\n", "issue": "Make crontab parser timezone aware\nThe crontab matcher should take a timezone as a kwarg. It should also be possible to set a global timezone in the config. 
Default should be UTC.\n", "before_files": [{"content": "\"\"\"Decorator functions to use when creating skill modules.\"\"\"\n\nimport logging\n\nfrom opsdroid.helper import get_opsdroid\nfrom opsdroid.web import Web\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef match_regex(regex):\n \"\"\"Return regex match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for regex matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"regex\": regex, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_apiai_action(action):\n \"\"\"Return apiai action match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for apiai matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"apiai_action\": action, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_apiai_intent(intent):\n \"\"\"Return apiai intent match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for apiai matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"apiai_intent\": intent, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_crontab(crontab):\n \"\"\"Return crontab match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for crontab matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"crontab\": crontab, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_webhook(webhook):\n \"\"\"Return webhook match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for webhook matching.\"\"\"\n opsdroid = get_opsdroid()\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"webhook\": webhook, \"skill\": func,\n \"config\": config})\n\n async def wrapper(req, opsdroid=opsdroid, config=config):\n \"\"\"Wrap up the aiohttp handler.\"\"\"\n _LOGGER.info(\"Running skill %s via webhook\", webhook)\n opsdroid.stats[\"webhooks_called\"] = \\\n opsdroid.stats[\"webhooks_called\"] + 1\n await func(opsdroid, config, req)\n return Web.build_response(200, {\"called_skill\": webhook})\n\n opsdroid.web_server.web_app.router.add_post(\n \"/skill/{}/{}\".format(config[\"name\"], webhook), wrapper)\n opsdroid.web_server.web_app.router.add_post(\n \"/skill/{}/{}/\".format(config[\"name\"], webhook), wrapper)\n\n return func\n return matcher\n", "path": "opsdroid/matchers.py"}, {"content": "\"\"\"A helper function for parsing and executing crontab skills.\"\"\"\n\nimport logging\nimport asyncio\nfrom datetime import datetime\n\nimport pycron\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def parse_crontab(opsdroid):\n \"\"\"Parse all crontab skills against the current time.\"\"\"\n # pylint: disable=broad-except\n # We want to catch all exceptions coming from a skill module and not\n # halt the application. 
If a skill throws an exception it just doesn't\n # give a response to the user, so an error response should be given.\n while opsdroid.eventloop.is_running():\n await asyncio.sleep(60 - datetime.now().time().second)\n _LOGGER.debug(\"Running crontab skills\")\n for skill in opsdroid.skills:\n if \"crontab\" in skill and pycron.is_now(skill[\"crontab\"]):\n try:\n await skill[\"skill\"](opsdroid, skill[\"config\"], None)\n except Exception:\n _LOGGER.exception(\"Exception when executing cron skill.\")\n", "path": "opsdroid/parsers/crontab.py"}], "after_files": [{"content": "\"\"\"Decorator functions to use when creating skill modules.\"\"\"\n\nimport logging\n\nfrom opsdroid.helper import get_opsdroid\nfrom opsdroid.web import Web\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef match_regex(regex):\n \"\"\"Return regex match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for regex matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"regex\": regex, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_apiai_action(action):\n \"\"\"Return apiai action match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for apiai matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"apiai_action\": action, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_apiai_intent(intent):\n \"\"\"Return apiai intent match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for apiai matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"apiai_intent\": intent, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_crontab(crontab, timezone=None):\n \"\"\"Return crontab match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for crontab matching.\"\"\"\n opsdroid = get_opsdroid()\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"crontab\": crontab, \"skill\": func,\n \"config\": config, \"timezone\": timezone})\n return func\n return matcher\n\n\ndef match_webhook(webhook):\n \"\"\"Return webhook match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for webhook matching.\"\"\"\n opsdroid = get_opsdroid()\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"webhook\": webhook, \"skill\": func,\n \"config\": config})\n\n async def wrapper(req, opsdroid=opsdroid, config=config):\n \"\"\"Wrap up the aiohttp handler.\"\"\"\n _LOGGER.info(\"Running skill %s via webhook\", webhook)\n opsdroid.stats[\"webhooks_called\"] = \\\n opsdroid.stats[\"webhooks_called\"] + 1\n await func(opsdroid, config, req)\n return Web.build_response(200, {\"called_skill\": webhook})\n\n opsdroid.web_server.web_app.router.add_post(\n \"/skill/{}/{}\".format(config[\"name\"], webhook), wrapper)\n opsdroid.web_server.web_app.router.add_post(\n \"/skill/{}/{}/\".format(config[\"name\"], webhook), wrapper)\n\n return func\n return matcher\n", "path": "opsdroid/matchers.py"}, {"content": "\"\"\"A helper function for parsing and executing crontab skills.\"\"\"\n\nimport asyncio\nimport logging\n\nimport arrow\nimport pycron\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def parse_crontab(opsdroid):\n \"\"\"Parse all crontab skills against the current time.\"\"\"\n # pylint: 
disable=broad-except\n # We want to catch all exceptions coming from a skill module and not\n # halt the application. If a skill throws an exception it just doesn't\n # give a response to the user, so an error response should be given.\n while opsdroid.eventloop.is_running():\n await asyncio.sleep(60 - arrow.now().time().second)\n _LOGGER.debug(\"Running crontab skills\")\n for skill in opsdroid.skills:\n if \"crontab\" in skill:\n if skill[\"timezone\"] is not None:\n timezone = skill[\"timezone\"]\n else:\n timezone = opsdroid.config.get(\"timezone\", \"UTC\")\n if pycron.is_now(skill[\"crontab\"], arrow.now(tz=timezone)):\n try:\n await skill[\"skill\"](opsdroid, skill[\"config\"], None)\n except Exception:\n _LOGGER.exception(\n \"Exception when executing cron skill.\")\n", "path": "opsdroid/parsers/crontab.py"}]} | 1,380 | 573 |
gh_patches_debug_33329 | rasdani/github-patches | git_diff | translate__pootle-4714 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fix update_stores not changing fuzzy state
regression from 80d35df2b2c3987470873c541c288dc95c0ec79e
ignoring change in state makes it impossible to update the fuzzy state
of a translation unit by modifying the files on disk, and running
update_stores. With state ignored in the comparison, those changes are
silently ignored (and then get overridden again on next sync_stores)
--- END ISSUE ---
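As an editorial aside, the mechanism of the regression can be reproduced with a small, hypothetical sketch (the class and field names below are illustrative, not the project's code): when `state` is left out of the compared attributes, two units that differ only in fuzzy state compare equal, so the on-disk change is dropped.
```python
# Hypothetical minimal reproduction of the comparison bug described above.
class Proxy(object):
    match_attrs = ["source", "target"]  # "state" missing, as in the regression

    def __init__(self, **unit):
        self.unit = unit

    def __eq__(self, other):
        return all(self.unit[k] == other.unit[k] for k in self.match_attrs)


db_unit = Proxy(source="hello", target="hola", state="translated")
file_unit = Proxy(source="hello", target="hola", state="fuzzy")  # edited on disk

print(db_unit == file_unit)        # True  -> the fuzzy edit is silently ignored
Proxy.match_attrs.append("state")
print(db_unit == file_unit)        # False -> the unit is now picked up for update
```
The actual fix, visible in the golden diff below, adds `"state"` to `match_attrs` and derives a comparable state value for file units.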
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_store/diff.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import difflib
10 from collections import OrderedDict
11
12 from django.utils.functional import cached_property
13
14 from .fields import to_python as multistring_to_python
15 from .unit import UnitProxy
16 from .util import OBSOLETE
17
18
19 class UnitDiffProxy(UnitProxy):
20 """Wraps File/DB Unit dicts used by StoreDiff for equality comparison"""
21
22 match_attrs = ["context", "developer_comment", "locations",
23 "source", "target", "translator_comment"]
24
25 def __eq__(self, other):
26 return all(getattr(self, k) == getattr(other, k)
27 for k in self.match_attrs)
28
29 def __ne__(self, other):
30 return not self == other
31
32
33 class DBUnit(UnitDiffProxy):
34
35 pass
36
37
38 class FileUnit(UnitDiffProxy):
39
40 @property
41 def locations(self):
42 return "\n".join(self.unit["locations"])
43
44 @property
45 def source(self):
46 return multistring_to_python(self.unit["source"])
47
48 @property
49 def target(self):
50 return multistring_to_python(self.unit["target"])
51
52
53 class StoreDiff(object):
54
55 def __init__(self, db_store, file_store, file_revision):
56 self.db_store = db_store
57 self.file_store = file_store
58 self.db_revision = db_store.get_max_unit_revision()
59 self.file_revision = file_revision
60
61 @cached_property
62 def active_units(self):
63 return [unitid for unitid, unit in self.db_units.items()
64 if unit['state'] != OBSOLETE]
65
66 @cached_property
67 def db_units(self):
68 """All of the db units regardless of state or revision"""
69 db_units = OrderedDict()
70 unit_fields = ("unitid", "state", "id", "index", "revision",
71 "source_f", "target_f", "developer_comment",
72 "translator_comment", "locations", "context")
73 for unit in self.db_store.unit_set.values(*unit_fields).order_by("index"):
74 db_units[unit["unitid"]] = unit
75 return db_units
76
77 @cached_property
78 def file_units(self):
79 file_units = OrderedDict()
80 for unit in self.file_store.units:
81 if unit.isheader():
82 continue
83 file_units[unit.getid()] = {
84 "unitid": unit.getid(),
85 "context": unit.getcontext(),
86 "locations": unit.getlocations(),
87 "source": unit.source,
88 "target": unit.target,
89 "state": unit.get_state_n(),
90 "developer_comment": unit.getnotes(origin="developer"),
91 "translator_comment": unit.getnotes(origin="translator")}
92 return file_units
93
94 @cached_property
95 def insert_points(self):
96 """Returns a list of insert points with update index info.
97 :return: a list of tuples
98 ``(insert_at, uids_to_add, next_index, update_index_delta)`` where
99 ``insert_at`` is the point for inserting
100 ``uids_to_add`` are the units to be inserted
101 ``update_index_delta`` is the offset for index updating
102 ``next_index`` is the starting point after which
103 ``update_index_delta`` should be applied.
104 """
105 inserts = []
106 new_unitid_list = self.new_unit_list
107 for (tag, i1, i2, j1, j2) in self.opcodes:
108 if tag == 'insert':
109 update_index_delta = 0
110 insert_at = 0
111 if i1 > 0:
112 insert_at = (
113 self.db_units[self.active_units[i1 - 1]]['index'])
114 next_index = insert_at + 1
115 if i1 < len(self.active_units):
116 next_index = self.db_units[self.active_units[i1]]["index"]
117 update_index_delta = (
118 j2 - j1 - next_index + insert_at + 1)
119
120 inserts.append((insert_at,
121 new_unitid_list[j1:j2],
122 next_index,
123 update_index_delta))
124
125 elif tag == 'replace':
126 insert_at = self.db_units[self.active_units[i1 - 1]]['index']
127 next_index = self.db_units[self.active_units[i2 - 1]]['index']
128 inserts.append((insert_at,
129 new_unitid_list[j1:j2],
130 next_index,
131 j2 - j1 - insert_at + next_index))
132
133 return inserts
134
135 @cached_property
136 def new_unit_list(self):
137 # If file_revision is gte than the db_revision then new unit list
138 # will be exactly what is in the file
139 if self.file_revision >= self.db_revision:
140 return self.file_units.keys()
141
142 # These units are kept as they have been updated since file_revision
143 # but do not appear in the file
144 new_units = [u for u in self.updated_db_units
145 if u not in self.file_units]
146
147 # These unit are either present in both or only in the file so are
148 # kept in the file order
149 new_units += [u for u in self.file_units.keys()
150 if u not in self.obsoleted_db_units]
151
152 return new_units
153
154 @cached_property
155 def obsoleted_db_units(self):
156 return [unitid for unitid, unit in self.db_units.items()
157 if (unit['state'] == OBSOLETE
158 and unit["revision"] > self.file_revision)]
159
160 @cached_property
161 def opcodes(self):
162 sm = difflib.SequenceMatcher(None,
163 self.active_units,
164 self.new_unit_list)
165 return sm.get_opcodes()
166
167 @cached_property
168 def updated_db_units(self):
169 return [unitid for unitid, unit in self.db_units.items()
170 if (unit['revision'] > self.file_revision
171 and unit["state"] != OBSOLETE)]
172
173 def diff(self):
174 """Return a dictionary of change actions or None if there are no
175 changes to be made.
176 """
177 diff = {"index": self.get_indexes_to_update(),
178 "obsolete": self.get_units_to_obsolete(),
179 "add": self.get_units_to_add(),
180 "update": self.get_units_to_update()}
181 if self.has_changes(diff):
182 return diff
183 return None
184
185 def get_indexes_to_update(self):
186 offset = 0
187 index_updates = []
188 for (insert_at, uids_add, next_index, delta) in self.insert_points:
189 if delta > 0:
190 index_updates += [(next_index + offset, delta)]
191 offset += delta
192 return index_updates
193
194 def get_units_to_add(self):
195 offset = 0
196 to_add = []
197 for (insert_at, uids_add, next_index, delta) in self.insert_points:
198 for index, uid in enumerate(uids_add):
199 file_unit = self.file_store.findid(uid)
200 if file_unit and file_unit.getid() not in self.db_units:
201 new_unit_index = insert_at + index + 1 + offset
202 to_add += [(file_unit, new_unit_index)]
203 if delta > 0:
204 offset += delta
205 return to_add
206
207 def get_units_to_obsolete(self):
208 return [unit['id'] for unitid, unit in self.db_units.items()
209 if (unitid not in self.file_units
210 and unitid in self.active_units
211 and unitid not in self.updated_db_units)]
212
213 def get_units_to_update(self):
214 uid_index_map = {}
215 offset = 0
216
217 for (insert_at, uids_add, next_index, delta) in self.insert_points:
218 for index, uid in enumerate(uids_add):
219 new_unit_index = insert_at + index + 1 + offset
220 if uid in self.db_units:
221 uid_index_map[uid] = {
222 'dbid': self.db_units[uid]['id'],
223 'index': new_unit_index}
224 if delta > 0:
225 offset += delta
226 update_dbids = self.get_updated_dbids()
227 update_dbids.update({x['dbid'] for x in uid_index_map.values()})
228 return (update_dbids, uid_index_map)
229
230 def get_updated_dbids(self):
231 """Returns a set of unit DB ids to be updated.
232 """
233 update_dbids = set()
234
235 for (tag, i1, i2, j1, j2) in self.opcodes:
236 if tag == 'equal':
237 update_dbids.update(
238 set(self.db_units[uid]['id']
239 for uid in self.active_units[i1:i2]
240 if (uid in self.file_units
241 and (DBUnit(self.db_units[uid])
242 != FileUnit(self.file_units[uid])))))
243 return update_dbids
244
245 def has_changes(self, diff):
246 for k, v in diff.items():
247 if k == "update":
248 if len(v[0]) > 0:
249 return True
250 else:
251 if len(v) > 0:
252 return True
253 return False
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_store/diff.py b/pootle/apps/pootle_store/diff.py
--- a/pootle/apps/pootle_store/diff.py
+++ b/pootle/apps/pootle_store/diff.py
@@ -13,14 +13,14 @@
from .fields import to_python as multistring_to_python
from .unit import UnitProxy
-from .util import OBSOLETE
+from .util import FUZZY, OBSOLETE, TRANSLATED, UNTRANSLATED
class UnitDiffProxy(UnitProxy):
"""Wraps File/DB Unit dicts used by StoreDiff for equality comparison"""
match_attrs = ["context", "developer_comment", "locations",
- "source", "target", "translator_comment"]
+ "source", "state", "target", "translator_comment"]
def __eq__(self, other):
return all(getattr(self, k) == getattr(other, k)
@@ -31,7 +31,6 @@
class DBUnit(UnitDiffProxy):
-
pass
@@ -80,13 +79,20 @@
for unit in self.file_store.units:
if unit.isheader():
continue
+ state = UNTRANSLATED
+ if unit.isobsolete():
+ state = OBSOLETE
+ elif unit.istranslated():
+ state = TRANSLATED
+ elif unit.isfuzzy():
+ state = FUZZY
file_units[unit.getid()] = {
"unitid": unit.getid(),
"context": unit.getcontext(),
"locations": unit.getlocations(),
"source": unit.source,
"target": unit.target,
- "state": unit.get_state_n(),
+ "state": state,
"developer_comment": unit.getnotes(origin="developer"),
"translator_comment": unit.getnotes(origin="translator")}
return file_units
| {"golden_diff": "diff --git a/pootle/apps/pootle_store/diff.py b/pootle/apps/pootle_store/diff.py\n--- a/pootle/apps/pootle_store/diff.py\n+++ b/pootle/apps/pootle_store/diff.py\n@@ -13,14 +13,14 @@\n \n from .fields import to_python as multistring_to_python\n from .unit import UnitProxy\n-from .util import OBSOLETE\n+from .util import FUZZY, OBSOLETE, TRANSLATED, UNTRANSLATED\n \n \n class UnitDiffProxy(UnitProxy):\n \"\"\"Wraps File/DB Unit dicts used by StoreDiff for equality comparison\"\"\"\n \n match_attrs = [\"context\", \"developer_comment\", \"locations\",\n- \"source\", \"target\", \"translator_comment\"]\n+ \"source\", \"state\", \"target\", \"translator_comment\"]\n \n def __eq__(self, other):\n return all(getattr(self, k) == getattr(other, k)\n@@ -31,7 +31,6 @@\n \n \n class DBUnit(UnitDiffProxy):\n-\n pass\n \n \n@@ -80,13 +79,20 @@\n for unit in self.file_store.units:\n if unit.isheader():\n continue\n+ state = UNTRANSLATED\n+ if unit.isobsolete():\n+ state = OBSOLETE\n+ elif unit.istranslated():\n+ state = TRANSLATED\n+ elif unit.isfuzzy():\n+ state = FUZZY\n file_units[unit.getid()] = {\n \"unitid\": unit.getid(),\n \"context\": unit.getcontext(),\n \"locations\": unit.getlocations(),\n \"source\": unit.source,\n \"target\": unit.target,\n- \"state\": unit.get_state_n(),\n+ \"state\": state,\n \"developer_comment\": unit.getnotes(origin=\"developer\"),\n \"translator_comment\": unit.getnotes(origin=\"translator\")}\n return file_units\n", "issue": "fix update_stores not changing fuzzy state\nregression from 80d35df2b2c3987470873c541c288dc95c0ec79e\nignoring change in state makes it impossible to update the fuzzy state\nof a translatin unit by modifying the files on disk, and running\nupdate_stores. With state ignored in the comparison, those changes are\nsilently ignored (and thenget overridden again on next sync_stores)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport difflib\nfrom collections import OrderedDict\n\nfrom django.utils.functional import cached_property\n\nfrom .fields import to_python as multistring_to_python\nfrom .unit import UnitProxy\nfrom .util import OBSOLETE\n\n\nclass UnitDiffProxy(UnitProxy):\n \"\"\"Wraps File/DB Unit dicts used by StoreDiff for equality comparison\"\"\"\n\n match_attrs = [\"context\", \"developer_comment\", \"locations\",\n \"source\", \"target\", \"translator_comment\"]\n\n def __eq__(self, other):\n return all(getattr(self, k) == getattr(other, k)\n for k in self.match_attrs)\n\n def __ne__(self, other):\n return not self == other\n\n\nclass DBUnit(UnitDiffProxy):\n\n pass\n\n\nclass FileUnit(UnitDiffProxy):\n\n @property\n def locations(self):\n return \"\\n\".join(self.unit[\"locations\"])\n\n @property\n def source(self):\n return multistring_to_python(self.unit[\"source\"])\n\n @property\n def target(self):\n return multistring_to_python(self.unit[\"target\"])\n\n\nclass StoreDiff(object):\n\n def __init__(self, db_store, file_store, file_revision):\n self.db_store = db_store\n self.file_store = file_store\n self.db_revision = db_store.get_max_unit_revision()\n self.file_revision = file_revision\n\n @cached_property\n def active_units(self):\n return [unitid for unitid, unit in self.db_units.items()\n if unit['state'] != OBSOLETE]\n\n @cached_property\n def db_units(self):\n \"\"\"All of the db units regardless of state or revision\"\"\"\n db_units = OrderedDict()\n unit_fields = (\"unitid\", \"state\", \"id\", \"index\", \"revision\",\n \"source_f\", \"target_f\", \"developer_comment\",\n \"translator_comment\", \"locations\", \"context\")\n for unit in self.db_store.unit_set.values(*unit_fields).order_by(\"index\"):\n db_units[unit[\"unitid\"]] = unit\n return db_units\n\n @cached_property\n def file_units(self):\n file_units = OrderedDict()\n for unit in self.file_store.units:\n if unit.isheader():\n continue\n file_units[unit.getid()] = {\n \"unitid\": unit.getid(),\n \"context\": unit.getcontext(),\n \"locations\": unit.getlocations(),\n \"source\": unit.source,\n \"target\": unit.target,\n \"state\": unit.get_state_n(),\n \"developer_comment\": unit.getnotes(origin=\"developer\"),\n \"translator_comment\": unit.getnotes(origin=\"translator\")}\n return file_units\n\n @cached_property\n def insert_points(self):\n \"\"\"Returns a list of insert points with update index info.\n :return: a list of tuples\n ``(insert_at, uids_to_add, next_index, update_index_delta)`` where\n ``insert_at`` is the point for inserting\n ``uids_to_add`` are the units to be inserted\n ``update_index_delta`` is the offset for index updating\n ``next_index`` is the starting point after which\n ``update_index_delta`` should be applied.\n \"\"\"\n inserts = []\n new_unitid_list = self.new_unit_list\n for (tag, i1, i2, j1, j2) in self.opcodes:\n if tag == 'insert':\n update_index_delta = 0\n insert_at = 0\n if i1 > 0:\n insert_at = (\n self.db_units[self.active_units[i1 - 1]]['index'])\n next_index = insert_at + 1\n if i1 < len(self.active_units):\n next_index = self.db_units[self.active_units[i1]][\"index\"]\n update_index_delta = (\n j2 - j1 - next_index + insert_at + 1)\n\n inserts.append((insert_at,\n new_unitid_list[j1:j2],\n next_index,\n update_index_delta))\n\n elif tag == 'replace':\n insert_at = self.db_units[self.active_units[i1 - 1]]['index']\n next_index = self.db_units[self.active_units[i2 - 
1]]['index']\n inserts.append((insert_at,\n new_unitid_list[j1:j2],\n next_index,\n j2 - j1 - insert_at + next_index))\n\n return inserts\n\n @cached_property\n def new_unit_list(self):\n # If file_revision is gte than the db_revision then new unit list\n # will be exactly what is in the file\n if self.file_revision >= self.db_revision:\n return self.file_units.keys()\n\n # These units are kept as they have been updated since file_revision\n # but do not appear in the file\n new_units = [u for u in self.updated_db_units\n if u not in self.file_units]\n\n # These unit are either present in both or only in the file so are\n # kept in the file order\n new_units += [u for u in self.file_units.keys()\n if u not in self.obsoleted_db_units]\n\n return new_units\n\n @cached_property\n def obsoleted_db_units(self):\n return [unitid for unitid, unit in self.db_units.items()\n if (unit['state'] == OBSOLETE\n and unit[\"revision\"] > self.file_revision)]\n\n @cached_property\n def opcodes(self):\n sm = difflib.SequenceMatcher(None,\n self.active_units,\n self.new_unit_list)\n return sm.get_opcodes()\n\n @cached_property\n def updated_db_units(self):\n return [unitid for unitid, unit in self.db_units.items()\n if (unit['revision'] > self.file_revision\n and unit[\"state\"] != OBSOLETE)]\n\n def diff(self):\n \"\"\"Return a dictionary of change actions or None if there are no\n changes to be made.\n \"\"\"\n diff = {\"index\": self.get_indexes_to_update(),\n \"obsolete\": self.get_units_to_obsolete(),\n \"add\": self.get_units_to_add(),\n \"update\": self.get_units_to_update()}\n if self.has_changes(diff):\n return diff\n return None\n\n def get_indexes_to_update(self):\n offset = 0\n index_updates = []\n for (insert_at, uids_add, next_index, delta) in self.insert_points:\n if delta > 0:\n index_updates += [(next_index + offset, delta)]\n offset += delta\n return index_updates\n\n def get_units_to_add(self):\n offset = 0\n to_add = []\n for (insert_at, uids_add, next_index, delta) in self.insert_points:\n for index, uid in enumerate(uids_add):\n file_unit = self.file_store.findid(uid)\n if file_unit and file_unit.getid() not in self.db_units:\n new_unit_index = insert_at + index + 1 + offset\n to_add += [(file_unit, new_unit_index)]\n if delta > 0:\n offset += delta\n return to_add\n\n def get_units_to_obsolete(self):\n return [unit['id'] for unitid, unit in self.db_units.items()\n if (unitid not in self.file_units\n and unitid in self.active_units\n and unitid not in self.updated_db_units)]\n\n def get_units_to_update(self):\n uid_index_map = {}\n offset = 0\n\n for (insert_at, uids_add, next_index, delta) in self.insert_points:\n for index, uid in enumerate(uids_add):\n new_unit_index = insert_at + index + 1 + offset\n if uid in self.db_units:\n uid_index_map[uid] = {\n 'dbid': self.db_units[uid]['id'],\n 'index': new_unit_index}\n if delta > 0:\n offset += delta\n update_dbids = self.get_updated_dbids()\n update_dbids.update({x['dbid'] for x in uid_index_map.values()})\n return (update_dbids, uid_index_map)\n\n def get_updated_dbids(self):\n \"\"\"Returns a set of unit DB ids to be updated.\n \"\"\"\n update_dbids = set()\n\n for (tag, i1, i2, j1, j2) in self.opcodes:\n if tag == 'equal':\n update_dbids.update(\n set(self.db_units[uid]['id']\n for uid in self.active_units[i1:i2]\n if (uid in self.file_units\n and (DBUnit(self.db_units[uid])\n != FileUnit(self.file_units[uid])))))\n return update_dbids\n\n def has_changes(self, diff):\n for k, v in diff.items():\n if k == \"update\":\n if len(v[0]) > 
0:\n return True\n else:\n if len(v) > 0:\n return True\n return False\n", "path": "pootle/apps/pootle_store/diff.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport difflib\nfrom collections import OrderedDict\n\nfrom django.utils.functional import cached_property\n\nfrom .fields import to_python as multistring_to_python\nfrom .unit import UnitProxy\nfrom .util import FUZZY, OBSOLETE, TRANSLATED, UNTRANSLATED\n\n\nclass UnitDiffProxy(UnitProxy):\n \"\"\"Wraps File/DB Unit dicts used by StoreDiff for equality comparison\"\"\"\n\n match_attrs = [\"context\", \"developer_comment\", \"locations\",\n \"source\", \"state\", \"target\", \"translator_comment\"]\n\n def __eq__(self, other):\n return all(getattr(self, k) == getattr(other, k)\n for k in self.match_attrs)\n\n def __ne__(self, other):\n return not self == other\n\n\nclass DBUnit(UnitDiffProxy):\n pass\n\n\nclass FileUnit(UnitDiffProxy):\n\n @property\n def locations(self):\n return \"\\n\".join(self.unit[\"locations\"])\n\n @property\n def source(self):\n return multistring_to_python(self.unit[\"source\"])\n\n @property\n def target(self):\n return multistring_to_python(self.unit[\"target\"])\n\n\nclass StoreDiff(object):\n\n def __init__(self, db_store, file_store, file_revision):\n self.db_store = db_store\n self.file_store = file_store\n self.db_revision = db_store.get_max_unit_revision()\n self.file_revision = file_revision\n\n @cached_property\n def active_units(self):\n return [unitid for unitid, unit in self.db_units.items()\n if unit['state'] != OBSOLETE]\n\n @cached_property\n def db_units(self):\n \"\"\"All of the db units regardless of state or revision\"\"\"\n db_units = OrderedDict()\n unit_fields = (\"unitid\", \"state\", \"id\", \"index\", \"revision\",\n \"source_f\", \"target_f\", \"developer_comment\",\n \"translator_comment\", \"locations\", \"context\")\n for unit in self.db_store.unit_set.values(*unit_fields).order_by(\"index\"):\n db_units[unit[\"unitid\"]] = unit\n return db_units\n\n @cached_property\n def file_units(self):\n file_units = OrderedDict()\n for unit in self.file_store.units:\n if unit.isheader():\n continue\n state = UNTRANSLATED\n if unit.isobsolete():\n state = OBSOLETE\n elif unit.istranslated():\n state = TRANSLATED\n elif unit.isfuzzy():\n state = FUZZY\n file_units[unit.getid()] = {\n \"unitid\": unit.getid(),\n \"context\": unit.getcontext(),\n \"locations\": unit.getlocations(),\n \"source\": unit.source,\n \"target\": unit.target,\n \"state\": state,\n \"developer_comment\": unit.getnotes(origin=\"developer\"),\n \"translator_comment\": unit.getnotes(origin=\"translator\")}\n return file_units\n\n @cached_property\n def insert_points(self):\n \"\"\"Returns a list of insert points with update index info.\n :return: a list of tuples\n ``(insert_at, uids_to_add, next_index, update_index_delta)`` where\n ``insert_at`` is the point for inserting\n ``uids_to_add`` are the units to be inserted\n ``update_index_delta`` is the offset for index updating\n ``next_index`` is the starting point after which\n ``update_index_delta`` should be applied.\n \"\"\"\n inserts = []\n new_unitid_list = self.new_unit_list\n for (tag, i1, i2, j1, j2) in self.opcodes:\n if tag == 'insert':\n update_index_delta = 0\n insert_at = 0\n if i1 > 0:\n 
insert_at = (\n self.db_units[self.active_units[i1 - 1]]['index'])\n next_index = insert_at + 1\n if i1 < len(self.active_units):\n next_index = self.db_units[self.active_units[i1]][\"index\"]\n update_index_delta = (\n j2 - j1 - next_index + insert_at + 1)\n\n inserts.append((insert_at,\n new_unitid_list[j1:j2],\n next_index,\n update_index_delta))\n\n elif tag == 'replace':\n insert_at = self.db_units[self.active_units[i1 - 1]]['index']\n next_index = self.db_units[self.active_units[i2 - 1]]['index']\n inserts.append((insert_at,\n new_unitid_list[j1:j2],\n next_index,\n j2 - j1 - insert_at + next_index))\n\n return inserts\n\n @cached_property\n def new_unit_list(self):\n # If file_revision is gte than the db_revision then new unit list\n # will be exactly what is in the file\n if self.file_revision >= self.db_revision:\n return self.file_units.keys()\n\n # These units are kept as they have been updated since file_revision\n # but do not appear in the file\n new_units = [u for u in self.updated_db_units\n if u not in self.file_units]\n\n # These unit are either present in both or only in the file so are\n # kept in the file order\n new_units += [u for u in self.file_units.keys()\n if u not in self.obsoleted_db_units]\n\n return new_units\n\n @cached_property\n def obsoleted_db_units(self):\n return [unitid for unitid, unit in self.db_units.items()\n if (unit['state'] == OBSOLETE\n and unit[\"revision\"] > self.file_revision)]\n\n @cached_property\n def opcodes(self):\n sm = difflib.SequenceMatcher(None,\n self.active_units,\n self.new_unit_list)\n return sm.get_opcodes()\n\n @cached_property\n def updated_db_units(self):\n return [unitid for unitid, unit in self.db_units.items()\n if (unit['revision'] > self.file_revision\n and unit[\"state\"] != OBSOLETE)]\n\n def diff(self):\n \"\"\"Return a dictionary of change actions or None if there are no\n changes to be made.\n \"\"\"\n diff = {\"index\": self.get_indexes_to_update(),\n \"obsolete\": self.get_units_to_obsolete(),\n \"add\": self.get_units_to_add(),\n \"update\": self.get_units_to_update()}\n if self.has_changes(diff):\n return diff\n return None\n\n def get_indexes_to_update(self):\n offset = 0\n index_updates = []\n for (insert_at, uids_add, next_index, delta) in self.insert_points:\n if delta > 0:\n index_updates += [(next_index + offset, delta)]\n offset += delta\n return index_updates\n\n def get_units_to_add(self):\n offset = 0\n to_add = []\n for (insert_at, uids_add, next_index, delta) in self.insert_points:\n for index, uid in enumerate(uids_add):\n file_unit = self.file_store.findid(uid)\n if file_unit and file_unit.getid() not in self.db_units:\n new_unit_index = insert_at + index + 1 + offset\n to_add += [(file_unit, new_unit_index)]\n if delta > 0:\n offset += delta\n return to_add\n\n def get_units_to_obsolete(self):\n return [unit['id'] for unitid, unit in self.db_units.items()\n if (unitid not in self.file_units\n and unitid in self.active_units\n and unitid not in self.updated_db_units)]\n\n def get_units_to_update(self):\n uid_index_map = {}\n offset = 0\n\n for (insert_at, uids_add, next_index, delta) in self.insert_points:\n for index, uid in enumerate(uids_add):\n new_unit_index = insert_at + index + 1 + offset\n if uid in self.db_units:\n uid_index_map[uid] = {\n 'dbid': self.db_units[uid]['id'],\n 'index': new_unit_index}\n if delta > 0:\n offset += delta\n update_dbids = self.get_updated_dbids()\n update_dbids.update({x['dbid'] for x in uid_index_map.values()})\n return (update_dbids, uid_index_map)\n\n def 
get_updated_dbids(self):\n \"\"\"Returns a set of unit DB ids to be updated.\n \"\"\"\n update_dbids = set()\n\n for (tag, i1, i2, j1, j2) in self.opcodes:\n if tag == 'equal':\n update_dbids.update(\n set(self.db_units[uid]['id']\n for uid in self.active_units[i1:i2]\n if (uid in self.file_units\n and (DBUnit(self.db_units[uid])\n != FileUnit(self.file_units[uid])))))\n return update_dbids\n\n def has_changes(self, diff):\n for k, v in diff.items():\n if k == \"update\":\n if len(v[0]) > 0:\n return True\n else:\n if len(v) > 0:\n return True\n return False\n", "path": "pootle/apps/pootle_store/diff.py"}]} | 3,030 | 420 |
gh_patches_debug_37008 | rasdani/github-patches | git_diff | great-expectations__great_expectations-2966 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
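As a hedged illustration of the preference stated in the issue (the function names are hypothetical and not taken from the repository), the `__future__` import makes plain `/` behave as true division on Python 2, so the `1.*x/y` coercion trick becomes unnecessary:
```python
# Illustrative sketch only; not part of the repository files below.
from __future__ import division  # "/" is true division for this whole module


def ratio_old_style(x, y):
    # The workaround the issue discourages: force float arithmetic via "1.*".
    return 1. * x / y


def ratio_preferred(x, y):
    # With the __future__ import above, plain "/" already yields 0.25 for (1, 4)
    # on Python 2, matching Python 3; use "//" where truncation is intended.
    return x / y


assert ratio_old_style(1, 4) == ratio_preferred(1, 4) == 0.25
```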
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/rule_based_profiler/profiler.py`
Content:
```
1 import uuid
2 from typing import Dict, List, Optional, Union
3
4 import great_expectations.exceptions as ge_exceptions
5 from great_expectations import DataContext
6 from great_expectations.core import ExpectationConfiguration, ExpectationSuite
7 from great_expectations.data_context.util import instantiate_class_from_config
8 from great_expectations.rule_based_profiler.domain_builder.domain_builder import (
9 DomainBuilder,
10 )
11 from great_expectations.rule_based_profiler.expectation_configuration_builder.expectation_configuration_builder import (
12 ExpectationConfigurationBuilder,
13 )
14 from great_expectations.rule_based_profiler.parameter_builder.parameter_builder import (
15 ParameterBuilder,
16 )
17 from great_expectations.rule_based_profiler.parameter_builder.parameter_container import (
18 ParameterContainer,
19 build_parameter_container_for_variables,
20 )
21 from great_expectations.rule_based_profiler.rule.rule import Rule
22
23
24 class Profiler:
25 """
26 Profiler object serves to profile, or automatically evaluate a set of rules, upon a given
27 batch / multiple batches of data.
28 """
29
30 def __init__(
31 self,
32 *,
33 profiler_config: Optional[Dict[str, Dict[str, Dict]]] = None,
34 data_context: Optional[DataContext] = None,
35 ):
36 """
37 Create a new Profiler using configured rules.
38 For a rule or an item in a rule configuration, instantiates the following if
39 available: a domain builder, a parameter builder, and a configuration builder.
40 These will be used to define profiler computation patterns.
41
42 Args:
43 profiler_config: Variables and Rules configuration as a dictionary
44 data_context: DataContext object that defines a full runtime environment (data access, etc.)
45 """
46 self._data_context = data_context
47 self._rules = []
48
49 rules_configs: Dict[str, Dict] = profiler_config.get("rules", {})
50 rule_name: str
51 rule_config: dict
52
53 for rule_name, rule_config in rules_configs.items():
54 domain_builder_config: dict = rule_config.get("domain_builder")
55
56 if domain_builder_config is None:
57 raise ge_exceptions.ProfilerConfigurationError(
58 message=f'Invalid rule "{rule_name}": no domain_builder found.'
59 )
60
61 domain_builder: DomainBuilder = instantiate_class_from_config(
62 config=domain_builder_config,
63 runtime_environment={"data_context": data_context},
64 config_defaults={
65 "module_name": "great_expectations.rule_based_profiler.domain_builder"
66 },
67 )
68
69 parameter_builders: List[ParameterBuilder] = []
70
71 parameter_builder_configs: dict = rule_config.get("parameter_builders")
72
73 if parameter_builder_configs:
74 parameter_builder_config: dict
75 for parameter_builder_config in parameter_builder_configs:
76 parameter_builders.append(
77 instantiate_class_from_config(
78 config=parameter_builder_config,
79 runtime_environment={"data_context": data_context},
80 config_defaults={
81 "module_name": "great_expectations.rule_based_profiler.parameter_builder"
82 },
83 )
84 )
85
86 expectation_configuration_builders: List[
87 ExpectationConfigurationBuilder
88 ] = []
89
90 expectation_configuration_builder_configs: dict = rule_config.get(
91 "expectation_configuration_builders"
92 )
93
94 if expectation_configuration_builder_configs:
95 expectation_configuration_builder_config: dict
96 for (
97 expectation_configuration_builder_config
98 ) in expectation_configuration_builder_configs:
99 expectation_configuration_builders.append(
100 instantiate_class_from_config(
101 config=expectation_configuration_builder_config,
102 runtime_environment={},
103 config_defaults={
104 "class_name": "DefaultExpectationConfigurationBuilder",
105 "module_name": "great_expectations.rule_based_profiler.expectation_configuration_builder",
106 },
107 )
108 )
109
110 variables_configs: Dict[str, Dict] = profiler_config.get("variables", {})
111 variables: Optional[ParameterContainer] = None
112
113 if variables_configs:
114 variables = build_parameter_container_for_variables(
115 variables_configs=variables_configs
116 )
117
118 self._rules.append(
119 Rule(
120 name=rule_name,
121 domain_builder=domain_builder,
122 parameter_builders=parameter_builders,
123 expectation_configuration_builders=expectation_configuration_builders,
124 variables=variables,
125 )
126 )
127
128 def profile(
129 self,
130 *,
131 expectation_suite_name: Optional[str] = None,
132 ) -> ExpectationSuite:
133 """
134 Args:
135 :param expectation_suite_name: A name for returned Expectation suite.
136 :return: Set of rule evaluation results in the form of an ExpectationSuite
137 """
138 if expectation_suite_name is None:
139 expectation_suite_name = (
140 f"tmp.profiler_{self.__class__.__name__}_suite_{str(uuid.uuid4())[:8]}"
141 )
142
143 expectation_suite: ExpectationSuite = ExpectationSuite(
144 expectation_suite_name=expectation_suite_name
145 )
146
147 rule: Rule
148 for rule in self._rules:
149 expectation_configurations: List[ExpectationConfiguration] = rule.generate()
150 expectation_configuration: ExpectationConfiguration
151 for expectation_configuration in expectation_configurations:
152 expectation_suite.add_expectation(
153 expectation_configuration=expectation_configuration
154 )
155
156 return expectation_suite
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/great_expectations/rule_based_profiler/profiler.py b/great_expectations/rule_based_profiler/profiler.py
--- a/great_expectations/rule_based_profiler/profiler.py
+++ b/great_expectations/rule_based_profiler/profiler.py
@@ -43,10 +43,11 @@
profiler_config: Variables and Rules configuration as a dictionary
data_context: DataContext object that defines a full runtime environment (data access, etc.)
"""
+ self._profiler_config = profiler_config
self._data_context = data_context
self._rules = []
- rules_configs: Dict[str, Dict] = profiler_config.get("rules", {})
+ rules_configs: Dict[str, Dict] = self._profiler_config.get("rules", {})
rule_name: str
rule_config: dict
@@ -107,7 +108,9 @@
)
)
- variables_configs: Dict[str, Dict] = profiler_config.get("variables", {})
+ variables_configs: Dict[str, Dict] = self._profiler_config.get(
+ "variables", {}
+ )
variables: Optional[ParameterContainer] = None
if variables_configs:
@@ -129,10 +132,12 @@
self,
*,
expectation_suite_name: Optional[str] = None,
+ include_citation: bool = True,
) -> ExpectationSuite:
"""
Args:
:param expectation_suite_name: A name for returned Expectation suite.
+ :param include_citation: Whether or not to include the Profiler config in the metadata for the ExpectationSuite produced by the Profiler
:return: Set of rule evaluation results in the form of an ExpectationSuite
"""
if expectation_suite_name is None:
@@ -144,6 +149,12 @@
expectation_suite_name=expectation_suite_name
)
+ if include_citation:
+ expectation_suite.add_citation(
+ comment="Suite created by Rule-Based Profiler with the following config",
+ profiler_config=self._profiler_config,
+ )
+
rule: Rule
for rule in self._rules:
expectation_configurations: List[ExpectationConfiguration] = rule.generate()
| {"golden_diff": "diff --git a/great_expectations/rule_based_profiler/profiler.py b/great_expectations/rule_based_profiler/profiler.py\n--- a/great_expectations/rule_based_profiler/profiler.py\n+++ b/great_expectations/rule_based_profiler/profiler.py\n@@ -43,10 +43,11 @@\n profiler_config: Variables and Rules configuration as a dictionary\n data_context: DataContext object that defines a full runtime environment (data access, etc.)\n \"\"\"\n+ self._profiler_config = profiler_config\n self._data_context = data_context\n self._rules = []\n \n- rules_configs: Dict[str, Dict] = profiler_config.get(\"rules\", {})\n+ rules_configs: Dict[str, Dict] = self._profiler_config.get(\"rules\", {})\n rule_name: str\n rule_config: dict\n \n@@ -107,7 +108,9 @@\n )\n )\n \n- variables_configs: Dict[str, Dict] = profiler_config.get(\"variables\", {})\n+ variables_configs: Dict[str, Dict] = self._profiler_config.get(\n+ \"variables\", {}\n+ )\n variables: Optional[ParameterContainer] = None\n \n if variables_configs:\n@@ -129,10 +132,12 @@\n self,\n *,\n expectation_suite_name: Optional[str] = None,\n+ include_citation: bool = True,\n ) -> ExpectationSuite:\n \"\"\"\n Args:\n :param expectation_suite_name: A name for returned Expectation suite.\n+ :param include_citation: Whether or not to include the Profiler config in the metadata for the ExpectationSuite produced by the Profiler\n :return: Set of rule evaluation results in the form of an ExpectationSuite\n \"\"\"\n if expectation_suite_name is None:\n@@ -144,6 +149,12 @@\n expectation_suite_name=expectation_suite_name\n )\n \n+ if include_citation:\n+ expectation_suite.add_citation(\n+ comment=\"Suite created by Rule-Based Profiler with the following config\",\n+ profiler_config=self._profiler_config,\n+ )\n+\n rule: Rule\n for rule in self._rules:\n expectation_configurations: List[ExpectationConfiguration] = rule.generate()\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import uuid\nfrom typing import Dict, List, Optional, Union\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations import DataContext\nfrom great_expectations.core import ExpectationConfiguration, ExpectationSuite\nfrom great_expectations.data_context.util import instantiate_class_from_config\nfrom great_expectations.rule_based_profiler.domain_builder.domain_builder import (\n DomainBuilder,\n)\nfrom great_expectations.rule_based_profiler.expectation_configuration_builder.expectation_configuration_builder import (\n ExpectationConfigurationBuilder,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder.parameter_builder import (\n ParameterBuilder,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder.parameter_container import (\n ParameterContainer,\n build_parameter_container_for_variables,\n)\nfrom great_expectations.rule_based_profiler.rule.rule import Rule\n\n\nclass Profiler:\n \"\"\"\n Profiler object serves to profile, or automatically evaluate a set of rules, upon a given\n batch / multiple batches of data.\n \"\"\"\n\n def __init__(\n self,\n *,\n profiler_config: Optional[Dict[str, Dict[str, Dict]]] = None,\n data_context: Optional[DataContext] = None,\n ):\n \"\"\"\n Create a new Profiler using configured rules.\n For a rule or an item in a rule configuration, instantiates the following if\n available: a domain builder, a parameter builder, and a configuration builder.\n These will be used to define profiler 
computation patterns.\n\n Args:\n profiler_config: Variables and Rules configuration as a dictionary\n data_context: DataContext object that defines a full runtime environment (data access, etc.)\n \"\"\"\n self._data_context = data_context\n self._rules = []\n\n rules_configs: Dict[str, Dict] = profiler_config.get(\"rules\", {})\n rule_name: str\n rule_config: dict\n\n for rule_name, rule_config in rules_configs.items():\n domain_builder_config: dict = rule_config.get(\"domain_builder\")\n\n if domain_builder_config is None:\n raise ge_exceptions.ProfilerConfigurationError(\n message=f'Invalid rule \"{rule_name}\": no domain_builder found.'\n )\n\n domain_builder: DomainBuilder = instantiate_class_from_config(\n config=domain_builder_config,\n runtime_environment={\"data_context\": data_context},\n config_defaults={\n \"module_name\": \"great_expectations.rule_based_profiler.domain_builder\"\n },\n )\n\n parameter_builders: List[ParameterBuilder] = []\n\n parameter_builder_configs: dict = rule_config.get(\"parameter_builders\")\n\n if parameter_builder_configs:\n parameter_builder_config: dict\n for parameter_builder_config in parameter_builder_configs:\n parameter_builders.append(\n instantiate_class_from_config(\n config=parameter_builder_config,\n runtime_environment={\"data_context\": data_context},\n config_defaults={\n \"module_name\": \"great_expectations.rule_based_profiler.parameter_builder\"\n },\n )\n )\n\n expectation_configuration_builders: List[\n ExpectationConfigurationBuilder\n ] = []\n\n expectation_configuration_builder_configs: dict = rule_config.get(\n \"expectation_configuration_builders\"\n )\n\n if expectation_configuration_builder_configs:\n expectation_configuration_builder_config: dict\n for (\n expectation_configuration_builder_config\n ) in expectation_configuration_builder_configs:\n expectation_configuration_builders.append(\n instantiate_class_from_config(\n config=expectation_configuration_builder_config,\n runtime_environment={},\n config_defaults={\n \"class_name\": \"DefaultExpectationConfigurationBuilder\",\n \"module_name\": \"great_expectations.rule_based_profiler.expectation_configuration_builder\",\n },\n )\n )\n\n variables_configs: Dict[str, Dict] = profiler_config.get(\"variables\", {})\n variables: Optional[ParameterContainer] = None\n\n if variables_configs:\n variables = build_parameter_container_for_variables(\n variables_configs=variables_configs\n )\n\n self._rules.append(\n Rule(\n name=rule_name,\n domain_builder=domain_builder,\n parameter_builders=parameter_builders,\n expectation_configuration_builders=expectation_configuration_builders,\n variables=variables,\n )\n )\n\n def profile(\n self,\n *,\n expectation_suite_name: Optional[str] = None,\n ) -> ExpectationSuite:\n \"\"\"\n Args:\n :param expectation_suite_name: A name for returned Expectation suite.\n :return: Set of rule evaluation results in the form of an ExpectationSuite\n \"\"\"\n if expectation_suite_name is None:\n expectation_suite_name = (\n f\"tmp.profiler_{self.__class__.__name__}_suite_{str(uuid.uuid4())[:8]}\"\n )\n\n expectation_suite: ExpectationSuite = ExpectationSuite(\n expectation_suite_name=expectation_suite_name\n )\n\n rule: Rule\n for rule in self._rules:\n expectation_configurations: List[ExpectationConfiguration] = rule.generate()\n expectation_configuration: ExpectationConfiguration\n for expectation_configuration in expectation_configurations:\n expectation_suite.add_expectation(\n expectation_configuration=expectation_configuration\n )\n\n return 
expectation_suite\n", "path": "great_expectations/rule_based_profiler/profiler.py"}], "after_files": [{"content": "import uuid\nfrom typing import Dict, List, Optional, Union\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations import DataContext\nfrom great_expectations.core import ExpectationConfiguration, ExpectationSuite\nfrom great_expectations.data_context.util import instantiate_class_from_config\nfrom great_expectations.rule_based_profiler.domain_builder.domain_builder import (\n DomainBuilder,\n)\nfrom great_expectations.rule_based_profiler.expectation_configuration_builder.expectation_configuration_builder import (\n ExpectationConfigurationBuilder,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder.parameter_builder import (\n ParameterBuilder,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder.parameter_container import (\n ParameterContainer,\n build_parameter_container_for_variables,\n)\nfrom great_expectations.rule_based_profiler.rule.rule import Rule\n\n\nclass Profiler:\n \"\"\"\n Profiler object serves to profile, or automatically evaluate a set of rules, upon a given\n batch / multiple batches of data.\n \"\"\"\n\n def __init__(\n self,\n *,\n profiler_config: Optional[Dict[str, Dict[str, Dict]]] = None,\n data_context: Optional[DataContext] = None,\n ):\n \"\"\"\n Create a new Profiler using configured rules.\n For a rule or an item in a rule configuration, instantiates the following if\n available: a domain builder, a parameter builder, and a configuration builder.\n These will be used to define profiler computation patterns.\n\n Args:\n profiler_config: Variables and Rules configuration as a dictionary\n data_context: DataContext object that defines a full runtime environment (data access, etc.)\n \"\"\"\n self._profiler_config = profiler_config\n self._data_context = data_context\n self._rules = []\n\n rules_configs: Dict[str, Dict] = self._profiler_config.get(\"rules\", {})\n rule_name: str\n rule_config: dict\n\n for rule_name, rule_config in rules_configs.items():\n domain_builder_config: dict = rule_config.get(\"domain_builder\")\n\n if domain_builder_config is None:\n raise ge_exceptions.ProfilerConfigurationError(\n message=f'Invalid rule \"{rule_name}\": no domain_builder found.'\n )\n\n domain_builder: DomainBuilder = instantiate_class_from_config(\n config=domain_builder_config,\n runtime_environment={\"data_context\": data_context},\n config_defaults={\n \"module_name\": \"great_expectations.rule_based_profiler.domain_builder\"\n },\n )\n\n parameter_builders: List[ParameterBuilder] = []\n\n parameter_builder_configs: dict = rule_config.get(\"parameter_builders\")\n\n if parameter_builder_configs:\n parameter_builder_config: dict\n for parameter_builder_config in parameter_builder_configs:\n parameter_builders.append(\n instantiate_class_from_config(\n config=parameter_builder_config,\n runtime_environment={\"data_context\": data_context},\n config_defaults={\n \"module_name\": \"great_expectations.rule_based_profiler.parameter_builder\"\n },\n )\n )\n\n expectation_configuration_builders: List[\n ExpectationConfigurationBuilder\n ] = []\n\n expectation_configuration_builder_configs: dict = rule_config.get(\n \"expectation_configuration_builders\"\n )\n\n if expectation_configuration_builder_configs:\n expectation_configuration_builder_config: dict\n for (\n expectation_configuration_builder_config\n ) in expectation_configuration_builder_configs:\n expectation_configuration_builders.append(\n 
instantiate_class_from_config(\n config=expectation_configuration_builder_config,\n runtime_environment={},\n config_defaults={\n \"class_name\": \"DefaultExpectationConfigurationBuilder\",\n \"module_name\": \"great_expectations.rule_based_profiler.expectation_configuration_builder\",\n },\n )\n )\n\n variables_configs: Dict[str, Dict] = self._profiler_config.get(\n \"variables\", {}\n )\n variables: Optional[ParameterContainer] = None\n\n if variables_configs:\n variables = build_parameter_container_for_variables(\n variables_configs=variables_configs\n )\n\n self._rules.append(\n Rule(\n name=rule_name,\n domain_builder=domain_builder,\n parameter_builders=parameter_builders,\n expectation_configuration_builders=expectation_configuration_builders,\n variables=variables,\n )\n )\n\n def profile(\n self,\n *,\n expectation_suite_name: Optional[str] = None,\n include_citation: bool = True,\n ) -> ExpectationSuite:\n \"\"\"\n Args:\n :param expectation_suite_name: A name for returned Expectation suite.\n :param include_citation: Whether or not to include the Profiler config in the metadata for the ExpectationSuite produced by the Profiler\n :return: Set of rule evaluation results in the form of an ExpectationSuite\n \"\"\"\n if expectation_suite_name is None:\n expectation_suite_name = (\n f\"tmp.profiler_{self.__class__.__name__}_suite_{str(uuid.uuid4())[:8]}\"\n )\n\n expectation_suite: ExpectationSuite = ExpectationSuite(\n expectation_suite_name=expectation_suite_name\n )\n\n if include_citation:\n expectation_suite.add_citation(\n comment=\"Suite created by Rule-Based Profiler with the following config\",\n profiler_config=self._profiler_config,\n )\n\n rule: Rule\n for rule in self._rules:\n expectation_configurations: List[ExpectationConfiguration] = rule.generate()\n expectation_configuration: ExpectationConfiguration\n for expectation_configuration in expectation_configurations:\n expectation_suite.add_expectation(\n expectation_configuration=expectation_configuration\n )\n\n return expectation_suite\n", "path": "great_expectations/rule_based_profiler/profiler.py"}]} | 1,712 | 493 |
gh_patches_debug_36047 | rasdani/github-patches | git_diff | ivy-llc__ivy-15973 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Sparse Array Functions to Paddle Backend
Add [Sparse Array Functions](https://www.paddlepaddle.org.cn/documentation/docs/en/api/index\_en.html) to Paddle backend
\_
>Please keep in mind that the proper way to link an issue to this list is to comment "- [ ] #issue\_number" while the issue's title only includes the name of the function you've chosen.
\_
## Experimental
- [x] is\_native\_sparse\_array
- [x] native\_sparse\_array
- [x] native\_sparse\_array\_to\_indices\_values\_and\_shape
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/backends/paddle/experimental/sparse_array.py`
Content:
```
1 from ivy.utils.exceptions import IvyNotImplementedException
2 import paddle
3
4
5 def is_native_sparse_array(x: paddle.Tensor) -> bool:
6 return x.is_sparse_coo() or x.is_sparse_csr()
7
8
9 def native_sparse_array(
10 data=None,
11 *,
12 coo_indices=None,
13 crow_indices=None,
14 col_indices=None,
15 ccol_indices=None,
16 row_indices=None,
17 values=None,
18 dense_shape=None,
19 format="coo",
20 ):
21 raise IvyNotImplementedException()
22
23
24 def native_sparse_array_to_indices_values_and_shape(x):
25 raise IvyNotImplementedException()
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/backends/paddle/experimental/sparse_array.py b/ivy/functional/backends/paddle/experimental/sparse_array.py
--- a/ivy/functional/backends/paddle/experimental/sparse_array.py
+++ b/ivy/functional/backends/paddle/experimental/sparse_array.py
@@ -1,11 +1,26 @@
+import ivy
+from ivy.functional.ivy.experimental.sparse_array import (
+ _verify_coo_components,
+ _verify_csr_components,
+ _is_data_not_indices_values_and_shape,
+)
+from ivy.func_wrapper import (
+ with_unsupported_device_and_dtypes,
+)
from ivy.utils.exceptions import IvyNotImplementedException
import paddle
+# local
+from .. import backend_version
+
def is_native_sparse_array(x: paddle.Tensor) -> bool:
return x.is_sparse_coo() or x.is_sparse_csr()
+@with_unsupported_device_and_dtypes(
+ {"2.4.2 and below": {"cpu": ("int8",)}}, backend_version
+)
def native_sparse_array(
data=None,
*,
@@ -17,9 +32,67 @@
values=None,
dense_shape=None,
format="coo",
-):
- raise IvyNotImplementedException()
+) -> paddle.Tensor:
+ format = format.lower()
+
+ if format not in ["coo", "csr"]:
+ raise IvyNotImplementedException(
+ "paddle only supports 'coo' and 'csr' sparse formats."
+ )
+
+ if _is_data_not_indices_values_and_shape(
+ data,
+ coo_indices,
+ crow_indices,
+ col_indices,
+ ccol_indices,
+ row_indices,
+ values,
+ dense_shape,
+ ):
+ ivy.utils.assertions.check_true(
+ ivy.is_native_sparse_array(data), message="not a sparse array"
+ )
+ return data
+
+ if format == "coo":
+ _verify_coo_components(
+ indices=coo_indices, values=values, dense_shape=dense_shape
+ )
+ return paddle.sparse.sparse_coo_tensor(
+ indices=coo_indices,
+ values=values,
+ shape=dense_shape,
+ dtype=dtype,
+ place=device,
+ stop_gradient=not requires_grad,
+ )
+ else:
+ _verify_csr_components(
+ crow_indices=crow_indices,
+ col_indices=col_indices,
+ values=values,
+ dense_shape=dense_shape,
+ )
+ return paddle.sparse.sparse_csr_tensor(
+ crows=crow_indices,
+ cols=col_indices,
+ values=values,
+ shape=dense_shape,
+ dtype=dtype,
+ place=device,
+ stop_gradient=not requires_grad,
+ )
def native_sparse_array_to_indices_values_and_shape(x):
- raise IvyNotImplementedException()
+ if not is_native_sparse_array(x):
+ raise ivy.utils.exceptions.IvyException("not a Paddle Sparse Array")
+ if x.is_sparse_coo():
+ return {"coo_indices": x.indices()}, x.values(), x.shape
+ else:
+ return (
+ {"crow_indices": x.crows(), "col_indices": x.cols()},
+ x.values(),
+ x.shape,
+ )
| {"golden_diff": "diff --git a/ivy/functional/backends/paddle/experimental/sparse_array.py b/ivy/functional/backends/paddle/experimental/sparse_array.py\n--- a/ivy/functional/backends/paddle/experimental/sparse_array.py\n+++ b/ivy/functional/backends/paddle/experimental/sparse_array.py\n@@ -1,11 +1,26 @@\n+import ivy\n+from ivy.functional.ivy.experimental.sparse_array import (\n+ _verify_coo_components,\n+ _verify_csr_components,\n+ _is_data_not_indices_values_and_shape,\n+)\n+from ivy.func_wrapper import (\n+ with_unsupported_device_and_dtypes,\n+)\n from ivy.utils.exceptions import IvyNotImplementedException\n import paddle\n \n+# local\n+from .. import backend_version\n+\n \n def is_native_sparse_array(x: paddle.Tensor) -> bool:\n return x.is_sparse_coo() or x.is_sparse_csr()\n \n \n+@with_unsupported_device_and_dtypes(\n+ {\"2.4.2 and below\": {\"cpu\": (\"int8\",)}}, backend_version\n+)\n def native_sparse_array(\n data=None,\n *,\n@@ -17,9 +32,67 @@\n values=None,\n dense_shape=None,\n format=\"coo\",\n-):\n- raise IvyNotImplementedException()\n+) -> paddle.Tensor:\n+ format = format.lower()\n+\n+ if format not in [\"coo\", \"csr\"]:\n+ raise IvyNotImplementedException(\n+ \"paddle only supports 'coo' and 'csr' sparse formats.\"\n+ )\n+\n+ if _is_data_not_indices_values_and_shape(\n+ data,\n+ coo_indices,\n+ crow_indices,\n+ col_indices,\n+ ccol_indices,\n+ row_indices,\n+ values,\n+ dense_shape,\n+ ):\n+ ivy.utils.assertions.check_true(\n+ ivy.is_native_sparse_array(data), message=\"not a sparse array\"\n+ )\n+ return data\n+\n+ if format == \"coo\":\n+ _verify_coo_components(\n+ indices=coo_indices, values=values, dense_shape=dense_shape\n+ )\n+ return paddle.sparse.sparse_coo_tensor(\n+ indices=coo_indices,\n+ values=values,\n+ shape=dense_shape,\n+ dtype=dtype,\n+ place=device,\n+ stop_gradient=not requires_grad,\n+ )\n+ else:\n+ _verify_csr_components(\n+ crow_indices=crow_indices,\n+ col_indices=col_indices,\n+ values=values,\n+ dense_shape=dense_shape,\n+ )\n+ return paddle.sparse.sparse_csr_tensor(\n+ crows=crow_indices,\n+ cols=col_indices,\n+ values=values,\n+ shape=dense_shape,\n+ dtype=dtype,\n+ place=device,\n+ stop_gradient=not requires_grad,\n+ )\n \n \n def native_sparse_array_to_indices_values_and_shape(x):\n- raise IvyNotImplementedException()\n+ if not is_native_sparse_array(x):\n+ raise ivy.utils.exceptions.IvyException(\"not a Paddle Sparse Array\")\n+ if x.is_sparse_coo():\n+ return {\"coo_indices\": x.indices()}, x.values(), x.shape\n+ else:\n+ return (\n+ {\"crow_indices\": x.crows(), \"col_indices\": x.cols()},\n+ x.values(),\n+ x.shape,\n+ )\n", "issue": "Add Sparse Array Functions to Paddle Backend\nAdd [Sparse Array Functions](https://www.paddlepaddle.org.cn/documentation/docs/en/api/index\\_en.html) to Paddle backend\r\n\r\n\\_\r\n\r\n>Please keep in mind that the proper way to link an issue to this list is to comment \"- [ ] #issue\\_number\" while the issue's title only includes the name of the function you've chosen.\r\n\r\n\\_\r\n\r\n## Experimental\r\n\r\n- [x] is\\_native\\_sparse\\_array\r\n- [x] native\\_sparse\\_array\r\n- [x] native\\_sparse\\_array\\_to\\_indices\\_values\\_and\\_shape\n", "before_files": [{"content": "from ivy.utils.exceptions import IvyNotImplementedException\nimport paddle\n\n\ndef is_native_sparse_array(x: paddle.Tensor) -> bool:\n return x.is_sparse_coo() or x.is_sparse_csr()\n\n\ndef native_sparse_array(\n data=None,\n *,\n coo_indices=None,\n crow_indices=None,\n col_indices=None,\n ccol_indices=None,\n 
row_indices=None,\n values=None,\n dense_shape=None,\n format=\"coo\",\n):\n raise IvyNotImplementedException()\n\n\ndef native_sparse_array_to_indices_values_and_shape(x):\n raise IvyNotImplementedException()\n", "path": "ivy/functional/backends/paddle/experimental/sparse_array.py"}], "after_files": [{"content": "import ivy\nfrom ivy.functional.ivy.experimental.sparse_array import (\n _verify_coo_components,\n _verify_csr_components,\n _is_data_not_indices_values_and_shape,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_device_and_dtypes,\n)\nfrom ivy.utils.exceptions import IvyNotImplementedException\nimport paddle\n\n# local\nfrom .. import backend_version\n\n\ndef is_native_sparse_array(x: paddle.Tensor) -> bool:\n return x.is_sparse_coo() or x.is_sparse_csr()\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"int8\",)}}, backend_version\n)\ndef native_sparse_array(\n data=None,\n *,\n coo_indices=None,\n crow_indices=None,\n col_indices=None,\n ccol_indices=None,\n row_indices=None,\n values=None,\n dense_shape=None,\n format=\"coo\",\n) -> paddle.Tensor:\n format = format.lower()\n\n if format not in [\"coo\", \"csr\"]:\n raise IvyNotImplementedException(\n \"paddle only supports 'coo' and 'csr' sparse formats.\"\n )\n\n if _is_data_not_indices_values_and_shape(\n data,\n coo_indices,\n crow_indices,\n col_indices,\n ccol_indices,\n row_indices,\n values,\n dense_shape,\n ):\n ivy.utils.assertions.check_true(\n ivy.is_native_sparse_array(data), message=\"not a sparse array\"\n )\n return data\n\n if format == \"coo\":\n _verify_coo_components(\n indices=coo_indices, values=values, dense_shape=dense_shape\n )\n return paddle.sparse.sparse_coo_tensor(\n indices=coo_indices,\n values=values,\n shape=dense_shape,\n dtype=dtype,\n place=device,\n stop_gradient=not requires_grad,\n )\n else:\n _verify_csr_components(\n crow_indices=crow_indices,\n col_indices=col_indices,\n values=values,\n dense_shape=dense_shape,\n )\n return paddle.sparse.sparse_csr_tensor(\n crows=crow_indices,\n cols=col_indices,\n values=values,\n shape=dense_shape,\n dtype=dtype,\n place=device,\n stop_gradient=not requires_grad,\n )\n\n\ndef native_sparse_array_to_indices_values_and_shape(x):\n if not is_native_sparse_array(x):\n raise ivy.utils.exceptions.IvyException(\"not a Paddle Sparse Array\")\n if x.is_sparse_coo():\n return {\"coo_indices\": x.indices()}, x.values(), x.shape\n else:\n return (\n {\"crow_indices\": x.crows(), \"col_indices\": x.cols()},\n x.values(),\n x.shape,\n )\n", "path": "ivy/functional/backends/paddle/experimental/sparse_array.py"}]} | 566 | 742 |
gh_patches_debug_30271 | rasdani/github-patches | git_diff | rasterio__rasterio-886 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rio overview --ls should not modify file
Currently running `rio overview --ls` to inspect the overviews modifies the file. We could detect the `--ls` option and open in read-only mode.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/rio/overview.py`
Content:
```
1 # coding: utf-8
2 """Manage overviews of a dataset."""
3
4 from functools import reduce
5 import logging
6 import operator
7
8 import click
9
10 from . import options
11 import rasterio
12 from rasterio.enums import Resampling
13
14
15 def build_handler(ctx, param, value):
16 if value:
17 try:
18 if '^' in value:
19 base, exp_range = value.split('^')
20 exp_min, exp_max = (int(v) for v in exp_range.split('..'))
21 value = [pow(int(base), k) for k in range(exp_min, exp_max + 1)]
22 else:
23 value = [int(v) for v in value.split(',')]
24 except Exception:
25 raise click.BadParameter(u"must match 'n,n,n,…' or 'n^n..n'.")
26 return value
27
28
29 @click.command('overview', short_help="Construct overviews in an existing dataset.")
30 @options.file_in_arg
31 @click.option('--build', callback=build_handler, metavar=u"f1,f2,…|b^min..max",
32 help="A sequence of decimation factors specied as "
33 "comma-separated list of numbers or a base and range of "
34 "exponents.")
35 @click.option('--ls', help="Print the overviews for each band.",
36 is_flag=True, default=False)
37 @click.option('--rebuild', help="Reconstruct existing overviews.",
38 is_flag=True, default=False)
39 @click.option('--resampling', help="Resampling algorithm.",
40 type=click.Choice(
41 [it.name for it in Resampling if it.value in [0, 2, 5, 6, 7]]),
42 default='nearest', show_default=True)
43 @click.pass_context
44 def overview(ctx, input, build, ls, rebuild, resampling):
45 """Construct overviews in an existing dataset.
46
47 A pyramid of overviews computed once and stored in the dataset can
48 improve performance in some applications.
49
50 The decimation levels at which to build overviews can be specified as
51 a comma separated list
52
53 rio overview --build 2,4,8,16
54
55 or a base and range of exponents.
56
57 rio overview --build 2^1..4
58
59 Note that overviews can not currently be removed and are not
60 automatically updated when the dataset's primary bands are
61 modified.
62
63 Information about existing overviews can be printed using the --ls
64 option.
65
66 rio overview --ls
67
68 """
69 with ctx.obj['env']:
70 with rasterio.open(input, 'r+') as dst:
71
72 if ls:
73 resampling_method = dst.tags(
74 ns='rio_overview').get('resampling') or 'unknown'
75
76 click.echo("Overview factors:")
77 for idx in dst.indexes:
78 click.echo(" Band %d: %s (method: '%s')" % (
79 idx, dst.overviews(idx) or 'None', resampling_method))
80
81 elif rebuild:
82 # Build the same overviews for all bands.
83 factors = reduce(
84 operator.or_,
85 [set(dst.overviews(i)) for i in dst.indexes])
86
87 # Attempt to recover the resampling method from dataset tags.
88 resampling_method = dst.tags(
89 ns='rio_overview').get('resampling') or resampling
90
91 dst.build_overviews(
92 list(factors), Resampling[resampling_method])
93
94 elif build:
95 dst.build_overviews(build, Resampling[resampling])
96
97 # Save the resampling method to a tag.
98 dst.update_tags(ns='rio_overview', resampling=resampling)
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rasterio/rio/overview.py b/rasterio/rio/overview.py
--- a/rasterio/rio/overview.py
+++ b/rasterio/rio/overview.py
@@ -67,9 +67,8 @@
"""
with ctx.obj['env']:
- with rasterio.open(input, 'r+') as dst:
-
- if ls:
+ if ls:
+ with rasterio.open(input, 'r') as dst:
resampling_method = dst.tags(
ns='rio_overview').get('resampling') or 'unknown'
@@ -77,8 +76,8 @@
for idx in dst.indexes:
click.echo(" Band %d: %s (method: '%s')" % (
idx, dst.overviews(idx) or 'None', resampling_method))
-
- elif rebuild:
+ elif rebuild:
+ with rasterio.open(input, 'r+') as dst:
# Build the same overviews for all bands.
factors = reduce(
operator.or_,
@@ -91,8 +90,13 @@
dst.build_overviews(
list(factors), Resampling[resampling_method])
- elif build:
+ elif build:
+ with rasterio.open(input, 'r+') as dst:
dst.build_overviews(build, Resampling[resampling])
# Save the resampling method to a tag.
dst.update_tags(ns='rio_overview', resampling=resampling)
+
+ else:
+ raise click.UsageError(
+ "Please specify --ls, --rebuild, or --build ...")
| {"golden_diff": "diff --git a/rasterio/rio/overview.py b/rasterio/rio/overview.py\n--- a/rasterio/rio/overview.py\n+++ b/rasterio/rio/overview.py\n@@ -67,9 +67,8 @@\n \n \"\"\"\n with ctx.obj['env']:\n- with rasterio.open(input, 'r+') as dst:\n-\n- if ls:\n+ if ls:\n+ with rasterio.open(input, 'r') as dst:\n resampling_method = dst.tags(\n ns='rio_overview').get('resampling') or 'unknown'\n \n@@ -77,8 +76,8 @@\n for idx in dst.indexes:\n click.echo(\" Band %d: %s (method: '%s')\" % (\n idx, dst.overviews(idx) or 'None', resampling_method))\n-\n- elif rebuild:\n+ elif rebuild:\n+ with rasterio.open(input, 'r+') as dst:\n # Build the same overviews for all bands.\n factors = reduce(\n operator.or_,\n@@ -91,8 +90,13 @@\n dst.build_overviews(\n list(factors), Resampling[resampling_method])\n \n- elif build:\n+ elif build:\n+ with rasterio.open(input, 'r+') as dst:\n dst.build_overviews(build, Resampling[resampling])\n \n # Save the resampling method to a tag.\n dst.update_tags(ns='rio_overview', resampling=resampling)\n+\n+ else:\n+ raise click.UsageError(\n+ \"Please specify --ls, --rebuild, or --build ...\")\n", "issue": "rio overview --ls should not modify file\nCurrently running `rio overview --ls` to inspect the overviews modifies the file. We could detect the `--ls` option and open in read-only mode. \n\n", "before_files": [{"content": "# coding: utf-8\n\"\"\"Manage overviews of a dataset.\"\"\"\n\nfrom functools import reduce\nimport logging\nimport operator\n\nimport click\n\nfrom . import options\nimport rasterio\nfrom rasterio.enums import Resampling\n\n\ndef build_handler(ctx, param, value):\n if value:\n try:\n if '^' in value:\n base, exp_range = value.split('^')\n exp_min, exp_max = (int(v) for v in exp_range.split('..'))\n value = [pow(int(base), k) for k in range(exp_min, exp_max + 1)]\n else:\n value = [int(v) for v in value.split(',')]\n except Exception:\n raise click.BadParameter(u\"must match 'n,n,n,\u2026' or 'n^n..n'.\")\n return value\n\n\[email protected]('overview', short_help=\"Construct overviews in an existing dataset.\")\[email protected]_in_arg\[email protected]('--build', callback=build_handler, metavar=u\"f1,f2,\u2026|b^min..max\",\n help=\"A sequence of decimation factors specied as \"\n \"comma-separated list of numbers or a base and range of \"\n \"exponents.\")\[email protected]('--ls', help=\"Print the overviews for each band.\",\n is_flag=True, default=False)\[email protected]('--rebuild', help=\"Reconstruct existing overviews.\",\n is_flag=True, default=False)\[email protected]('--resampling', help=\"Resampling algorithm.\",\n type=click.Choice(\n [it.name for it in Resampling if it.value in [0, 2, 5, 6, 7]]),\n default='nearest', show_default=True)\[email protected]_context\ndef overview(ctx, input, build, ls, rebuild, resampling):\n \"\"\"Construct overviews in an existing dataset.\n\n A pyramid of overviews computed once and stored in the dataset can\n improve performance in some applications.\n\n The decimation levels at which to build overviews can be specified as\n a comma separated list\n\n rio overview --build 2,4,8,16\n\n or a base and range of exponents.\n\n rio overview --build 2^1..4\n\n Note that overviews can not currently be removed and are not\n automatically updated when the dataset's primary bands are\n modified.\n\n Information about existing overviews can be printed using the --ls\n option.\n\n rio overview --ls\n\n \"\"\"\n with ctx.obj['env']:\n with rasterio.open(input, 'r+') as dst:\n\n if ls:\n resampling_method = dst.tags(\n 
ns='rio_overview').get('resampling') or 'unknown'\n\n click.echo(\"Overview factors:\")\n for idx in dst.indexes:\n click.echo(\" Band %d: %s (method: '%s')\" % (\n idx, dst.overviews(idx) or 'None', resampling_method))\n\n elif rebuild:\n # Build the same overviews for all bands.\n factors = reduce(\n operator.or_,\n [set(dst.overviews(i)) for i in dst.indexes])\n\n # Attempt to recover the resampling method from dataset tags.\n resampling_method = dst.tags(\n ns='rio_overview').get('resampling') or resampling\n\n dst.build_overviews(\n list(factors), Resampling[resampling_method])\n\n elif build:\n dst.build_overviews(build, Resampling[resampling])\n\n # Save the resampling method to a tag.\n dst.update_tags(ns='rio_overview', resampling=resampling)\n", "path": "rasterio/rio/overview.py"}], "after_files": [{"content": "# coding: utf-8\n\"\"\"Manage overviews of a dataset.\"\"\"\n\nfrom functools import reduce\nimport logging\nimport operator\n\nimport click\n\nfrom . import options\nimport rasterio\nfrom rasterio.enums import Resampling\n\n\ndef build_handler(ctx, param, value):\n if value:\n try:\n if '^' in value:\n base, exp_range = value.split('^')\n exp_min, exp_max = (int(v) for v in exp_range.split('..'))\n value = [pow(int(base), k) for k in range(exp_min, exp_max + 1)]\n else:\n value = [int(v) for v in value.split(',')]\n except Exception:\n raise click.BadParameter(u\"must match 'n,n,n,\u2026' or 'n^n..n'.\")\n return value\n\n\[email protected]('overview', short_help=\"Construct overviews in an existing dataset.\")\[email protected]_in_arg\[email protected]('--build', callback=build_handler, metavar=u\"f1,f2,\u2026|b^min..max\",\n help=\"A sequence of decimation factors specied as \"\n \"comma-separated list of numbers or a base and range of \"\n \"exponents.\")\[email protected]('--ls', help=\"Print the overviews for each band.\",\n is_flag=True, default=False)\[email protected]('--rebuild', help=\"Reconstruct existing overviews.\",\n is_flag=True, default=False)\[email protected]('--resampling', help=\"Resampling algorithm.\",\n type=click.Choice(\n [it.name for it in Resampling if it.value in [0, 2, 5, 6, 7]]),\n default='nearest', show_default=True)\[email protected]_context\ndef overview(ctx, input, build, ls, rebuild, resampling):\n \"\"\"Construct overviews in an existing dataset.\n\n A pyramid of overviews computed once and stored in the dataset can\n improve performance in some applications.\n\n The decimation levels at which to build overviews can be specified as\n a comma separated list\n\n rio overview --build 2,4,8,16\n\n or a base and range of exponents.\n\n rio overview --build 2^1..4\n\n Note that overviews can not currently be removed and are not\n automatically updated when the dataset's primary bands are\n modified.\n\n Information about existing overviews can be printed using the --ls\n option.\n\n rio overview --ls\n\n \"\"\"\n with ctx.obj['env']:\n if ls:\n with rasterio.open(input, 'r') as dst:\n resampling_method = dst.tags(\n ns='rio_overview').get('resampling') or 'unknown'\n\n click.echo(\"Overview factors:\")\n for idx in dst.indexes:\n click.echo(\" Band %d: %s (method: '%s')\" % (\n idx, dst.overviews(idx) or 'None', resampling_method))\n elif rebuild:\n with rasterio.open(input, 'r+') as dst:\n # Build the same overviews for all bands.\n factors = reduce(\n operator.or_,\n [set(dst.overviews(i)) for i in dst.indexes])\n\n # Attempt to recover the resampling method from dataset tags.\n resampling_method = dst.tags(\n 
ns='rio_overview').get('resampling') or resampling\n\n dst.build_overviews(\n list(factors), Resampling[resampling_method])\n\n elif build:\n with rasterio.open(input, 'r+') as dst:\n dst.build_overviews(build, Resampling[resampling])\n\n # Save the resampling method to a tag.\n dst.update_tags(ns='rio_overview', resampling=resampling)\n\n else:\n raise click.UsageError(\n \"Please specify --ls, --rebuild, or --build ...\")\n", "path": "rasterio/rio/overview.py"}]} | 1,269 | 353 |
gh_patches_debug_10366 | rasdani/github-patches | git_diff | python-discord__bot-774 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reduce logging level for tag cooldowns
Sentry Issue: [BOT-19](https://sentry.io/organizations/python-discord/issues/1527377135/?referrer=github_integration)
```
<REDACTED> tried to get the 'ask' tag, but the tag is on cooldown. Cooldown ends in 2.8 seconds.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/cogs/tags.py`
Content:
```
1 import logging
2 import re
3 import time
4 from typing import Dict, List, Optional
5
6 from discord import Colour, Embed
7 from discord.ext.commands import Cog, Context, group
8
9 from bot.bot import Bot
10 from bot.constants import Channels, Cooldowns, MODERATION_ROLES, Roles
11 from bot.converters import TagContentConverter, TagNameConverter
12 from bot.decorators import with_role
13 from bot.pagination import LinePaginator
14
15 log = logging.getLogger(__name__)
16
17 TEST_CHANNELS = (
18 Channels.devtest,
19 Channels.bot,
20 Channels.helpers
21 )
22
23 REGEX_NON_ALPHABET = re.compile(r"[^a-z]", re.MULTILINE & re.IGNORECASE)
24
25
26 class Tags(Cog):
27 """Save new tags and fetch existing tags."""
28
29 def __init__(self, bot: Bot):
30 self.bot = bot
31 self.tag_cooldowns = {}
32
33 self._cache = {}
34 self._last_fetch: float = 0.0
35
36 async def _get_tags(self, is_forced: bool = False) -> None:
37 """Get all tags."""
38 # refresh only when there's a more than 5m gap from last call.
39 time_now: float = time.time()
40 if is_forced or not self._last_fetch or time_now - self._last_fetch > 5 * 60:
41 tags = await self.bot.api_client.get('bot/tags')
42 self._cache = {tag['title'].lower(): tag for tag in tags}
43 self._last_fetch = time_now
44
45 @staticmethod
46 def _fuzzy_search(search: str, target: str) -> int:
47 """A simple scoring algorithm based on how many letters are found / total, with order in mind."""
48 current, index = 0, 0
49 _search = REGEX_NON_ALPHABET.sub('', search.lower())
50 _targets = iter(REGEX_NON_ALPHABET.split(target.lower()))
51 _target = next(_targets)
52 try:
53 while True:
54 while index < len(_target) and _search[current] == _target[index]:
55 current += 1
56 index += 1
57 index, _target = 0, next(_targets)
58 except (StopIteration, IndexError):
59 pass
60 return current / len(_search) * 100
61
62 def _get_suggestions(self, tag_name: str, thresholds: Optional[List[int]] = None) -> List[str]:
63 """Return a list of suggested tags."""
64 scores: Dict[str, int] = {
65 tag_title: Tags._fuzzy_search(tag_name, tag['title'])
66 for tag_title, tag in self._cache.items()
67 }
68
69 thresholds = thresholds or [100, 90, 80, 70, 60]
70
71 for threshold in thresholds:
72 suggestions = [
73 self._cache[tag_title]
74 for tag_title, matching_score in scores.items()
75 if matching_score >= threshold
76 ]
77 if suggestions:
78 return suggestions
79
80 return []
81
82 async def _get_tag(self, tag_name: str) -> list:
83 """Get a specific tag."""
84 await self._get_tags()
85 found = [self._cache.get(tag_name.lower(), None)]
86 if not found[0]:
87 return self._get_suggestions(tag_name)
88 return found
89
90 @group(name='tags', aliases=('tag', 't'), invoke_without_command=True)
91 async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:
92 """Show all known tags, a single tag, or run a subcommand."""
93 await ctx.invoke(self.get_command, tag_name=tag_name)
94
95 @tags_group.command(name='get', aliases=('show', 'g'))
96 async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:
97 """Get a specified tag, or a list of all tags if no tag is specified."""
98 def _command_on_cooldown(tag_name: str) -> bool:
99 """
100 Check if the command is currently on cooldown, on a per-tag, per-channel basis.
101
102 The cooldown duration is set in constants.py.
103 """
104 now = time.time()
105
106 cooldown_conditions = (
107 tag_name
108 and tag_name in self.tag_cooldowns
109 and (now - self.tag_cooldowns[tag_name]["time"]) < Cooldowns.tags
110 and self.tag_cooldowns[tag_name]["channel"] == ctx.channel.id
111 )
112
113 if cooldown_conditions:
114 return True
115 return False
116
117 if _command_on_cooldown(tag_name):
118 time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name]["time"])
119 log.warning(f"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. "
120 f"Cooldown ends in {time_left:.1f} seconds.")
121 return
122
123 await self._get_tags()
124
125 if tag_name is not None:
126 founds = await self._get_tag(tag_name)
127
128 if len(founds) == 1:
129 tag = founds[0]
130 if ctx.channel.id not in TEST_CHANNELS:
131 self.tag_cooldowns[tag_name] = {
132 "time": time.time(),
133 "channel": ctx.channel.id
134 }
135 await ctx.send(embed=Embed.from_dict(tag['embed']))
136 elif founds and len(tag_name) >= 3:
137 await ctx.send(embed=Embed(
138 title='Did you mean ...',
139 description='\n'.join(tag['title'] for tag in founds[:10])
140 ))
141
142 else:
143 tags = self._cache.values()
144 if not tags:
145 await ctx.send(embed=Embed(
146 description="**There are no tags in the database!**",
147 colour=Colour.red()
148 ))
149 else:
150 embed: Embed = Embed(title="**Current tags**")
151 await LinePaginator.paginate(
152 sorted(f"**»** {tag['title']}" for tag in tags),
153 ctx,
154 embed,
155 footer_text="To show a tag, type !tags <tagname>.",
156 empty=False,
157 max_lines=15
158 )
159
160 @tags_group.command(name='set', aliases=('add', 's'))
161 @with_role(*MODERATION_ROLES)
162 async def set_command(
163 self,
164 ctx: Context,
165 tag_name: TagNameConverter,
166 *,
167 tag_content: TagContentConverter,
168 ) -> None:
169 """Create a new tag."""
170 body = {
171 'title': tag_name.lower().strip(),
172 'embed': {
173 'title': tag_name,
174 'description': tag_content
175 }
176 }
177
178 await self.bot.api_client.post('bot/tags', json=body)
179 self._cache[tag_name.lower()] = await self.bot.api_client.get(f'bot/tags/{tag_name}')
180
181 log.debug(f"{ctx.author} successfully added the following tag to our database: \n"
182 f"tag_name: {tag_name}\n"
183 f"tag_content: '{tag_content}'\n")
184
185 await ctx.send(embed=Embed(
186 title="Tag successfully added",
187 description=f"**{tag_name}** added to tag database.",
188 colour=Colour.blurple()
189 ))
190
191 @tags_group.command(name='edit', aliases=('e', ))
192 @with_role(*MODERATION_ROLES)
193 async def edit_command(
194 self,
195 ctx: Context,
196 tag_name: TagNameConverter,
197 *,
198 tag_content: TagContentConverter,
199 ) -> None:
200 """Edit an existing tag."""
201 body = {
202 'embed': {
203 'title': tag_name,
204 'description': tag_content
205 }
206 }
207
208 await self.bot.api_client.patch(f'bot/tags/{tag_name}', json=body)
209 self._cache[tag_name.lower()] = await self.bot.api_client.get(f'bot/tags/{tag_name}')
210
211 log.debug(f"{ctx.author} successfully edited the following tag in our database: \n"
212 f"tag_name: {tag_name}\n"
213 f"tag_content: '{tag_content}'\n")
214
215 await ctx.send(embed=Embed(
216 title="Tag successfully edited",
217 description=f"**{tag_name}** edited in the database.",
218 colour=Colour.blurple()
219 ))
220
221 @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))
222 @with_role(Roles.admin, Roles.owner)
223 async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:
224 """Remove a tag from the database."""
225 await self.bot.api_client.delete(f'bot/tags/{tag_name}')
226 self._cache.pop(tag_name.lower(), None)
227
228 log.debug(f"{ctx.author} successfully deleted the tag called '{tag_name}'")
229 await ctx.send(embed=Embed(
230 title=tag_name,
231 description=f"Tag successfully removed: {tag_name}.",
232 colour=Colour.blurple()
233 ))
234
235
236 def setup(bot: Bot) -> None:
237 """Load the Tags cog."""
238 bot.add_cog(Tags(bot))
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/cogs/tags.py b/bot/cogs/tags.py
--- a/bot/cogs/tags.py
+++ b/bot/cogs/tags.py
@@ -116,8 +116,10 @@
if _command_on_cooldown(tag_name):
time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name]["time"])
- log.warning(f"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. "
- f"Cooldown ends in {time_left:.1f} seconds.")
+ log.info(
+ f"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. "
+ f"Cooldown ends in {time_left:.1f} seconds."
+ )
return
await self._get_tags()
| {"golden_diff": "diff --git a/bot/cogs/tags.py b/bot/cogs/tags.py\n--- a/bot/cogs/tags.py\n+++ b/bot/cogs/tags.py\n@@ -116,8 +116,10 @@\n \n if _command_on_cooldown(tag_name):\n time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name][\"time\"])\n- log.warning(f\"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. \"\n- f\"Cooldown ends in {time_left:.1f} seconds.\")\n+ log.info(\n+ f\"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. \"\n+ f\"Cooldown ends in {time_left:.1f} seconds.\"\n+ )\n return\n \n await self._get_tags()\n", "issue": "Reduce logging level for tag cooldowns\nSentry Issue: [BOT-19](https://sentry.io/organizations/python-discord/issues/1527377135/?referrer=github_integration)\n\n```\n<REDACTED> tried to get the 'ask' tag, but the tag is on cooldown. Cooldown ends in 2.8 seconds.\n```\n", "before_files": [{"content": "import logging\nimport re\nimport time\nfrom typing import Dict, List, Optional\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import Cog, Context, group\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, Cooldowns, MODERATION_ROLES, Roles\nfrom bot.converters import TagContentConverter, TagNameConverter\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\nTEST_CHANNELS = (\n Channels.devtest,\n Channels.bot,\n Channels.helpers\n)\n\nREGEX_NON_ALPHABET = re.compile(r\"[^a-z]\", re.MULTILINE & re.IGNORECASE)\n\n\nclass Tags(Cog):\n \"\"\"Save new tags and fetch existing tags.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.tag_cooldowns = {}\n\n self._cache = {}\n self._last_fetch: float = 0.0\n\n async def _get_tags(self, is_forced: bool = False) -> None:\n \"\"\"Get all tags.\"\"\"\n # refresh only when there's a more than 5m gap from last call.\n time_now: float = time.time()\n if is_forced or not self._last_fetch or time_now - self._last_fetch > 5 * 60:\n tags = await self.bot.api_client.get('bot/tags')\n self._cache = {tag['title'].lower(): tag for tag in tags}\n self._last_fetch = time_now\n\n @staticmethod\n def _fuzzy_search(search: str, target: str) -> int:\n \"\"\"A simple scoring algorithm based on how many letters are found / total, with order in mind.\"\"\"\n current, index = 0, 0\n _search = REGEX_NON_ALPHABET.sub('', search.lower())\n _targets = iter(REGEX_NON_ALPHABET.split(target.lower()))\n _target = next(_targets)\n try:\n while True:\n while index < len(_target) and _search[current] == _target[index]:\n current += 1\n index += 1\n index, _target = 0, next(_targets)\n except (StopIteration, IndexError):\n pass\n return current / len(_search) * 100\n\n def _get_suggestions(self, tag_name: str, thresholds: Optional[List[int]] = None) -> List[str]:\n \"\"\"Return a list of suggested tags.\"\"\"\n scores: Dict[str, int] = {\n tag_title: Tags._fuzzy_search(tag_name, tag['title'])\n for tag_title, tag in self._cache.items()\n }\n\n thresholds = thresholds or [100, 90, 80, 70, 60]\n\n for threshold in thresholds:\n suggestions = [\n self._cache[tag_title]\n for tag_title, matching_score in scores.items()\n if matching_score >= threshold\n ]\n if suggestions:\n return suggestions\n\n return []\n\n async def _get_tag(self, tag_name: str) -> list:\n \"\"\"Get a specific tag.\"\"\"\n await self._get_tags()\n found = [self._cache.get(tag_name.lower(), None)]\n if not found[0]:\n return self._get_suggestions(tag_name)\n return found\n\n @group(name='tags', aliases=('tag', 
't'), invoke_without_command=True)\n async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Show all known tags, a single tag, or run a subcommand.\"\"\"\n await ctx.invoke(self.get_command, tag_name=tag_name)\n\n @tags_group.command(name='get', aliases=('show', 'g'))\n async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Get a specified tag, or a list of all tags if no tag is specified.\"\"\"\n def _command_on_cooldown(tag_name: str) -> bool:\n \"\"\"\n Check if the command is currently on cooldown, on a per-tag, per-channel basis.\n\n The cooldown duration is set in constants.py.\n \"\"\"\n now = time.time()\n\n cooldown_conditions = (\n tag_name\n and tag_name in self.tag_cooldowns\n and (now - self.tag_cooldowns[tag_name][\"time\"]) < Cooldowns.tags\n and self.tag_cooldowns[tag_name][\"channel\"] == ctx.channel.id\n )\n\n if cooldown_conditions:\n return True\n return False\n\n if _command_on_cooldown(tag_name):\n time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name][\"time\"])\n log.warning(f\"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. \"\n f\"Cooldown ends in {time_left:.1f} seconds.\")\n return\n\n await self._get_tags()\n\n if tag_name is not None:\n founds = await self._get_tag(tag_name)\n\n if len(founds) == 1:\n tag = founds[0]\n if ctx.channel.id not in TEST_CHANNELS:\n self.tag_cooldowns[tag_name] = {\n \"time\": time.time(),\n \"channel\": ctx.channel.id\n }\n await ctx.send(embed=Embed.from_dict(tag['embed']))\n elif founds and len(tag_name) >= 3:\n await ctx.send(embed=Embed(\n title='Did you mean ...',\n description='\\n'.join(tag['title'] for tag in founds[:10])\n ))\n\n else:\n tags = self._cache.values()\n if not tags:\n await ctx.send(embed=Embed(\n description=\"**There are no tags in the database!**\",\n colour=Colour.red()\n ))\n else:\n embed: Embed = Embed(title=\"**Current tags**\")\n await LinePaginator.paginate(\n sorted(f\"**\u00bb** {tag['title']}\" for tag in tags),\n ctx,\n embed,\n footer_text=\"To show a tag, type !tags <tagname>.\",\n empty=False,\n max_lines=15\n )\n\n @tags_group.command(name='set', aliases=('add', 's'))\n @with_role(*MODERATION_ROLES)\n async def set_command(\n self,\n ctx: Context,\n tag_name: TagNameConverter,\n *,\n tag_content: TagContentConverter,\n ) -> None:\n \"\"\"Create a new tag.\"\"\"\n body = {\n 'title': tag_name.lower().strip(),\n 'embed': {\n 'title': tag_name,\n 'description': tag_content\n }\n }\n\n await self.bot.api_client.post('bot/tags', json=body)\n self._cache[tag_name.lower()] = await self.bot.api_client.get(f'bot/tags/{tag_name}')\n\n log.debug(f\"{ctx.author} successfully added the following tag to our database: \\n\"\n f\"tag_name: {tag_name}\\n\"\n f\"tag_content: '{tag_content}'\\n\")\n\n await ctx.send(embed=Embed(\n title=\"Tag successfully added\",\n description=f\"**{tag_name}** added to tag database.\",\n colour=Colour.blurple()\n ))\n\n @tags_group.command(name='edit', aliases=('e', ))\n @with_role(*MODERATION_ROLES)\n async def edit_command(\n self,\n ctx: Context,\n tag_name: TagNameConverter,\n *,\n tag_content: TagContentConverter,\n ) -> None:\n \"\"\"Edit an existing tag.\"\"\"\n body = {\n 'embed': {\n 'title': tag_name,\n 'description': tag_content\n }\n }\n\n await self.bot.api_client.patch(f'bot/tags/{tag_name}', json=body)\n self._cache[tag_name.lower()] = await self.bot.api_client.get(f'bot/tags/{tag_name}')\n\n log.debug(f\"{ctx.author} successfully 
edited the following tag in our database: \\n\"\n f\"tag_name: {tag_name}\\n\"\n f\"tag_content: '{tag_content}'\\n\")\n\n await ctx.send(embed=Embed(\n title=\"Tag successfully edited\",\n description=f\"**{tag_name}** edited in the database.\",\n colour=Colour.blurple()\n ))\n\n @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))\n @with_role(Roles.admin, Roles.owner)\n async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:\n \"\"\"Remove a tag from the database.\"\"\"\n await self.bot.api_client.delete(f'bot/tags/{tag_name}')\n self._cache.pop(tag_name.lower(), None)\n\n log.debug(f\"{ctx.author} successfully deleted the tag called '{tag_name}'\")\n await ctx.send(embed=Embed(\n title=tag_name,\n description=f\"Tag successfully removed: {tag_name}.\",\n colour=Colour.blurple()\n ))\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Tags cog.\"\"\"\n bot.add_cog(Tags(bot))\n", "path": "bot/cogs/tags.py"}], "after_files": [{"content": "import logging\nimport re\nimport time\nfrom typing import Dict, List, Optional\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import Cog, Context, group\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, Cooldowns, MODERATION_ROLES, Roles\nfrom bot.converters import TagContentConverter, TagNameConverter\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\nTEST_CHANNELS = (\n Channels.devtest,\n Channels.bot,\n Channels.helpers\n)\n\nREGEX_NON_ALPHABET = re.compile(r\"[^a-z]\", re.MULTILINE & re.IGNORECASE)\n\n\nclass Tags(Cog):\n \"\"\"Save new tags and fetch existing tags.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.tag_cooldowns = {}\n\n self._cache = {}\n self._last_fetch: float = 0.0\n\n async def _get_tags(self, is_forced: bool = False) -> None:\n \"\"\"Get all tags.\"\"\"\n # refresh only when there's a more than 5m gap from last call.\n time_now: float = time.time()\n if is_forced or not self._last_fetch or time_now - self._last_fetch > 5 * 60:\n tags = await self.bot.api_client.get('bot/tags')\n self._cache = {tag['title'].lower(): tag for tag in tags}\n self._last_fetch = time_now\n\n @staticmethod\n def _fuzzy_search(search: str, target: str) -> int:\n \"\"\"A simple scoring algorithm based on how many letters are found / total, with order in mind.\"\"\"\n current, index = 0, 0\n _search = REGEX_NON_ALPHABET.sub('', search.lower())\n _targets = iter(REGEX_NON_ALPHABET.split(target.lower()))\n _target = next(_targets)\n try:\n while True:\n while index < len(_target) and _search[current] == _target[index]:\n current += 1\n index += 1\n index, _target = 0, next(_targets)\n except (StopIteration, IndexError):\n pass\n return current / len(_search) * 100\n\n def _get_suggestions(self, tag_name: str, thresholds: Optional[List[int]] = None) -> List[str]:\n \"\"\"Return a list of suggested tags.\"\"\"\n scores: Dict[str, int] = {\n tag_title: Tags._fuzzy_search(tag_name, tag['title'])\n for tag_title, tag in self._cache.items()\n }\n\n thresholds = thresholds or [100, 90, 80, 70, 60]\n\n for threshold in thresholds:\n suggestions = [\n self._cache[tag_title]\n for tag_title, matching_score in scores.items()\n if matching_score >= threshold\n ]\n if suggestions:\n return suggestions\n\n return []\n\n async def _get_tag(self, tag_name: str) -> list:\n \"\"\"Get a specific tag.\"\"\"\n await self._get_tags()\n found = [self._cache.get(tag_name.lower(), None)]\n if not found[0]:\n return 
self._get_suggestions(tag_name)\n return found\n\n @group(name='tags', aliases=('tag', 't'), invoke_without_command=True)\n async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Show all known tags, a single tag, or run a subcommand.\"\"\"\n await ctx.invoke(self.get_command, tag_name=tag_name)\n\n @tags_group.command(name='get', aliases=('show', 'g'))\n async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Get a specified tag, or a list of all tags if no tag is specified.\"\"\"\n def _command_on_cooldown(tag_name: str) -> bool:\n \"\"\"\n Check if the command is currently on cooldown, on a per-tag, per-channel basis.\n\n The cooldown duration is set in constants.py.\n \"\"\"\n now = time.time()\n\n cooldown_conditions = (\n tag_name\n and tag_name in self.tag_cooldowns\n and (now - self.tag_cooldowns[tag_name][\"time\"]) < Cooldowns.tags\n and self.tag_cooldowns[tag_name][\"channel\"] == ctx.channel.id\n )\n\n if cooldown_conditions:\n return True\n return False\n\n if _command_on_cooldown(tag_name):\n time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name][\"time\"])\n log.info(\n f\"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. \"\n f\"Cooldown ends in {time_left:.1f} seconds.\"\n )\n return\n\n await self._get_tags()\n\n if tag_name is not None:\n founds = await self._get_tag(tag_name)\n\n if len(founds) == 1:\n tag = founds[0]\n if ctx.channel.id not in TEST_CHANNELS:\n self.tag_cooldowns[tag_name] = {\n \"time\": time.time(),\n \"channel\": ctx.channel.id\n }\n await ctx.send(embed=Embed.from_dict(tag['embed']))\n elif founds and len(tag_name) >= 3:\n await ctx.send(embed=Embed(\n title='Did you mean ...',\n description='\\n'.join(tag['title'] for tag in founds[:10])\n ))\n\n else:\n tags = self._cache.values()\n if not tags:\n await ctx.send(embed=Embed(\n description=\"**There are no tags in the database!**\",\n colour=Colour.red()\n ))\n else:\n embed: Embed = Embed(title=\"**Current tags**\")\n await LinePaginator.paginate(\n sorted(f\"**\u00bb** {tag['title']}\" for tag in tags),\n ctx,\n embed,\n footer_text=\"To show a tag, type !tags <tagname>.\",\n empty=False,\n max_lines=15\n )\n\n @tags_group.command(name='set', aliases=('add', 's'))\n @with_role(*MODERATION_ROLES)\n async def set_command(\n self,\n ctx: Context,\n tag_name: TagNameConverter,\n *,\n tag_content: TagContentConverter,\n ) -> None:\n \"\"\"Create a new tag.\"\"\"\n body = {\n 'title': tag_name.lower().strip(),\n 'embed': {\n 'title': tag_name,\n 'description': tag_content\n }\n }\n\n await self.bot.api_client.post('bot/tags', json=body)\n self._cache[tag_name.lower()] = await self.bot.api_client.get(f'bot/tags/{tag_name}')\n\n log.debug(f\"{ctx.author} successfully added the following tag to our database: \\n\"\n f\"tag_name: {tag_name}\\n\"\n f\"tag_content: '{tag_content}'\\n\")\n\n await ctx.send(embed=Embed(\n title=\"Tag successfully added\",\n description=f\"**{tag_name}** added to tag database.\",\n colour=Colour.blurple()\n ))\n\n @tags_group.command(name='edit', aliases=('e', ))\n @with_role(*MODERATION_ROLES)\n async def edit_command(\n self,\n ctx: Context,\n tag_name: TagNameConverter,\n *,\n tag_content: TagContentConverter,\n ) -> None:\n \"\"\"Edit an existing tag.\"\"\"\n body = {\n 'embed': {\n 'title': tag_name,\n 'description': tag_content\n }\n }\n\n await self.bot.api_client.patch(f'bot/tags/{tag_name}', json=body)\n self._cache[tag_name.lower()] = await 
self.bot.api_client.get(f'bot/tags/{tag_name}')\n\n log.debug(f\"{ctx.author} successfully edited the following tag in our database: \\n\"\n f\"tag_name: {tag_name}\\n\"\n f\"tag_content: '{tag_content}'\\n\")\n\n await ctx.send(embed=Embed(\n title=\"Tag successfully edited\",\n description=f\"**{tag_name}** edited in the database.\",\n colour=Colour.blurple()\n ))\n\n @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))\n @with_role(Roles.admin, Roles.owner)\n async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:\n \"\"\"Remove a tag from the database.\"\"\"\n await self.bot.api_client.delete(f'bot/tags/{tag_name}')\n self._cache.pop(tag_name.lower(), None)\n\n log.debug(f\"{ctx.author} successfully deleted the tag called '{tag_name}'\")\n await ctx.send(embed=Embed(\n title=tag_name,\n description=f\"Tag successfully removed: {tag_name}.\",\n colour=Colour.blurple()\n ))\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Tags cog.\"\"\"\n bot.add_cog(Tags(bot))\n", "path": "bot/cogs/tags.py"}]} | 2,910 | 188 |
gh_patches_debug_20748 | rasdani/github-patches | git_diff | WordPress__openverse-api-318 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add audio to the ingestion server tests
Audio is currently not included in the ingestion server integration or unit tests. We should update these tests to include support for audio. Separate PRs for unit and integration tests would be best. Below is some information on these tests and how to work with them.
## Running the tests
To run the tests and get a sense of what they do, do the following steps:
```bash
cd ingestion_server
pipenv install
pipenv run python3 test/integration_tests.py
```
This is currently blocked by #143. I would've liked to run the tests to learn a bit more about how they work but this isn't yet possible.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sample_data/make_sample_pop.py`
Content:
```
1 import csv
2 import random
3
4
5 in_tsv = open("sample_data.csv", "r")
6 out_tsv = open("sample_popularity_data.csv", "w+")
7 output_fields = ["identifier", "normalized_popularity"]
8 reader = csv.DictReader(in_tsv, delimiter=",")
9 writer = csv.DictWriter(out_tsv, delimiter=",", fieldnames=output_fields)
10 writer.writeheader()
11 for row in reader:
12 pop = random.uniform(0, 100)
13 out_row = {"identifier": row["identifier"], "normalized_popularity": pop}
14 writer.writerow(out_row)
15
```
Path: `ingestion_server/ingestion_server/tasks.py`
Content:
```
1 """
2 Simple in-memory tracking of executed tasks.
3 """
4
5 import datetime as dt
6 import logging
7 from enum import Enum
8 from multiprocessing import Process
9
10 import requests
11
12 from ingestion_server.indexer import TableIndexer, elasticsearch_connect
13 from ingestion_server.ingest import reload_upstream
14
15
16 class TaskTypes(Enum):
17 # Completely reindex all data for a given model.
18 REINDEX = 0
19 # Reindex updates to a model from the database since a certain date.
20 UPDATE_INDEX = 1
21 # Download the latest copy of the data from the upstream database, then
22 # completely reindex the newly imported data.
23 INGEST_UPSTREAM = 2
24 # Create indices in Elasticsearch for QA tests.
25 # This is not intended for production use, but can be safely executed in a
26 # production environment without consequence.
27 LOAD_TEST_DATA = 3
28
29
30 class TaskTracker:
31 def __init__(self):
32 self.id_task = {}
33 self.id_action = {}
34 self.id_progress = {}
35 self.id_start_time = {}
36 self.id_finish_time = {}
37
38 def add_task(self, task, task_id, action, progress, finish_time):
39 self._prune_old_tasks()
40 self.id_task[task_id] = task
41 self.id_action[task_id] = action
42 self.id_progress[task_id] = progress
43 self.id_start_time[task_id] = dt.datetime.utcnow().timestamp()
44 self.id_finish_time[task_id] = finish_time
45 return task_id
46
47 def _prune_old_tasks(self):
48 pass
49
50 def list_task_statuses(self):
51 self._prune_old_tasks()
52 results = []
53 for _id, task in self.id_task.items():
54 percent_completed = self.id_progress[_id].value
55 active = task.is_alive()
56 start_time = self.id_start_time[_id]
57 finish_time = self.id_finish_time[_id].value
58 results.append(
59 {
60 "task_id": _id,
61 "active": active,
62 "action": self.id_action[_id],
63 "progress": percent_completed,
64 "error": percent_completed < 100 and not active,
65 "start_time": start_time,
66 "finish_time": finish_time,
67 }
68 )
69 sorted_results = sorted(results, key=lambda x: x["finish_time"])
70
71 to_utc = dt.datetime.utcfromtimestamp
72
73 def render_date(x):
74 return to_utc(x) if x != 0.0 else None
75
76 # Convert date to a readable format
77 for idx, task in enumerate(sorted_results):
78 start_time = task["start_time"]
79 finish_time = task["finish_time"]
80 sorted_results[idx]["start_time"] = str(render_date(start_time))
81 sorted_results[idx]["finish_time"] = str(render_date(finish_time))
82
83 return sorted_results
84
85
86 class Task(Process):
87 def __init__(
88 self, model, task_type, since_date, progress, task_id, finish_time, callback_url
89 ):
90 Process.__init__(self)
91 self.model = model
92 self.task_type = task_type
93 self.since_date = since_date
94 self.progress = progress
95 self.task_id = task_id
96 self.finish_time = finish_time
97 self.callback_url = callback_url
98
99 def run(self):
100 # Map task types to actions.
101 elasticsearch = elasticsearch_connect()
102 indexer = TableIndexer(
103 elasticsearch, self.model, self.progress, self.finish_time
104 )
105 if self.task_type == TaskTypes.REINDEX:
106 indexer.reindex(self.model)
107 elif self.task_type == TaskTypes.UPDATE_INDEX:
108 indexer.update(self.model, self.since_date)
109 elif self.task_type == TaskTypes.INGEST_UPSTREAM:
110 reload_upstream(self.model)
111 if self.model == "audio":
112 reload_upstream("audioset", approach="basic")
113 indexer.reindex(self.model)
114 elif self.task_type == TaskTypes.LOAD_TEST_DATA:
115 indexer.load_test_data(self.model)
116 logging.info(f"Task {self.task_id} exited.")
117 if self.callback_url:
118 try:
119 requests.post(self.callback_url)
120 except requests.exceptions.RequestException as e:
121 logging.error("Failed to send callback!")
122 logging.error(e)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ingestion_server/ingestion_server/tasks.py b/ingestion_server/ingestion_server/tasks.py
--- a/ingestion_server/ingestion_server/tasks.py
+++ b/ingestion_server/ingestion_server/tasks.py
@@ -116,7 +116,9 @@
logging.info(f"Task {self.task_id} exited.")
if self.callback_url:
try:
- requests.post(self.callback_url)
+ logging.info("Sending callback request")
+ res = requests.post(self.callback_url)
+ logging.info(f"Response: {res.text}")
except requests.exceptions.RequestException as e:
logging.error("Failed to send callback!")
logging.error(e)
diff --git a/sample_data/make_sample_pop.py b/sample_data/make_sample_pop.py
deleted file mode 100644
--- a/sample_data/make_sample_pop.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import csv
-import random
-
-
-in_tsv = open("sample_data.csv", "r")
-out_tsv = open("sample_popularity_data.csv", "w+")
-output_fields = ["identifier", "normalized_popularity"]
-reader = csv.DictReader(in_tsv, delimiter=",")
-writer = csv.DictWriter(out_tsv, delimiter=",", fieldnames=output_fields)
-writer.writeheader()
-for row in reader:
- pop = random.uniform(0, 100)
- out_row = {"identifier": row["identifier"], "normalized_popularity": pop}
- writer.writerow(out_row)
| {"golden_diff": "diff --git a/ingestion_server/ingestion_server/tasks.py b/ingestion_server/ingestion_server/tasks.py\n--- a/ingestion_server/ingestion_server/tasks.py\n+++ b/ingestion_server/ingestion_server/tasks.py\n@@ -116,7 +116,9 @@\n logging.info(f\"Task {self.task_id} exited.\")\n if self.callback_url:\n try:\n- requests.post(self.callback_url)\n+ logging.info(\"Sending callback request\")\n+ res = requests.post(self.callback_url)\n+ logging.info(f\"Response: {res.text}\")\n except requests.exceptions.RequestException as e:\n logging.error(\"Failed to send callback!\")\n logging.error(e)\ndiff --git a/sample_data/make_sample_pop.py b/sample_data/make_sample_pop.py\ndeleted file mode 100644\n--- a/sample_data/make_sample_pop.py\n+++ /dev/null\n@@ -1,14 +0,0 @@\n-import csv\n-import random\n-\n-\n-in_tsv = open(\"sample_data.csv\", \"r\")\n-out_tsv = open(\"sample_popularity_data.csv\", \"w+\")\n-output_fields = [\"identifier\", \"normalized_popularity\"]\n-reader = csv.DictReader(in_tsv, delimiter=\",\")\n-writer = csv.DictWriter(out_tsv, delimiter=\",\", fieldnames=output_fields)\n-writer.writeheader()\n-for row in reader:\n- pop = random.uniform(0, 100)\n- out_row = {\"identifier\": row[\"identifier\"], \"normalized_popularity\": pop}\n- writer.writerow(out_row)\n", "issue": "Add audio to the ingestion server tests\nAudio is currently not included in the ingestion server integration or unit tests. We should update these tests to include support for audio. Separate PRs for unit and integration tests would be best. Below is some information on these tests and how to work with them.\r\n\r\n## Running the tests \r\n\r\nTo run the tests and get a sense of what they do, do the following steps:\r\n\r\n```bash\r\ncd ingestion_server\r\npipenv install\r\npipenv run python3 test/integration_tests.py\r\n```\r\n\r\nThis is currently blocked by #143. 
I would've liked to run the tests to learn a bit more about how they work but this isn't yet possible.\n", "before_files": [{"content": "import csv\nimport random\n\n\nin_tsv = open(\"sample_data.csv\", \"r\")\nout_tsv = open(\"sample_popularity_data.csv\", \"w+\")\noutput_fields = [\"identifier\", \"normalized_popularity\"]\nreader = csv.DictReader(in_tsv, delimiter=\",\")\nwriter = csv.DictWriter(out_tsv, delimiter=\",\", fieldnames=output_fields)\nwriter.writeheader()\nfor row in reader:\n pop = random.uniform(0, 100)\n out_row = {\"identifier\": row[\"identifier\"], \"normalized_popularity\": pop}\n writer.writerow(out_row)\n", "path": "sample_data/make_sample_pop.py"}, {"content": "\"\"\"\nSimple in-memory tracking of executed tasks.\n\"\"\"\n\nimport datetime as dt\nimport logging\nfrom enum import Enum\nfrom multiprocessing import Process\n\nimport requests\n\nfrom ingestion_server.indexer import TableIndexer, elasticsearch_connect\nfrom ingestion_server.ingest import reload_upstream\n\n\nclass TaskTypes(Enum):\n # Completely reindex all data for a given model.\n REINDEX = 0\n # Reindex updates to a model from the database since a certain date.\n UPDATE_INDEX = 1\n # Download the latest copy of the data from the upstream database, then\n # completely reindex the newly imported data.\n INGEST_UPSTREAM = 2\n # Create indices in Elasticsearch for QA tests.\n # This is not intended for production use, but can be safely executed in a\n # production environment without consequence.\n LOAD_TEST_DATA = 3\n\n\nclass TaskTracker:\n def __init__(self):\n self.id_task = {}\n self.id_action = {}\n self.id_progress = {}\n self.id_start_time = {}\n self.id_finish_time = {}\n\n def add_task(self, task, task_id, action, progress, finish_time):\n self._prune_old_tasks()\n self.id_task[task_id] = task\n self.id_action[task_id] = action\n self.id_progress[task_id] = progress\n self.id_start_time[task_id] = dt.datetime.utcnow().timestamp()\n self.id_finish_time[task_id] = finish_time\n return task_id\n\n def _prune_old_tasks(self):\n pass\n\n def list_task_statuses(self):\n self._prune_old_tasks()\n results = []\n for _id, task in self.id_task.items():\n percent_completed = self.id_progress[_id].value\n active = task.is_alive()\n start_time = self.id_start_time[_id]\n finish_time = self.id_finish_time[_id].value\n results.append(\n {\n \"task_id\": _id,\n \"active\": active,\n \"action\": self.id_action[_id],\n \"progress\": percent_completed,\n \"error\": percent_completed < 100 and not active,\n \"start_time\": start_time,\n \"finish_time\": finish_time,\n }\n )\n sorted_results = sorted(results, key=lambda x: x[\"finish_time\"])\n\n to_utc = dt.datetime.utcfromtimestamp\n\n def render_date(x):\n return to_utc(x) if x != 0.0 else None\n\n # Convert date to a readable format\n for idx, task in enumerate(sorted_results):\n start_time = task[\"start_time\"]\n finish_time = task[\"finish_time\"]\n sorted_results[idx][\"start_time\"] = str(render_date(start_time))\n sorted_results[idx][\"finish_time\"] = str(render_date(finish_time))\n\n return sorted_results\n\n\nclass Task(Process):\n def __init__(\n self, model, task_type, since_date, progress, task_id, finish_time, callback_url\n ):\n Process.__init__(self)\n self.model = model\n self.task_type = task_type\n self.since_date = since_date\n self.progress = progress\n self.task_id = task_id\n self.finish_time = finish_time\n self.callback_url = callback_url\n\n def run(self):\n # Map task types to actions.\n elasticsearch = elasticsearch_connect()\n 
indexer = TableIndexer(\n elasticsearch, self.model, self.progress, self.finish_time\n )\n if self.task_type == TaskTypes.REINDEX:\n indexer.reindex(self.model)\n elif self.task_type == TaskTypes.UPDATE_INDEX:\n indexer.update(self.model, self.since_date)\n elif self.task_type == TaskTypes.INGEST_UPSTREAM:\n reload_upstream(self.model)\n if self.model == \"audio\":\n reload_upstream(\"audioset\", approach=\"basic\")\n indexer.reindex(self.model)\n elif self.task_type == TaskTypes.LOAD_TEST_DATA:\n indexer.load_test_data(self.model)\n logging.info(f\"Task {self.task_id} exited.\")\n if self.callback_url:\n try:\n requests.post(self.callback_url)\n except requests.exceptions.RequestException as e:\n logging.error(\"Failed to send callback!\")\n logging.error(e)\n", "path": "ingestion_server/ingestion_server/tasks.py"}], "after_files": [{"content": null, "path": "sample_data/make_sample_pop.py"}, {"content": "\"\"\"\nSimple in-memory tracking of executed tasks.\n\"\"\"\n\nimport datetime as dt\nimport logging\nfrom enum import Enum\nfrom multiprocessing import Process\n\nimport requests\n\nfrom ingestion_server.indexer import TableIndexer, elasticsearch_connect\nfrom ingestion_server.ingest import reload_upstream\n\n\nclass TaskTypes(Enum):\n # Completely reindex all data for a given model.\n REINDEX = 0\n # Reindex updates to a model from the database since a certain date.\n UPDATE_INDEX = 1\n # Download the latest copy of the data from the upstream database, then\n # completely reindex the newly imported data.\n INGEST_UPSTREAM = 2\n # Create indices in Elasticsearch for QA tests.\n # This is not intended for production use, but can be safely executed in a\n # production environment without consequence.\n LOAD_TEST_DATA = 3\n\n\nclass TaskTracker:\n def __init__(self):\n self.id_task = {}\n self.id_action = {}\n self.id_progress = {}\n self.id_start_time = {}\n self.id_finish_time = {}\n\n def add_task(self, task, task_id, action, progress, finish_time):\n self._prune_old_tasks()\n self.id_task[task_id] = task\n self.id_action[task_id] = action\n self.id_progress[task_id] = progress\n self.id_start_time[task_id] = dt.datetime.utcnow().timestamp()\n self.id_finish_time[task_id] = finish_time\n return task_id\n\n def _prune_old_tasks(self):\n pass\n\n def list_task_statuses(self):\n self._prune_old_tasks()\n results = []\n for _id, task in self.id_task.items():\n percent_completed = self.id_progress[_id].value\n active = task.is_alive()\n start_time = self.id_start_time[_id]\n finish_time = self.id_finish_time[_id].value\n results.append(\n {\n \"task_id\": _id,\n \"active\": active,\n \"action\": self.id_action[_id],\n \"progress\": percent_completed,\n \"error\": percent_completed < 100 and not active,\n \"start_time\": start_time,\n \"finish_time\": finish_time,\n }\n )\n sorted_results = sorted(results, key=lambda x: x[\"finish_time\"])\n\n to_utc = dt.datetime.utcfromtimestamp\n\n def render_date(x):\n return to_utc(x) if x != 0.0 else None\n\n # Convert date to a readable format\n for idx, task in enumerate(sorted_results):\n start_time = task[\"start_time\"]\n finish_time = task[\"finish_time\"]\n sorted_results[idx][\"start_time\"] = str(render_date(start_time))\n sorted_results[idx][\"finish_time\"] = str(render_date(finish_time))\n\n return sorted_results\n\n\nclass Task(Process):\n def __init__(\n self, model, task_type, since_date, progress, task_id, finish_time, callback_url\n ):\n Process.__init__(self)\n self.model = model\n self.task_type = task_type\n self.since_date = 
since_date\n self.progress = progress\n self.task_id = task_id\n self.finish_time = finish_time\n self.callback_url = callback_url\n\n def run(self):\n # Map task types to actions.\n elasticsearch = elasticsearch_connect()\n indexer = TableIndexer(\n elasticsearch, self.model, self.progress, self.finish_time\n )\n if self.task_type == TaskTypes.REINDEX:\n indexer.reindex(self.model)\n elif self.task_type == TaskTypes.UPDATE_INDEX:\n indexer.update(self.model, self.since_date)\n elif self.task_type == TaskTypes.INGEST_UPSTREAM:\n reload_upstream(self.model)\n if self.model == \"audio\":\n reload_upstream(\"audioset\", approach=\"basic\")\n indexer.reindex(self.model)\n elif self.task_type == TaskTypes.LOAD_TEST_DATA:\n indexer.load_test_data(self.model)\n logging.info(f\"Task {self.task_id} exited.\")\n if self.callback_url:\n try:\n logging.info(\"Sending callback request\")\n res = requests.post(self.callback_url)\n logging.info(f\"Response: {res.text}\")\n except requests.exceptions.RequestException as e:\n logging.error(\"Failed to send callback!\")\n logging.error(e)\n", "path": "ingestion_server/ingestion_server/tasks.py"}]} | 1,738 | 335 |
gh_patches_debug_41905 | rasdani/github-patches | git_diff | pytorch__ignite-478 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve create_supervised_trainer with optional output_transform
Following [the discussion](https://github.com/pytorch/ignite/pull/476#discussion_r272108999), the idea is to give more flexibility to users of `create_supervised_trainer`:
```python
def default_output_transform(x, y, y_pred, loss):
return loss.item()
def create_supervised_trainer(model, optimizer, loss_fn,
device=None, non_blocking=False, prepare_batch=_prepare_batch,
output_transform=default_output_transform):
if device:
model.to(device)
def _update(engine, batch):
model.train()
optimizer.zero_grad()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
loss = loss_fn(y_pred, y)
loss.backward()
optimizer.step()
return output_transform(x, y, y_pred, loss)
return Engine(_update)
```
cc @IlyaOvodov
--- END ISSUE ---
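For concreteness, a minimal sketch of how the proposed `output_transform` hook could be consumed once it exists; the model, optimizer, synthetic data and handler below are placeholders, not project code:

```python
import torch
import torch.nn as nn

from ignite.engine import Events, create_supervised_trainer

model = nn.Linear(10, 2)                       # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
loss_fn = nn.CrossEntropyLoss()

# Keep both the scalar loss and the raw predictions in engine.state.output.
trainer = create_supervised_trainer(
    model, optimizer, loss_fn,
    output_transform=lambda x, y, y_pred, loss: {"loss": loss.item(), "y_pred": y_pred},
)

@trainer.on(Events.ITERATION_COMPLETED)
def log_output(engine):
    # engine.state.output is whatever output_transform returned above.
    print(engine.state.output["loss"])

data = [(torch.randn(4, 10), torch.randint(0, 2, (4,))) for _ in range(8)]
trainer.run(data, max_epochs=1)
```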
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/engine/__init__.py`
Content:
```
1 import torch
2
3 from ignite.engine.engine import Engine, State, Events
4 from ignite.utils import convert_tensor
5
6
7 def _prepare_batch(batch, device=None, non_blocking=False):
8 """Prepare batch for training: pass to a device with options.
9
10 """
11 x, y = batch
12 return (convert_tensor(x, device=device, non_blocking=non_blocking),
13 convert_tensor(y, device=device, non_blocking=non_blocking))
14
15
16 def create_supervised_trainer(model, optimizer, loss_fn,
17 device=None, non_blocking=False,
18 prepare_batch=_prepare_batch):
19 """
20 Factory function for creating a trainer for supervised models.
21
22 Args:
23 model (`torch.nn.Module`): the model to train.
24 optimizer (`torch.optim.Optimizer`): the optimizer to use.
25 loss_fn (torch.nn loss function): the loss function to use.
26 device (str, optional): device type specification (default: None).
27 Applies to both model and batches.
28 non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
29 with respect to the host. For other cases, this argument has no effect.
30 prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
31 tuple of tensors `(batch_x, batch_y)`.
32
33 Note: `engine.state.output` for this engine is the loss of the processed batch.
34
35 Returns:
36 Engine: a trainer engine with supervised update function.
37 """
38 if device:
39 model.to(device)
40
41 def _update(engine, batch):
42 model.train()
43 optimizer.zero_grad()
44 x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
45 y_pred = model(x)
46 loss = loss_fn(y_pred, y)
47 loss.backward()
48 optimizer.step()
49 return loss.item()
50
51 return Engine(_update)
52
53
54 def create_supervised_evaluator(model, metrics={},
55 device=None, non_blocking=False,
56 prepare_batch=_prepare_batch):
57 """
58 Factory function for creating an evaluator for supervised models.
59
60 Args:
61 model (`torch.nn.Module`): the model to train.
62 metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.
63 device (str, optional): device type specification (default: None).
64 Applies to both model and batches.
65 non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
66 with respect to the host. For other cases, this argument has no effect.
67 prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
68 tuple of tensors `(batch_x, batch_y)`.
69
70 Note: `engine.state.output` for this engine is a tuple of `(batch_pred, batch_y)`.
71
72 Returns:
73 Engine: an evaluator engine with supervised inference function.
74 """
75 if device:
76 model.to(device)
77
78 def _inference(engine, batch):
79 model.eval()
80 with torch.no_grad():
81 x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
82 y_pred = model(x)
83 return y_pred, y
84
85 engine = Engine(_inference)
86
87 for name, metric in metrics.items():
88 metric.attach(engine, name)
89
90 return engine
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/engine/__init__.py b/ignite/engine/__init__.py
--- a/ignite/engine/__init__.py
+++ b/ignite/engine/__init__.py
@@ -15,7 +15,8 @@
def create_supervised_trainer(model, optimizer, loss_fn,
device=None, non_blocking=False,
- prepare_batch=_prepare_batch):
+ prepare_batch=_prepare_batch,
+ output_transform=lambda x, y, y_pred, loss: loss.item()):
"""
Factory function for creating a trainer for supervised models.
@@ -29,8 +30,11 @@
with respect to the host. For other cases, this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
+ output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value
+ to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
- Note: `engine.state.output` for this engine is the loss of the processed batch.
+ Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is the loss
+ of the processed batch by default.
Returns:
Engine: a trainer engine with supervised update function.
@@ -46,14 +50,15 @@
loss = loss_fn(y_pred, y)
loss.backward()
optimizer.step()
- return loss.item()
+ return output_transform(x, y, y_pred, loss)
return Engine(_update)
def create_supervised_evaluator(model, metrics={},
device=None, non_blocking=False,
- prepare_batch=_prepare_batch):
+ prepare_batch=_prepare_batch,
+ output_transform=lambda x, y, y_pred: (y_pred, y,)):
"""
Factory function for creating an evaluator for supervised models.
@@ -66,8 +71,12 @@
with respect to the host. For other cases, this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
+ output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value
+ to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits
+ output expected by metrics. If you change it you should use `output_transform` in metrics.
- Note: `engine.state.output` for this engine is a tuple of `(batch_pred, batch_y)`.
+ Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is
+ a tuple of `(batch_pred, batch_y)` by default.
Returns:
Engine: an evaluator engine with supervised inference function.
@@ -80,7 +89,7 @@
with torch.no_grad():
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
- return y_pred, y
+ return output_transform(x, y, y_pred)
engine = Engine(_inference)
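The patch above also threads `output_transform` through `create_supervised_evaluator`, and the updated docstring warns that metrics then need their own `output_transform`. A hedged sketch of that interplay (all names below are placeholders):

```python
import torch
import torch.nn as nn

from ignite.engine import create_supervised_evaluator
from ignite.metrics import Accuracy

model = nn.Linear(10, 2)                       # placeholder model

# The engine output becomes a dict, so Accuracy is told how to unpack it.
evaluator = create_supervised_evaluator(
    model,
    metrics={"acc": Accuracy(output_transform=lambda out: (out["y_pred"], out["y"]))},
    output_transform=lambda x, y, y_pred: {"x": x, "y": y, "y_pred": y_pred},
)

data = [(torch.randn(4, 10), torch.randint(0, 2, (4,))) for _ in range(4)]
state = evaluator.run(data)
print(state.metrics["acc"])
```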
| {"golden_diff": "diff --git a/ignite/engine/__init__.py b/ignite/engine/__init__.py\n--- a/ignite/engine/__init__.py\n+++ b/ignite/engine/__init__.py\n@@ -15,7 +15,8 @@\n \n def create_supervised_trainer(model, optimizer, loss_fn,\n device=None, non_blocking=False,\n- prepare_batch=_prepare_batch):\n+ prepare_batch=_prepare_batch,\n+ output_transform=lambda x, y, y_pred, loss: loss.item()):\n \"\"\"\n Factory function for creating a trainer for supervised models.\n \n@@ -29,8 +30,11 @@\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n+ output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value\n+ to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.\n \n- Note: `engine.state.output` for this engine is the loss of the processed batch.\n+ Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is the loss\n+ of the processed batch by default.\n \n Returns:\n Engine: a trainer engine with supervised update function.\n@@ -46,14 +50,15 @@\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n- return loss.item()\n+ return output_transform(x, y, y_pred, loss)\n \n return Engine(_update)\n \n \n def create_supervised_evaluator(model, metrics={},\n device=None, non_blocking=False,\n- prepare_batch=_prepare_batch):\n+ prepare_batch=_prepare_batch,\n+ output_transform=lambda x, y, y_pred: (y_pred, y,)):\n \"\"\"\n Factory function for creating an evaluator for supervised models.\n \n@@ -66,8 +71,12 @@\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n+ output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value\n+ to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits\n+ output expected by metrics. 
If you change it you should use `output_transform` in metrics.\n \n- Note: `engine.state.output` for this engine is a tuple of `(batch_pred, batch_y)`.\n+ Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is\n+ a tuple of `(batch_pred, batch_y)` by default.\n \n Returns:\n Engine: an evaluator engine with supervised inference function.\n@@ -80,7 +89,7 @@\n with torch.no_grad():\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n- return y_pred, y\n+ return output_transform(x, y, y_pred)\n \n engine = Engine(_inference)\n", "issue": "Improve create_supervised_trainer with optional output_transform\nFollowing [the discussion](https://github.com/pytorch/ignite/pull/476#discussion_r272108999), idea is to give more flexibility to users who are using `create_supervised_trainer`:\r\n```python\r\ndef default_output_transform(x, y, y_pred, loss):\r\n return loss.item() \r\n\r\n\r\ndef create_supervised_trainer(model, optimizer, loss_fn,\r\n device=None, non_blocking=False, prepare_batch=_prepare_batch, \r\n output_transform=default_output_transform):\r\n if device:\r\n model.to(device)\r\n\r\n def _update(engine, batch):\r\n model.train()\r\n optimizer.zero_grad()\r\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\r\n y_pred = model(x)\r\n loss = loss_fn(y_pred, y)\r\n loss.backward()\r\n optimizer.step()\r\n return output_transform(x, y, y_pred, loss)\r\n\r\n return Engine(_update) \r\n```\r\n\r\ncc @IlyaOvodov\nImprove create_supervised_trainer with optional output_transform\nFollowing [the discussion](https://github.com/pytorch/ignite/pull/476#discussion_r272108999), idea is to give more flexibility to users who are using `create_supervised_trainer`:\r\n```python\r\ndef default_output_transform(x, y, y_pred, loss):\r\n return loss.item() \r\n\r\n\r\ndef create_supervised_trainer(model, optimizer, loss_fn,\r\n device=None, non_blocking=False, prepare_batch=_prepare_batch, \r\n output_transform=default_output_transform):\r\n if device:\r\n model.to(device)\r\n\r\n def _update(engine, batch):\r\n model.train()\r\n optimizer.zero_grad()\r\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\r\n y_pred = model(x)\r\n loss = loss_fn(y_pred, y)\r\n loss.backward()\r\n optimizer.step()\r\n return output_transform(x, y, y_pred, loss)\r\n\r\n return Engine(_update) \r\n```\r\n\r\ncc @IlyaOvodov\n", "before_files": [{"content": "import torch\n\nfrom ignite.engine.engine import Engine, State, Events\nfrom ignite.utils import convert_tensor\n\n\ndef _prepare_batch(batch, device=None, non_blocking=False):\n \"\"\"Prepare batch for training: pass to a device with options.\n\n \"\"\"\n x, y = batch\n return (convert_tensor(x, device=device, non_blocking=non_blocking),\n convert_tensor(y, device=device, non_blocking=non_blocking))\n\n\ndef create_supervised_trainer(model, optimizer, loss_fn,\n device=None, non_blocking=False,\n prepare_batch=_prepare_batch):\n \"\"\"\n Factory function for creating a trainer for supervised models.\n\n Args:\n model (`torch.nn.Module`): the model to train.\n optimizer (`torch.optim.Optimizer`): the optimizer to use.\n loss_fn (torch.nn loss function): the loss function to use.\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. 
For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n\n Note: `engine.state.output` for this engine is the loss of the processed batch.\n\n Returns:\n Engine: a trainer engine with supervised update function.\n \"\"\"\n if device:\n model.to(device)\n\n def _update(engine, batch):\n model.train()\n optimizer.zero_grad()\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n return loss.item()\n\n return Engine(_update)\n\n\ndef create_supervised_evaluator(model, metrics={},\n device=None, non_blocking=False,\n prepare_batch=_prepare_batch):\n \"\"\"\n Factory function for creating an evaluator for supervised models.\n\n Args:\n model (`torch.nn.Module`): the model to train.\n metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n\n Note: `engine.state.output` for this engine is a tuple of `(batch_pred, batch_y)`.\n\n Returns:\n Engine: an evaluator engine with supervised inference function.\n \"\"\"\n if device:\n model.to(device)\n\n def _inference(engine, batch):\n model.eval()\n with torch.no_grad():\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n return y_pred, y\n\n engine = Engine(_inference)\n\n for name, metric in metrics.items():\n metric.attach(engine, name)\n\n return engine\n", "path": "ignite/engine/__init__.py"}], "after_files": [{"content": "import torch\n\nfrom ignite.engine.engine import Engine, State, Events\nfrom ignite.utils import convert_tensor\n\n\ndef _prepare_batch(batch, device=None, non_blocking=False):\n \"\"\"Prepare batch for training: pass to a device with options.\n\n \"\"\"\n x, y = batch\n return (convert_tensor(x, device=device, non_blocking=non_blocking),\n convert_tensor(y, device=device, non_blocking=non_blocking))\n\n\ndef create_supervised_trainer(model, optimizer, loss_fn,\n device=None, non_blocking=False,\n prepare_batch=_prepare_batch,\n output_transform=lambda x, y, y_pred, loss: loss.item()):\n \"\"\"\n Factory function for creating a trainer for supervised models.\n\n Args:\n model (`torch.nn.Module`): the model to train.\n optimizer (`torch.optim.Optimizer`): the optimizer to use.\n loss_fn (torch.nn loss function): the loss function to use.\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value\n to be assigned to engine's state.output after each iteration. 
Default is returning `loss.item()`.\n\n Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is the loss\n of the processed batch by default.\n\n Returns:\n Engine: a trainer engine with supervised update function.\n \"\"\"\n if device:\n model.to(device)\n\n def _update(engine, batch):\n model.train()\n optimizer.zero_grad()\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n return output_transform(x, y, y_pred, loss)\n\n return Engine(_update)\n\n\ndef create_supervised_evaluator(model, metrics={},\n device=None, non_blocking=False,\n prepare_batch=_prepare_batch,\n output_transform=lambda x, y, y_pred: (y_pred, y,)):\n \"\"\"\n Factory function for creating an evaluator for supervised models.\n\n Args:\n model (`torch.nn.Module`): the model to train.\n metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value\n to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits\n output expected by metrics. If you change it you should use `output_transform` in metrics.\n\n Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is\n a tuple of `(batch_pred, batch_y)` by default.\n\n Returns:\n Engine: an evaluator engine with supervised inference function.\n \"\"\"\n if device:\n model.to(device)\n\n def _inference(engine, batch):\n model.eval()\n with torch.no_grad():\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n return output_transform(x, y, y_pred)\n\n engine = Engine(_inference)\n\n for name, metric in metrics.items():\n metric.attach(engine, name)\n\n return engine\n", "path": "ignite/engine/__init__.py"}]} | 1,590 | 728 |
gh_patches_debug_33068 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-579 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add some support for programmatic instrumentation
[This comment](https://github.com/open-telemetry/opentelemetry-python/pull/551#issuecomment-608993167) in #551 raised the concern about making it possible to use the instrumentors in a programmatic way too, not only automatically.
Using them programmatically means adding the code that performs instrumentation directly in the application, for example, [this](https://github.com/open-telemetry/opentelemetry-python/pull/551/files#diff-906a392e80f621fdbc1ea38af3c41081R19) or [this](https://github.com/open-telemetry/opentelemetry-python/pull/551/files#diff-906a392e80f621fdbc1ea38af3c41081L22).
It is very likely that this programmatic use of the instrumentors needs to be implemented in a framework-specific way; that is, the way we can do this kind of instrumentation is different for Flask and different again for Django, for example. That means we may end up with special code for each framework, as opposed to a universal mechanism that works for every framework (in which case this issue could end up being closed without any fix).
These programmatic instrumentation mechanisms can be more sophisticated; for example, an OpenTracing-related project shows [here](https://github.com/opentracing-contrib/python-django/blob/master/README.rst#tracing-individual-requests) how the programmatic instrumentation mechanism lets the user select which application functions are to be traced.
In summary, what we want to introduce with this issue is the capability to perform instrumentation without having to directly write the code that creates the spans, but also without using the `opentelemetry-auto-instrumentation` command: instead, with a different mechanism (probably specifically tailored for each framework (Django, Flask, etc.)) such as decorators or maybe Python context managers (the ones that use `with`, to avoid any confusion with the OpenTelemetry concept of context :slightly_smiling_face:).
@mauriciovasquezbernal @codeboten we have just talked about this :+1:
Also, to make things clear, with this approach we would have 3 ways of doing instrumentation:
1. The "normal" way (instantiating the spans directly with OpenTelemetry-provided functions)
2. The automatic way (using the `opentelemetry-auto-instrumentation` command)
3. The programmatic way (using a mechanism as described here to instrument specific frameworks)
--- END ISSUE ---
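For reference, the programmatic way can already be approximated for Flask with the existing `FlaskInstrumentor`, driven from application code rather than from the `opentelemetry-auto-instrumentation` command; the commented `instrument_app` call is purely hypothetical and not an API in this repository:

```python
from opentelemetry.ext.flask import FlaskInstrumentor

FlaskInstrumentor().instrument()  # must run before Flask is imported

from flask import Flask

app = Flask(__name__)

@app.route("/")
def hello():
    return "Hello!"

# A more framework-specific hook could later accept an already-created app
# and per-route options; the name and keyword below are illustrative only:
# FlaskInstrumentor().instrument_app(app, excluded_urls="/healthz")
```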
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # type: ignore
15
16 """
17 OpenTelemetry Base Instrumentor
18 """
19
20 from abc import ABC, abstractmethod
21 from logging import getLogger
22
23 _LOG = getLogger(__name__)
24
25
26 class BaseInstrumentor(ABC):
27 """An ABC for instrumentors"""
28
29 def __init__(self):
30 self._is_instrumented = False
31
32 @abstractmethod
33 def _instrument(self) -> None:
34 """Instrument"""
35
36 @abstractmethod
37 def _uninstrument(self) -> None:
38 """Uninstrument"""
39
40 def instrument(self) -> None:
41 """Instrument"""
42
43 if not self._is_instrumented:
44 result = self._instrument()
45 self._is_instrumented = True
46 return result
47
48 _LOG.warning("Attempting to instrument while already instrumented")
49
50 return None
51
52 def uninstrument(self) -> None:
53 """Uninstrument"""
54
55 if self._is_instrumented:
56 result = self._uninstrument()
57 self._is_instrumented = False
58 return result
59
60 _LOG.warning("Attempting to uninstrument while already uninstrumented")
61
62 return None
63
64
65 __all__ = ["BaseInstrumentor"]
66
```
Path: `ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # Note: This package is not named "flask" because of
16 # https://github.com/PyCQA/pylint/issues/2648
17
18 """
19 This library builds on the OpenTelemetry WSGI middleware to track web requests
20 in Flask applications. In addition to opentelemetry-ext-wsgi, it supports
21 flask-specific features such as:
22
23 * The Flask endpoint name is used as the Span name.
24 * The ``http.route`` Span attribute is set so that one can see which URL rule
25 matched a request.
26
27 Usage
28 -----
29
30 .. code-block:: python
31
32 from opentelemetry.ext.flask import FlaskInstrumentor
33 FlaskInstrumentor().instrument() # This needs to be executed before importing Flask
34 from flask import Flask
35
36 app = Flask(__name__)
37
38 @app.route("/")
39 def hello():
40 return "Hello!"
41
42 if __name__ == "__main__":
43 app.run(debug=True)
44
45 API
46 ---
47 """
48
49 import logging
50
51 import flask
52
53 import opentelemetry.ext.wsgi as otel_wsgi
54 from opentelemetry import context, propagators, trace
55 from opentelemetry.auto_instrumentation.instrumentor import BaseInstrumentor
56 from opentelemetry.ext.flask.version import __version__
57 from opentelemetry.util import time_ns
58
59 logger = logging.getLogger(__name__)
60
61 _ENVIRON_STARTTIME_KEY = "opentelemetry-flask.starttime_key"
62 _ENVIRON_SPAN_KEY = "opentelemetry-flask.span_key"
63 _ENVIRON_ACTIVATION_KEY = "opentelemetry-flask.activation_key"
64 _ENVIRON_TOKEN = "opentelemetry-flask.token"
65
66
67 class _InstrumentedFlask(flask.Flask):
68 def __init__(self, *args, **kwargs):
69
70 super().__init__(*args, **kwargs)
71
72 # Single use variable here to avoid recursion issues.
73 wsgi = self.wsgi_app
74
75 def wrapped_app(environ, start_response):
76 # We want to measure the time for route matching, etc.
77 # In theory, we could start the span here and use
78 # update_name later but that API is "highly discouraged" so
79 # we better avoid it.
80 environ[_ENVIRON_STARTTIME_KEY] = time_ns()
81
82 def _start_response(status, response_headers, *args, **kwargs):
83 span = flask.request.environ.get(_ENVIRON_SPAN_KEY)
84 if span:
85 otel_wsgi.add_response_attributes(
86 span, status, response_headers
87 )
88 else:
89 logger.warning(
90 "Flask environ's OpenTelemetry span "
91 "missing at _start_response(%s)",
92 status,
93 )
94
95 return start_response(
96 status, response_headers, *args, **kwargs
97 )
98
99 return wsgi(environ, _start_response)
100
101 self.wsgi_app = wrapped_app
102
103 @self.before_request
104 def _before_flask_request():
105 environ = flask.request.environ
106 span_name = (
107 flask.request.endpoint
108 or otel_wsgi.get_default_span_name(environ)
109 )
110 token = context.attach(
111 propagators.extract(otel_wsgi.get_header_from_environ, environ)
112 )
113
114 tracer = trace.get_tracer(__name__, __version__)
115
116 attributes = otel_wsgi.collect_request_attributes(environ)
117 if flask.request.url_rule:
118 # For 404 that result from no route found, etc, we
119 # don't have a url_rule.
120 attributes["http.route"] = flask.request.url_rule.rule
121 span = tracer.start_span(
122 span_name,
123 kind=trace.SpanKind.SERVER,
124 attributes=attributes,
125 start_time=environ.get(_ENVIRON_STARTTIME_KEY),
126 )
127 activation = tracer.use_span(span, end_on_exit=True)
128 activation.__enter__()
129 environ[_ENVIRON_ACTIVATION_KEY] = activation
130 environ[_ENVIRON_SPAN_KEY] = span
131 environ[_ENVIRON_TOKEN] = token
132
133 @self.teardown_request
134 def _teardown_flask_request(exc):
135 activation = flask.request.environ.get(_ENVIRON_ACTIVATION_KEY)
136 if not activation:
137 logger.warning(
138 "Flask environ's OpenTelemetry activation missing"
139 "at _teardown_flask_request(%s)",
140 exc,
141 )
142 return
143
144 if exc is None:
145 activation.__exit__(None, None, None)
146 else:
147 activation.__exit__(
148 type(exc), exc, getattr(exc, "__traceback__", None)
149 )
150 context.detach(flask.request.environ.get(_ENVIRON_TOKEN))
151
152
153 class FlaskInstrumentor(BaseInstrumentor):
154 """A instrumentor for flask.Flask
155
156 See `BaseInstrumentor`
157 """
158
159 def __init__(self):
160 super().__init__()
161 self._original_flask = None
162
163 def _instrument(self):
164 self._original_flask = flask.Flask
165 flask.Flask = _InstrumentedFlask
166
167 def _uninstrument(self):
168 flask.Flask = self._original_flask
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py
--- a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py
+++ b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py
@@ -160,9 +160,9 @@
super().__init__()
self._original_flask = None
- def _instrument(self):
+ def _instrument(self, **kwargs):
self._original_flask = flask.Flask
flask.Flask = _InstrumentedFlask
- def _uninstrument(self):
+ def _uninstrument(self, **kwargs):
flask.Flask = self._original_flask
diff --git a/opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py b/opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py
--- a/opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py
+++ b/opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py
@@ -26,22 +26,29 @@
class BaseInstrumentor(ABC):
"""An ABC for instrumentors"""
- def __init__(self):
- self._is_instrumented = False
+ _instance = None
+ _is_instrumented = False
+
+ def __new__(cls):
+
+ if cls._instance is None:
+ cls._instance = object.__new__(cls)
+
+ return cls._instance
@abstractmethod
- def _instrument(self) -> None:
+ def _instrument(self, **kwargs):
"""Instrument"""
@abstractmethod
- def _uninstrument(self) -> None:
+ def _uninstrument(self, **kwargs):
"""Uninstrument"""
- def instrument(self) -> None:
+ def instrument(self, **kwargs):
"""Instrument"""
if not self._is_instrumented:
- result = self._instrument()
+ result = self._instrument(**kwargs)
self._is_instrumented = True
return result
@@ -49,11 +56,11 @@
return None
- def uninstrument(self) -> None:
+ def uninstrument(self, **kwargs):
"""Uninstrument"""
if self._is_instrumented:
- result = self._uninstrument()
+ result = self._uninstrument(**kwargs)
self._is_instrumented = False
return result
| {"golden_diff": "diff --git a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py\n--- a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py\n+++ b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py\n@@ -160,9 +160,9 @@\n super().__init__()\n self._original_flask = None\n \n- def _instrument(self):\n+ def _instrument(self, **kwargs):\n self._original_flask = flask.Flask\n flask.Flask = _InstrumentedFlask\n \n- def _uninstrument(self):\n+ def _uninstrument(self, **kwargs):\n flask.Flask = self._original_flask\ndiff --git a/opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py b/opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py\n--- a/opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py\n+++ b/opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py\n@@ -26,22 +26,29 @@\n class BaseInstrumentor(ABC):\n \"\"\"An ABC for instrumentors\"\"\"\n \n- def __init__(self):\n- self._is_instrumented = False\n+ _instance = None\n+ _is_instrumented = False\n+\n+ def __new__(cls):\n+\n+ if cls._instance is None:\n+ cls._instance = object.__new__(cls)\n+\n+ return cls._instance\n \n @abstractmethod\n- def _instrument(self) -> None:\n+ def _instrument(self, **kwargs):\n \"\"\"Instrument\"\"\"\n \n @abstractmethod\n- def _uninstrument(self) -> None:\n+ def _uninstrument(self, **kwargs):\n \"\"\"Uninstrument\"\"\"\n \n- def instrument(self) -> None:\n+ def instrument(self, **kwargs):\n \"\"\"Instrument\"\"\"\n \n if not self._is_instrumented:\n- result = self._instrument()\n+ result = self._instrument(**kwargs)\n self._is_instrumented = True\n return result\n \n@@ -49,11 +56,11 @@\n \n return None\n \n- def uninstrument(self) -> None:\n+ def uninstrument(self, **kwargs):\n \"\"\"Uninstrument\"\"\"\n \n if self._is_instrumented:\n- result = self._uninstrument()\n+ result = self._uninstrument(**kwargs)\n self._is_instrumented = False\n return result\n", "issue": "Add some support for programmatic instrumentation\n[This comment](https://github.com/open-telemetry/opentelemetry-python/pull/551#issuecomment-608993167) in #551 raised the concern about making it possible to use the instrumentors in a programmatic way too, not only automatically.\r\n\r\nUsing them programmatically means adding code directly in the application that performs instrumentation, for example, [this](https://github.com/open-telemetry/opentelemetry-python/pull/551/files#diff-906a392e80f621fdbc1ea38af3c41081R19) or [this](https://github.com/open-telemetry/opentelemetry-python/pull/551/files#diff-906a392e80f621fdbc1ea38af3c41081L22).\r\n\r\nIt is very likely that this programmatic use of the instrumentors needs to be implemented in a framework-specific way, this means that the way we can do this kind of instrumentation is different for Flask and is different too for Django, for example. 
That means that it is possible that we end up having just special code for each framework as opposed to having a universal mechanism that works for every framework (this would mean that this issue could end up being closed without any fix).\r\n\r\nThis programmatic instrumentation mechanisms can be more sophisticated, for example, in an OpenTracing-related project [here](https://github.com/opentracing-contrib/python-django/blob/master/README.rst#tracing-individual-requests) it is shown how the programmatic instrumentation mechanism allows the user to select which application functions are to be traced.\r\n\r\nIn summary, what we want to introduce with this issue, is the capability to perform instrumentation without having to directly write the code that creates the spans but also without using the `opentelemetry-auto-instrumentation` command, but with a different mechanism (probably specifically tailored for each framework (Django, Flask, etc.)) like decorators or maybe Python context managers (the ones that use `with`, to avoid any confusion with the OpenTelemetry concept of context :slightly_smiling_face:).\r\n\r\n@mauriciovasquezbernal @codeboten we have just talked about this :+1:\r\n\r\nAlso, to make things clear, with this approach we would have 3 ways of doing instrumentation:\r\n1. The \"normal\" way (instantiating the spans directly with OpenTelemetry-provided functions)\r\n2. The automatic way (using the `opentelemetry-auto-instrumentation` command)\r\n3. The programmatic way (using a mechanism as described here to instrument specific frameworks)\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# type: ignore\n\n\"\"\"\nOpenTelemetry Base Instrumentor\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom logging import getLogger\n\n_LOG = getLogger(__name__)\n\n\nclass BaseInstrumentor(ABC):\n \"\"\"An ABC for instrumentors\"\"\"\n\n def __init__(self):\n self._is_instrumented = False\n\n @abstractmethod\n def _instrument(self) -> None:\n \"\"\"Instrument\"\"\"\n\n @abstractmethod\n def _uninstrument(self) -> None:\n \"\"\"Uninstrument\"\"\"\n\n def instrument(self) -> None:\n \"\"\"Instrument\"\"\"\n\n if not self._is_instrumented:\n result = self._instrument()\n self._is_instrumented = True\n return result\n\n _LOG.warning(\"Attempting to instrument while already instrumented\")\n\n return None\n\n def uninstrument(self) -> None:\n \"\"\"Uninstrument\"\"\"\n\n if self._is_instrumented:\n result = self._uninstrument()\n self._is_instrumented = False\n return result\n\n _LOG.warning(\"Attempting to uninstrument while already uninstrumented\")\n\n return None\n\n\n__all__ = [\"BaseInstrumentor\"]\n", "path": "opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a 
copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Note: This package is not named \"flask\" because of\n# https://github.com/PyCQA/pylint/issues/2648\n\n\"\"\"\nThis library builds on the OpenTelemetry WSGI middleware to track web requests\nin Flask applications. In addition to opentelemetry-ext-wsgi, it supports\nflask-specific features such as:\n\n* The Flask endpoint name is used as the Span name.\n* The ``http.route`` Span attribute is set so that one can see which URL rule\n matched a request.\n\nUsage\n-----\n\n.. code-block:: python\n\n from opentelemetry.ext.flask import FlaskInstrumentor\n FlaskInstrumentor().instrument() # This needs to be executed before importing Flask\n from flask import Flask\n\n app = Flask(__name__)\n\n @app.route(\"/\")\n def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\nAPI\n---\n\"\"\"\n\nimport logging\n\nimport flask\n\nimport opentelemetry.ext.wsgi as otel_wsgi\nfrom opentelemetry import context, propagators, trace\nfrom opentelemetry.auto_instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.ext.flask.version import __version__\nfrom opentelemetry.util import time_ns\n\nlogger = logging.getLogger(__name__)\n\n_ENVIRON_STARTTIME_KEY = \"opentelemetry-flask.starttime_key\"\n_ENVIRON_SPAN_KEY = \"opentelemetry-flask.span_key\"\n_ENVIRON_ACTIVATION_KEY = \"opentelemetry-flask.activation_key\"\n_ENVIRON_TOKEN = \"opentelemetry-flask.token\"\n\n\nclass _InstrumentedFlask(flask.Flask):\n def __init__(self, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n\n # Single use variable here to avoid recursion issues.\n wsgi = self.wsgi_app\n\n def wrapped_app(environ, start_response):\n # We want to measure the time for route matching, etc.\n # In theory, we could start the span here and use\n # update_name later but that API is \"highly discouraged\" so\n # we better avoid it.\n environ[_ENVIRON_STARTTIME_KEY] = time_ns()\n\n def _start_response(status, response_headers, *args, **kwargs):\n span = flask.request.environ.get(_ENVIRON_SPAN_KEY)\n if span:\n otel_wsgi.add_response_attributes(\n span, status, response_headers\n )\n else:\n logger.warning(\n \"Flask environ's OpenTelemetry span \"\n \"missing at _start_response(%s)\",\n status,\n )\n\n return start_response(\n status, response_headers, *args, **kwargs\n )\n\n return wsgi(environ, _start_response)\n\n self.wsgi_app = wrapped_app\n\n @self.before_request\n def _before_flask_request():\n environ = flask.request.environ\n span_name = (\n flask.request.endpoint\n or otel_wsgi.get_default_span_name(environ)\n )\n token = context.attach(\n propagators.extract(otel_wsgi.get_header_from_environ, environ)\n )\n\n tracer = trace.get_tracer(__name__, __version__)\n\n attributes = otel_wsgi.collect_request_attributes(environ)\n if flask.request.url_rule:\n # For 404 that result from no route found, etc, we\n # don't have a url_rule.\n attributes[\"http.route\"] = flask.request.url_rule.rule\n span = tracer.start_span(\n span_name,\n kind=trace.SpanKind.SERVER,\n attributes=attributes,\n start_time=environ.get(_ENVIRON_STARTTIME_KEY),\n )\n activation = tracer.use_span(span, end_on_exit=True)\n 
activation.__enter__()\n environ[_ENVIRON_ACTIVATION_KEY] = activation\n environ[_ENVIRON_SPAN_KEY] = span\n environ[_ENVIRON_TOKEN] = token\n\n @self.teardown_request\n def _teardown_flask_request(exc):\n activation = flask.request.environ.get(_ENVIRON_ACTIVATION_KEY)\n if not activation:\n logger.warning(\n \"Flask environ's OpenTelemetry activation missing\"\n \"at _teardown_flask_request(%s)\",\n exc,\n )\n return\n\n if exc is None:\n activation.__exit__(None, None, None)\n else:\n activation.__exit__(\n type(exc), exc, getattr(exc, \"__traceback__\", None)\n )\n context.detach(flask.request.environ.get(_ENVIRON_TOKEN))\n\n\nclass FlaskInstrumentor(BaseInstrumentor):\n \"\"\"A instrumentor for flask.Flask\n\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._original_flask = None\n\n def _instrument(self):\n self._original_flask = flask.Flask\n flask.Flask = _InstrumentedFlask\n\n def _uninstrument(self):\n flask.Flask = self._original_flask\n", "path": "ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# type: ignore\n\n\"\"\"\nOpenTelemetry Base Instrumentor\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom logging import getLogger\n\n_LOG = getLogger(__name__)\n\n\nclass BaseInstrumentor(ABC):\n \"\"\"An ABC for instrumentors\"\"\"\n\n _instance = None\n _is_instrumented = False\n\n def __new__(cls):\n\n if cls._instance is None:\n cls._instance = object.__new__(cls)\n\n return cls._instance\n\n @abstractmethod\n def _instrument(self, **kwargs):\n \"\"\"Instrument\"\"\"\n\n @abstractmethod\n def _uninstrument(self, **kwargs):\n \"\"\"Uninstrument\"\"\"\n\n def instrument(self, **kwargs):\n \"\"\"Instrument\"\"\"\n\n if not self._is_instrumented:\n result = self._instrument(**kwargs)\n self._is_instrumented = True\n return result\n\n _LOG.warning(\"Attempting to instrument while already instrumented\")\n\n return None\n\n def uninstrument(self, **kwargs):\n \"\"\"Uninstrument\"\"\"\n\n if self._is_instrumented:\n result = self._uninstrument(**kwargs)\n self._is_instrumented = False\n return result\n\n _LOG.warning(\"Attempting to uninstrument while already uninstrumented\")\n\n return None\n\n\n__all__ = [\"BaseInstrumentor\"]\n", "path": "opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations 
under the License.\n\n# Note: This package is not named \"flask\" because of\n# https://github.com/PyCQA/pylint/issues/2648\n\n\"\"\"\nThis library builds on the OpenTelemetry WSGI middleware to track web requests\nin Flask applications. In addition to opentelemetry-ext-wsgi, it supports\nflask-specific features such as:\n\n* The Flask endpoint name is used as the Span name.\n* The ``http.route`` Span attribute is set so that one can see which URL rule\n matched a request.\n\nUsage\n-----\n\n.. code-block:: python\n\n from opentelemetry.ext.flask import FlaskInstrumentor\n FlaskInstrumentor().instrument() # This needs to be executed before importing Flask\n from flask import Flask\n\n app = Flask(__name__)\n\n @app.route(\"/\")\n def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\nAPI\n---\n\"\"\"\n\nimport logging\n\nimport flask\n\nimport opentelemetry.ext.wsgi as otel_wsgi\nfrom opentelemetry import context, propagators, trace\nfrom opentelemetry.auto_instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.ext.flask.version import __version__\nfrom opentelemetry.util import time_ns\n\nlogger = logging.getLogger(__name__)\n\n_ENVIRON_STARTTIME_KEY = \"opentelemetry-flask.starttime_key\"\n_ENVIRON_SPAN_KEY = \"opentelemetry-flask.span_key\"\n_ENVIRON_ACTIVATION_KEY = \"opentelemetry-flask.activation_key\"\n_ENVIRON_TOKEN = \"opentelemetry-flask.token\"\n\n\nclass _InstrumentedFlask(flask.Flask):\n def __init__(self, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n\n # Single use variable here to avoid recursion issues.\n wsgi = self.wsgi_app\n\n def wrapped_app(environ, start_response):\n # We want to measure the time for route matching, etc.\n # In theory, we could start the span here and use\n # update_name later but that API is \"highly discouraged\" so\n # we better avoid it.\n environ[_ENVIRON_STARTTIME_KEY] = time_ns()\n\n def _start_response(status, response_headers, *args, **kwargs):\n span = flask.request.environ.get(_ENVIRON_SPAN_KEY)\n if span:\n otel_wsgi.add_response_attributes(\n span, status, response_headers\n )\n else:\n logger.warning(\n \"Flask environ's OpenTelemetry span \"\n \"missing at _start_response(%s)\",\n status,\n )\n\n return start_response(\n status, response_headers, *args, **kwargs\n )\n\n return wsgi(environ, _start_response)\n\n self.wsgi_app = wrapped_app\n\n @self.before_request\n def _before_flask_request():\n environ = flask.request.environ\n span_name = (\n flask.request.endpoint\n or otel_wsgi.get_default_span_name(environ)\n )\n token = context.attach(\n propagators.extract(otel_wsgi.get_header_from_environ, environ)\n )\n\n tracer = trace.get_tracer(__name__, __version__)\n\n attributes = otel_wsgi.collect_request_attributes(environ)\n if flask.request.url_rule:\n # For 404 that result from no route found, etc, we\n # don't have a url_rule.\n attributes[\"http.route\"] = flask.request.url_rule.rule\n span = tracer.start_span(\n span_name,\n kind=trace.SpanKind.SERVER,\n attributes=attributes,\n start_time=environ.get(_ENVIRON_STARTTIME_KEY),\n )\n activation = tracer.use_span(span, end_on_exit=True)\n activation.__enter__()\n environ[_ENVIRON_ACTIVATION_KEY] = activation\n environ[_ENVIRON_SPAN_KEY] = span\n environ[_ENVIRON_TOKEN] = token\n\n @self.teardown_request\n def _teardown_flask_request(exc):\n activation = flask.request.environ.get(_ENVIRON_ACTIVATION_KEY)\n if not activation:\n logger.warning(\n \"Flask environ's OpenTelemetry activation missing\"\n \"at 
_teardown_flask_request(%s)\",\n exc,\n )\n return\n\n if exc is None:\n activation.__exit__(None, None, None)\n else:\n activation.__exit__(\n type(exc), exc, getattr(exc, \"__traceback__\", None)\n )\n context.detach(flask.request.environ.get(_ENVIRON_TOKEN))\n\n\nclass FlaskInstrumentor(BaseInstrumentor):\n \"\"\"A instrumentor for flask.Flask\n\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._original_flask = None\n\n def _instrument(self, **kwargs):\n self._original_flask = flask.Flask\n flask.Flask = _InstrumentedFlask\n\n def _uninstrument(self, **kwargs):\n flask.Flask = self._original_flask\n", "path": "ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py"}]} | 2,979 | 588 |
gh_patches_debug_17174 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2956 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add exception handling for hasattr because it could throw
Fixed https://github.com/mitmproxy/mitmproxy/issues/2849
--- END ISSUE ---
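For background, a minimal reproduction of why a bare `hasattr` can raise: on Python 3, `hasattr` only swallows `AttributeError`, so any other exception raised while computing the attribute escapes to the caller. The `Exploding` class is a stand-in for the kind of proxy object behind #2849:

```python
class Exploding:
    def __getattr__(self, name):
        # Anything that is not an AttributeError propagates out of hasattr().
        raise RuntimeError("attribute machinery failed for %r" % name)

obj = Exploding()
try:
    hasattr(obj, "command_path")   # does not quietly return False...
except RuntimeError as exc:
    print("hasattr raised:", exc)  # ...the RuntimeError escapes instead
```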
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/command.py`
Content:
```
1 """
2 This module manges and invokes typed commands.
3 """
4 import inspect
5 import types
6 import io
7 import typing
8 import shlex
9 import textwrap
10 import functools
11 import sys
12
13 from mitmproxy import exceptions
14 import mitmproxy.types
15
16
17 def verify_arg_signature(f: typing.Callable, args: list, kwargs: dict) -> None:
18 sig = inspect.signature(f)
19 try:
20 sig.bind(*args, **kwargs)
21 except TypeError as v:
22 raise exceptions.CommandError("command argument mismatch: %s" % v.args[0])
23
24
25 def lexer(s):
26 # mypy mis-identifies shlex.shlex as abstract
27 lex = shlex.shlex(s, posix=True) # type: ignore
28 lex.wordchars += "."
29 lex.whitespace_split = True
30 lex.commenters = ''
31 return lex
32
33
34 def typename(t: type) -> str:
35 """
36 Translates a type to an explanatory string.
37 """
38 to = mitmproxy.types.CommandTypes.get(t, None)
39 if not to:
40 raise NotImplementedError(t)
41 return to.display
42
43
44 class Command:
45 def __init__(self, manager, path, func) -> None:
46 self.path = path
47 self.manager = manager
48 self.func = func
49 sig = inspect.signature(self.func)
50 self.help = None
51 if func.__doc__:
52 txt = func.__doc__.strip()
53 self.help = "\n".join(textwrap.wrap(txt))
54
55 self.has_positional = False
56 for i in sig.parameters.values():
57 # This is the kind for *args parameters
58 if i.kind == i.VAR_POSITIONAL:
59 self.has_positional = True
60 self.paramtypes = [v.annotation for v in sig.parameters.values()]
61 self.returntype = sig.return_annotation
62
63 def paramnames(self) -> typing.Sequence[str]:
64 v = [typename(i) for i in self.paramtypes]
65 if self.has_positional:
66 v[-1] = "*" + v[-1]
67 return v
68
69 def retname(self) -> str:
70 return typename(self.returntype) if self.returntype else ""
71
72 def signature_help(self) -> str:
73 params = " ".join(self.paramnames())
74 ret = self.retname()
75 if ret:
76 ret = " -> " + ret
77 return "%s %s%s" % (self.path, params, ret)
78
79 def prepare_args(self, args: typing.Sequence[str]) -> typing.List[typing.Any]:
80 verify_arg_signature(self.func, list(args), {})
81
82 remainder = [] # type: typing.Sequence[str]
83 if self.has_positional:
84 remainder = args[len(self.paramtypes) - 1:]
85 args = args[:len(self.paramtypes) - 1]
86
87 pargs = []
88 for arg, paramtype in zip(args, self.paramtypes):
89 pargs.append(parsearg(self.manager, arg, paramtype))
90 pargs.extend(remainder)
91 return pargs
92
93 def call(self, args: typing.Sequence[str]) -> typing.Any:
94 """
95 Call the command with a list of arguments. At this point, all
96 arguments are strings.
97 """
98 pargs = self.prepare_args(args)
99
100 with self.manager.master.handlecontext():
101 ret = self.func(*pargs)
102
103 if ret is None and self.returntype is None:
104 return
105 typ = mitmproxy.types.CommandTypes.get(self.returntype)
106 if not typ.is_valid(self.manager, typ, ret):
107 raise exceptions.CommandError(
108 "%s returned unexpected data - expected %s" % (
109 self.path, typ.display
110 )
111 )
112 return ret
113
114
115 ParseResult = typing.NamedTuple(
116 "ParseResult",
117 [
118 ("value", str),
119 ("type", typing.Type),
120 ("valid", bool),
121 ],
122 )
123
124
125 class CommandManager(mitmproxy.types._CommandBase):
126 def __init__(self, master):
127 self.master = master
128 self.commands = {} # type: typing.Dict[str, Command]
129
130 def collect_commands(self, addon):
131 for i in dir(addon):
132 if not i.startswith("__"):
133 o = getattr(addon, i)
134 if hasattr(o, "command_path"):
135 self.add(o.command_path, o)
136
137 def add(self, path: str, func: typing.Callable):
138 self.commands[path] = Command(self, path, func)
139
140 def parse_partial(
141 self,
142 cmdstr: str
143 ) -> typing.Tuple[typing.Sequence[ParseResult], typing.Sequence[str]]:
144 """
145 Parse a possibly partial command. Return a sequence of ParseResults and a sequence of remainder type help items.
146 """
147 buf = io.StringIO(cmdstr)
148 parts = [] # type: typing.List[str]
149 lex = lexer(buf)
150 while 1:
151 remainder = cmdstr[buf.tell():]
152 try:
153 t = lex.get_token()
154 except ValueError:
155 parts.append(remainder)
156 break
157 if not t:
158 break
159 parts.append(t)
160 if not parts:
161 parts = [""]
162 elif cmdstr.endswith(" "):
163 parts.append("")
164
165 parse = [] # type: typing.List[ParseResult]
166 params = [] # type: typing.List[type]
167 typ = None # type: typing.Type
168 for i in range(len(parts)):
169 if i == 0:
170 typ = mitmproxy.types.Cmd
171 if parts[i] in self.commands:
172 params.extend(self.commands[parts[i]].paramtypes)
173 elif params:
174 typ = params.pop(0)
175 if typ == mitmproxy.types.Cmd and params and params[0] == mitmproxy.types.Arg:
176 if parts[i] in self.commands:
177 params[:] = self.commands[parts[i]].paramtypes
178 else:
179 typ = mitmproxy.types.Unknown
180
181 to = mitmproxy.types.CommandTypes.get(typ, None)
182 valid = False
183 if to:
184 try:
185 to.parse(self, typ, parts[i])
186 except exceptions.TypeError:
187 valid = False
188 else:
189 valid = True
190
191 parse.append(
192 ParseResult(
193 value=parts[i],
194 type=typ,
195 valid=valid,
196 )
197 )
198
199 remhelp = [] # type: typing.List[str]
200 for x in params:
201 remt = mitmproxy.types.CommandTypes.get(x, None)
202 remhelp.append(remt.display)
203
204 return parse, remhelp
205
206 def call_args(self, path: str, args: typing.Sequence[str]) -> typing.Any:
207 """
208 Call a command using a list of string arguments. May raise CommandError.
209 """
210 if path not in self.commands:
211 raise exceptions.CommandError("Unknown command: %s" % path)
212 return self.commands[path].call(args)
213
214 def call(self, cmdstr: str):
215 """
216 Call a command using a string. May raise CommandError.
217 """
218 parts = list(lexer(cmdstr))
219 if not len(parts) >= 1:
220 raise exceptions.CommandError("Invalid command: %s" % cmdstr)
221 return self.call_args(parts[0], parts[1:])
222
223 def dump(self, out=sys.stdout) -> None:
224 cmds = list(self.commands.values())
225 cmds.sort(key=lambda x: x.signature_help())
226 for c in cmds:
227 for hl in (c.help or "").splitlines():
228 print("# " + hl, file=out)
229 print(c.signature_help(), file=out)
230 print(file=out)
231
232
233 def parsearg(manager: CommandManager, spec: str, argtype: type) -> typing.Any:
234 """
235 Convert a string to a argument to the appropriate type.
236 """
237 t = mitmproxy.types.CommandTypes.get(argtype, None)
238 if not t:
239 raise exceptions.CommandError("Unsupported argument type: %s" % argtype)
240 try:
241 return t.parse(manager, argtype, spec) # type: ignore
242 except exceptions.TypeError as e:
243 raise exceptions.CommandError from e
244
245
246 def command(path):
247 def decorator(function):
248 @functools.wraps(function)
249 def wrapper(*args, **kwargs):
250 verify_arg_signature(function, args, kwargs)
251 return function(*args, **kwargs)
252 wrapper.__dict__["command_path"] = path
253 return wrapper
254 return decorator
255
256
257 def argument(name, type):
258 """
259 Set the type of a command argument at runtime. This is useful for more
260 specific types such as mitmproxy.types.Choice, which we cannot annotate
261 directly as mypy does not like that.
262 """
263 def decorator(f: types.FunctionType) -> types.FunctionType:
264 assert name in f.__annotations__
265 f.__annotations__[name] = type
266 return f
267 return decorator
268
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/command.py b/mitmproxy/command.py
--- a/mitmproxy/command.py
+++ b/mitmproxy/command.py
@@ -1,5 +1,5 @@
"""
- This module manges and invokes typed commands.
+ This module manages and invokes typed commands.
"""
import inspect
import types
@@ -131,8 +131,13 @@
for i in dir(addon):
if not i.startswith("__"):
o = getattr(addon, i)
- if hasattr(o, "command_path"):
- self.add(o.command_path, o)
+ try:
+ is_command = hasattr(o, "command_path")
+ except Exception:
+ pass # hasattr may raise if o implements __getattr__.
+ else:
+ if is_command:
+ self.add(o.command_path, o)
def add(self, path: str, func: typing.Callable):
self.commands[path] = Command(self, path, func)
| {"golden_diff": "diff --git a/mitmproxy/command.py b/mitmproxy/command.py\n--- a/mitmproxy/command.py\n+++ b/mitmproxy/command.py\n@@ -1,5 +1,5 @@\n \"\"\"\n- This module manges and invokes typed commands.\n+ This module manages and invokes typed commands.\n \"\"\"\n import inspect\n import types\n@@ -131,8 +131,13 @@\n for i in dir(addon):\n if not i.startswith(\"__\"):\n o = getattr(addon, i)\n- if hasattr(o, \"command_path\"):\n- self.add(o.command_path, o)\n+ try:\n+ is_command = hasattr(o, \"command_path\")\n+ except Exception:\n+ pass # hasattr may raise if o implements __getattr__.\n+ else:\n+ if is_command:\n+ self.add(o.command_path, o)\n \n def add(self, path: str, func: typing.Callable):\n self.commands[path] = Command(self, path, func)\n", "issue": "Add exption handling for hasattr because it could throw\nFixed https://github.com/mitmproxy/mitmproxy/issues/2849\n", "before_files": [{"content": "\"\"\"\n This module manges and invokes typed commands.\n\"\"\"\nimport inspect\nimport types\nimport io\nimport typing\nimport shlex\nimport textwrap\nimport functools\nimport sys\n\nfrom mitmproxy import exceptions\nimport mitmproxy.types\n\n\ndef verify_arg_signature(f: typing.Callable, args: list, kwargs: dict) -> None:\n sig = inspect.signature(f)\n try:\n sig.bind(*args, **kwargs)\n except TypeError as v:\n raise exceptions.CommandError(\"command argument mismatch: %s\" % v.args[0])\n\n\ndef lexer(s):\n # mypy mis-identifies shlex.shlex as abstract\n lex = shlex.shlex(s, posix=True) # type: ignore\n lex.wordchars += \".\"\n lex.whitespace_split = True\n lex.commenters = ''\n return lex\n\n\ndef typename(t: type) -> str:\n \"\"\"\n Translates a type to an explanatory string.\n \"\"\"\n to = mitmproxy.types.CommandTypes.get(t, None)\n if not to:\n raise NotImplementedError(t)\n return to.display\n\n\nclass Command:\n def __init__(self, manager, path, func) -> None:\n self.path = path\n self.manager = manager\n self.func = func\n sig = inspect.signature(self.func)\n self.help = None\n if func.__doc__:\n txt = func.__doc__.strip()\n self.help = \"\\n\".join(textwrap.wrap(txt))\n\n self.has_positional = False\n for i in sig.parameters.values():\n # This is the kind for *args parameters\n if i.kind == i.VAR_POSITIONAL:\n self.has_positional = True\n self.paramtypes = [v.annotation for v in sig.parameters.values()]\n self.returntype = sig.return_annotation\n\n def paramnames(self) -> typing.Sequence[str]:\n v = [typename(i) for i in self.paramtypes]\n if self.has_positional:\n v[-1] = \"*\" + v[-1]\n return v\n\n def retname(self) -> str:\n return typename(self.returntype) if self.returntype else \"\"\n\n def signature_help(self) -> str:\n params = \" \".join(self.paramnames())\n ret = self.retname()\n if ret:\n ret = \" -> \" + ret\n return \"%s %s%s\" % (self.path, params, ret)\n\n def prepare_args(self, args: typing.Sequence[str]) -> typing.List[typing.Any]:\n verify_arg_signature(self.func, list(args), {})\n\n remainder = [] # type: typing.Sequence[str]\n if self.has_positional:\n remainder = args[len(self.paramtypes) - 1:]\n args = args[:len(self.paramtypes) - 1]\n\n pargs = []\n for arg, paramtype in zip(args, self.paramtypes):\n pargs.append(parsearg(self.manager, arg, paramtype))\n pargs.extend(remainder)\n return pargs\n\n def call(self, args: typing.Sequence[str]) -> typing.Any:\n \"\"\"\n Call the command with a list of arguments. 
At this point, all\n arguments are strings.\n \"\"\"\n pargs = self.prepare_args(args)\n\n with self.manager.master.handlecontext():\n ret = self.func(*pargs)\n\n if ret is None and self.returntype is None:\n return\n typ = mitmproxy.types.CommandTypes.get(self.returntype)\n if not typ.is_valid(self.manager, typ, ret):\n raise exceptions.CommandError(\n \"%s returned unexpected data - expected %s\" % (\n self.path, typ.display\n )\n )\n return ret\n\n\nParseResult = typing.NamedTuple(\n \"ParseResult\",\n [\n (\"value\", str),\n (\"type\", typing.Type),\n (\"valid\", bool),\n ],\n)\n\n\nclass CommandManager(mitmproxy.types._CommandBase):\n def __init__(self, master):\n self.master = master\n self.commands = {} # type: typing.Dict[str, Command]\n\n def collect_commands(self, addon):\n for i in dir(addon):\n if not i.startswith(\"__\"):\n o = getattr(addon, i)\n if hasattr(o, \"command_path\"):\n self.add(o.command_path, o)\n\n def add(self, path: str, func: typing.Callable):\n self.commands[path] = Command(self, path, func)\n\n def parse_partial(\n self,\n cmdstr: str\n ) -> typing.Tuple[typing.Sequence[ParseResult], typing.Sequence[str]]:\n \"\"\"\n Parse a possibly partial command. Return a sequence of ParseResults and a sequence of remainder type help items.\n \"\"\"\n buf = io.StringIO(cmdstr)\n parts = [] # type: typing.List[str]\n lex = lexer(buf)\n while 1:\n remainder = cmdstr[buf.tell():]\n try:\n t = lex.get_token()\n except ValueError:\n parts.append(remainder)\n break\n if not t:\n break\n parts.append(t)\n if not parts:\n parts = [\"\"]\n elif cmdstr.endswith(\" \"):\n parts.append(\"\")\n\n parse = [] # type: typing.List[ParseResult]\n params = [] # type: typing.List[type]\n typ = None # type: typing.Type\n for i in range(len(parts)):\n if i == 0:\n typ = mitmproxy.types.Cmd\n if parts[i] in self.commands:\n params.extend(self.commands[parts[i]].paramtypes)\n elif params:\n typ = params.pop(0)\n if typ == mitmproxy.types.Cmd and params and params[0] == mitmproxy.types.Arg:\n if parts[i] in self.commands:\n params[:] = self.commands[parts[i]].paramtypes\n else:\n typ = mitmproxy.types.Unknown\n\n to = mitmproxy.types.CommandTypes.get(typ, None)\n valid = False\n if to:\n try:\n to.parse(self, typ, parts[i])\n except exceptions.TypeError:\n valid = False\n else:\n valid = True\n\n parse.append(\n ParseResult(\n value=parts[i],\n type=typ,\n valid=valid,\n )\n )\n\n remhelp = [] # type: typing.List[str]\n for x in params:\n remt = mitmproxy.types.CommandTypes.get(x, None)\n remhelp.append(remt.display)\n\n return parse, remhelp\n\n def call_args(self, path: str, args: typing.Sequence[str]) -> typing.Any:\n \"\"\"\n Call a command using a list of string arguments. May raise CommandError.\n \"\"\"\n if path not in self.commands:\n raise exceptions.CommandError(\"Unknown command: %s\" % path)\n return self.commands[path].call(args)\n\n def call(self, cmdstr: str):\n \"\"\"\n Call a command using a string. 
May raise CommandError.\n \"\"\"\n parts = list(lexer(cmdstr))\n if not len(parts) >= 1:\n raise exceptions.CommandError(\"Invalid command: %s\" % cmdstr)\n return self.call_args(parts[0], parts[1:])\n\n def dump(self, out=sys.stdout) -> None:\n cmds = list(self.commands.values())\n cmds.sort(key=lambda x: x.signature_help())\n for c in cmds:\n for hl in (c.help or \"\").splitlines():\n print(\"# \" + hl, file=out)\n print(c.signature_help(), file=out)\n print(file=out)\n\n\ndef parsearg(manager: CommandManager, spec: str, argtype: type) -> typing.Any:\n \"\"\"\n Convert a string to a argument to the appropriate type.\n \"\"\"\n t = mitmproxy.types.CommandTypes.get(argtype, None)\n if not t:\n raise exceptions.CommandError(\"Unsupported argument type: %s\" % argtype)\n try:\n return t.parse(manager, argtype, spec) # type: ignore\n except exceptions.TypeError as e:\n raise exceptions.CommandError from e\n\n\ndef command(path):\n def decorator(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n verify_arg_signature(function, args, kwargs)\n return function(*args, **kwargs)\n wrapper.__dict__[\"command_path\"] = path\n return wrapper\n return decorator\n\n\ndef argument(name, type):\n \"\"\"\n Set the type of a command argument at runtime. This is useful for more\n specific types such as mitmproxy.types.Choice, which we cannot annotate\n directly as mypy does not like that.\n \"\"\"\n def decorator(f: types.FunctionType) -> types.FunctionType:\n assert name in f.__annotations__\n f.__annotations__[name] = type\n return f\n return decorator\n", "path": "mitmproxy/command.py"}], "after_files": [{"content": "\"\"\"\n This module manages and invokes typed commands.\n\"\"\"\nimport inspect\nimport types\nimport io\nimport typing\nimport shlex\nimport textwrap\nimport functools\nimport sys\n\nfrom mitmproxy import exceptions\nimport mitmproxy.types\n\n\ndef verify_arg_signature(f: typing.Callable, args: list, kwargs: dict) -> None:\n sig = inspect.signature(f)\n try:\n sig.bind(*args, **kwargs)\n except TypeError as v:\n raise exceptions.CommandError(\"command argument mismatch: %s\" % v.args[0])\n\n\ndef lexer(s):\n # mypy mis-identifies shlex.shlex as abstract\n lex = shlex.shlex(s, posix=True) # type: ignore\n lex.wordchars += \".\"\n lex.whitespace_split = True\n lex.commenters = ''\n return lex\n\n\ndef typename(t: type) -> str:\n \"\"\"\n Translates a type to an explanatory string.\n \"\"\"\n to = mitmproxy.types.CommandTypes.get(t, None)\n if not to:\n raise NotImplementedError(t)\n return to.display\n\n\nclass Command:\n def __init__(self, manager, path, func) -> None:\n self.path = path\n self.manager = manager\n self.func = func\n sig = inspect.signature(self.func)\n self.help = None\n if func.__doc__:\n txt = func.__doc__.strip()\n self.help = \"\\n\".join(textwrap.wrap(txt))\n\n self.has_positional = False\n for i in sig.parameters.values():\n # This is the kind for *args parameters\n if i.kind == i.VAR_POSITIONAL:\n self.has_positional = True\n self.paramtypes = [v.annotation for v in sig.parameters.values()]\n self.returntype = sig.return_annotation\n\n def paramnames(self) -> typing.Sequence[str]:\n v = [typename(i) for i in self.paramtypes]\n if self.has_positional:\n v[-1] = \"*\" + v[-1]\n return v\n\n def retname(self) -> str:\n return typename(self.returntype) if self.returntype else \"\"\n\n def signature_help(self) -> str:\n params = \" \".join(self.paramnames())\n ret = self.retname()\n if ret:\n ret = \" -> \" + ret\n return \"%s %s%s\" % (self.path, 
params, ret)\n\n def prepare_args(self, args: typing.Sequence[str]) -> typing.List[typing.Any]:\n verify_arg_signature(self.func, list(args), {})\n\n remainder = [] # type: typing.Sequence[str]\n if self.has_positional:\n remainder = args[len(self.paramtypes) - 1:]\n args = args[:len(self.paramtypes) - 1]\n\n pargs = []\n for arg, paramtype in zip(args, self.paramtypes):\n pargs.append(parsearg(self.manager, arg, paramtype))\n pargs.extend(remainder)\n return pargs\n\n def call(self, args: typing.Sequence[str]) -> typing.Any:\n \"\"\"\n Call the command with a list of arguments. At this point, all\n arguments are strings.\n \"\"\"\n pargs = self.prepare_args(args)\n\n with self.manager.master.handlecontext():\n ret = self.func(*pargs)\n\n if ret is None and self.returntype is None:\n return\n typ = mitmproxy.types.CommandTypes.get(self.returntype)\n if not typ.is_valid(self.manager, typ, ret):\n raise exceptions.CommandError(\n \"%s returned unexpected data - expected %s\" % (\n self.path, typ.display\n )\n )\n return ret\n\n\nParseResult = typing.NamedTuple(\n \"ParseResult\",\n [\n (\"value\", str),\n (\"type\", typing.Type),\n (\"valid\", bool),\n ],\n)\n\n\nclass CommandManager(mitmproxy.types._CommandBase):\n def __init__(self, master):\n self.master = master\n self.commands = {} # type: typing.Dict[str, Command]\n\n def collect_commands(self, addon):\n for i in dir(addon):\n if not i.startswith(\"__\"):\n o = getattr(addon, i)\n try:\n is_command = hasattr(o, \"command_path\")\n except Exception:\n pass # hasattr may raise if o implements __getattr__.\n else:\n if is_command:\n self.add(o.command_path, o)\n\n def add(self, path: str, func: typing.Callable):\n self.commands[path] = Command(self, path, func)\n\n def parse_partial(\n self,\n cmdstr: str\n ) -> typing.Tuple[typing.Sequence[ParseResult], typing.Sequence[str]]:\n \"\"\"\n Parse a possibly partial command. Return a sequence of ParseResults and a sequence of remainder type help items.\n \"\"\"\n buf = io.StringIO(cmdstr)\n parts = [] # type: typing.List[str]\n lex = lexer(buf)\n while 1:\n remainder = cmdstr[buf.tell():]\n try:\n t = lex.get_token()\n except ValueError:\n parts.append(remainder)\n break\n if not t:\n break\n parts.append(t)\n if not parts:\n parts = [\"\"]\n elif cmdstr.endswith(\" \"):\n parts.append(\"\")\n\n parse = [] # type: typing.List[ParseResult]\n params = [] # type: typing.List[type]\n typ = None # type: typing.Type\n for i in range(len(parts)):\n if i == 0:\n typ = mitmproxy.types.Cmd\n if parts[i] in self.commands:\n params.extend(self.commands[parts[i]].paramtypes)\n elif params:\n typ = params.pop(0)\n if typ == mitmproxy.types.Cmd and params and params[0] == mitmproxy.types.Arg:\n if parts[i] in self.commands:\n params[:] = self.commands[parts[i]].paramtypes\n else:\n typ = mitmproxy.types.Unknown\n\n to = mitmproxy.types.CommandTypes.get(typ, None)\n valid = False\n if to:\n try:\n to.parse(self, typ, parts[i])\n except exceptions.TypeError:\n valid = False\n else:\n valid = True\n\n parse.append(\n ParseResult(\n value=parts[i],\n type=typ,\n valid=valid,\n )\n )\n\n remhelp = [] # type: typing.List[str]\n for x in params:\n remt = mitmproxy.types.CommandTypes.get(x, None)\n remhelp.append(remt.display)\n\n return parse, remhelp\n\n def call_args(self, path: str, args: typing.Sequence[str]) -> typing.Any:\n \"\"\"\n Call a command using a list of string arguments. 
May raise CommandError.\n \"\"\"\n if path not in self.commands:\n raise exceptions.CommandError(\"Unknown command: %s\" % path)\n return self.commands[path].call(args)\n\n def call(self, cmdstr: str):\n \"\"\"\n Call a command using a string. May raise CommandError.\n \"\"\"\n parts = list(lexer(cmdstr))\n if not len(parts) >= 1:\n raise exceptions.CommandError(\"Invalid command: %s\" % cmdstr)\n return self.call_args(parts[0], parts[1:])\n\n def dump(self, out=sys.stdout) -> None:\n cmds = list(self.commands.values())\n cmds.sort(key=lambda x: x.signature_help())\n for c in cmds:\n for hl in (c.help or \"\").splitlines():\n print(\"# \" + hl, file=out)\n print(c.signature_help(), file=out)\n print(file=out)\n\n\ndef parsearg(manager: CommandManager, spec: str, argtype: type) -> typing.Any:\n \"\"\"\n Convert a string to a argument to the appropriate type.\n \"\"\"\n t = mitmproxy.types.CommandTypes.get(argtype, None)\n if not t:\n raise exceptions.CommandError(\"Unsupported argument type: %s\" % argtype)\n try:\n return t.parse(manager, argtype, spec) # type: ignore\n except exceptions.TypeError as e:\n raise exceptions.CommandError from e\n\n\ndef command(path):\n def decorator(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n verify_arg_signature(function, args, kwargs)\n return function(*args, **kwargs)\n wrapper.__dict__[\"command_path\"] = path\n return wrapper\n return decorator\n\n\ndef argument(name, type):\n \"\"\"\n Set the type of a command argument at runtime. This is useful for more\n specific types such as mitmproxy.types.Choice, which we cannot annotate\n directly as mypy does not like that.\n \"\"\"\n def decorator(f: types.FunctionType) -> types.FunctionType:\n assert name in f.__annotations__\n f.__annotations__[name] = type\n return f\n return decorator\n", "path": "mitmproxy/command.py"}]} | 2,879 | 215 |
gh_patches_debug_3543 | rasdani/github-patches | git_diff | beeware__toga-1634 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Source installs no longer working
#1614 made some changes to the packaging of modules to support the release package workflow.
The wheels generated from this process appear to work fine; however, source installs don't appear to be working. I've had problems on both macOS and Android.
**To Reproduce**
Steps to reproduce the behavior:
1. `briefcase run` or `briefcase run android` on Tutorial 0.
**Expected behavior**
App should start.
**Environment:**
- Operating System: macOS
- Python version: 3.10
- Software versions:
- Briefcase: 0.3.11
- Toga: 96881f093
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/web/setup.py`
Content:
```
1 #!/usr/bin/env python
2 import re
3
4 from setuptools import setup
5
6 # Version handline needs to be programatic because
7 # we can't import toga_web to compute the version;
8 # and to support versioned subpackage dependencies
9 with open('src/toga_web/__init__.py', encoding='utf8') as version_file:
10 version_match = re.search(
11 r"^__version__ = ['\"]([^'\"]*)['\"]",
12 version_file.read(),
13 re.M
14 )
15 if version_match:
16 version = version_match.group(1)
17 else:
18 raise RuntimeError("Unable to find version string.")
19
20 setup(
21 version=version,
22 install_requires=[
23 # TODO: Due to https://github.com/pyodide/pyodide/issues/2408, the name
24 # toga-core is ambigous when on the package hasn't been published to
25 # PyPI. As a workaround, don't specify the dependency, and manually
26 # ensure that toga-core is installed.
27 # 'toga-core==%s' % version,
28 ],
29 )
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/web/setup.py b/src/web/setup.py
--- a/src/web/setup.py
+++ b/src/web/setup.py
@@ -20,10 +20,6 @@
setup(
version=version,
install_requires=[
- # TODO: Due to https://github.com/pyodide/pyodide/issues/2408, the name
- # toga-core is ambigous when on the package hasn't been published to
- # PyPI. As a workaround, don't specify the dependency, and manually
- # ensure that toga-core is installed.
- # 'toga-core==%s' % version,
+ 'toga-core==%s' % version,
],
)
| {"golden_diff": "diff --git a/src/web/setup.py b/src/web/setup.py\n--- a/src/web/setup.py\n+++ b/src/web/setup.py\n@@ -20,10 +20,6 @@\n setup(\n version=version,\n install_requires=[\n- # TODO: Due to https://github.com/pyodide/pyodide/issues/2408, the name\n- # toga-core is ambigous when on the package hasn't been published to\n- # PyPI. As a workaround, don't specify the dependency, and manually\n- # ensure that toga-core is installed.\n- # 'toga-core==%s' % version,\n+ 'toga-core==%s' % version,\n ],\n )\n", "issue": "Source installs no longer working\n#1614 made some changes to the packaging of modules to support the release package workflow.\r\n\r\nThe wheels generated from this process appear to work fine; however, source installs don't appear to be working. I've had problems on both macOS and Android.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. `briefcase run` or `briefcase run android` on Tutorial 0.\r\n\r\n**Expected behavior**\r\n\r\nApp should start.\r\n\r\n**Environment:**\r\n - Operating System: macOS\r\n - Python version: 3.10\r\n - Software versions:\r\n - Briefcase: 0.3.11\r\n - Toga: 96881f093\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport re\n\nfrom setuptools import setup\n\n# Version handline needs to be programatic because\n# we can't import toga_web to compute the version;\n# and to support versioned subpackage dependencies\nwith open('src/toga_web/__init__.py', encoding='utf8') as version_file:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file.read(),\n re.M\n )\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n version=version,\n install_requires=[\n # TODO: Due to https://github.com/pyodide/pyodide/issues/2408, the name\n # toga-core is ambigous when on the package hasn't been published to\n # PyPI. As a workaround, don't specify the dependency, and manually\n # ensure that toga-core is installed.\n # 'toga-core==%s' % version,\n ],\n)\n", "path": "src/web/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport re\n\nfrom setuptools import setup\n\n# Version handline needs to be programatic because\n# we can't import toga_web to compute the version;\n# and to support versioned subpackage dependencies\nwith open('src/toga_web/__init__.py', encoding='utf8') as version_file:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file.read(),\n re.M\n )\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n version=version,\n install_requires=[\n 'toga-core==%s' % version,\n ],\n)\n", "path": "src/web/setup.py"}]} | 694 | 159 |
gh_patches_debug_2990 | rasdani/github-patches | git_diff | aio-libs-abandoned__aioredis-py-535 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a BUSYGROUP reply error
The XGROUP CREATE command can return a BUSYGROUP error when a group already exists: https://redis.io/commands/xgroup
I think the `ReplyError` subclass for matching it would look like this:
```py
class BusyGroupError(ReplyError):
MATCH_REPLY = "BUSYGROUP Consumer Group name already exists"
```
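For what it's worth, `ReplyError.__new__` in `aioredis/errors.py` (shown below) already dispatches on `MATCH_REPLY`, so once such a subclass exists, a matching server reply should come back as the more specific error automatically. A rough illustration (assuming the class above has been added to `aioredis.errors`):

```py
from aioredis.errors import ReplyError, BusyGroupError  # BusyGroupError = the proposed class

# ReplyError.__new__ scans its subclasses' MATCH_REPLY prefixes, so building the
# error from the raw server message yields the specific subclass.
err = ReplyError("BUSYGROUP Consumer Group name already exists")
assert isinstance(err, BusyGroupError)
```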
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aioredis/errors.py`
Content:
```
1 __all__ = [
2 'RedisError',
3 'ProtocolError',
4 'ReplyError',
5 'MaxClientsError',
6 'AuthError',
7 'PipelineError',
8 'MultiExecError',
9 'WatchVariableError',
10 'ChannelClosedError',
11 'ConnectionClosedError',
12 'ConnectionForcedCloseError',
13 'PoolClosedError',
14 'MasterNotFoundError',
15 'SlaveNotFoundError',
16 'ReadOnlyError',
17 ]
18
19
20 class RedisError(Exception):
21 """Base exception class for aioredis exceptions."""
22
23
24 class ProtocolError(RedisError):
25 """Raised when protocol error occurs."""
26
27
28 class ReplyError(RedisError):
29 """Raised for redis error replies (-ERR)."""
30
31 MATCH_REPLY = None
32
33 def __new__(cls, msg, *args):
34 for klass in cls.__subclasses__():
35 if msg and klass.MATCH_REPLY and msg.startswith(klass.MATCH_REPLY):
36 return klass(msg, *args)
37 return super().__new__(cls, msg, *args)
38
39
40 class MaxClientsError(ReplyError):
41 """Raised for redis server when the maximum number of client has been
42 reached."""
43
44 MATCH_REPLY = "ERR max number of clients reached"
45
46
47 class AuthError(ReplyError):
48 """Raised when authentication errors occurs."""
49
50 MATCH_REPLY = ("NOAUTH ", "ERR invalid password")
51
52
53 class PipelineError(RedisError):
54 """Raised if command within pipeline raised error."""
55
56 def __init__(self, errors):
57 super().__init__('{} errors:'.format(self.__class__.__name__), errors)
58
59
60 class MultiExecError(PipelineError):
61 """Raised if command within MULTI/EXEC block caused error."""
62
63
64 class WatchVariableError(MultiExecError):
65 """Raised if watched variable changed (EXEC returns None)."""
66
67
68 class ChannelClosedError(RedisError):
69 """Raised when Pub/Sub channel is unsubscribed and messages queue is empty.
70 """
71
72
73 class ReadOnlyError(RedisError):
74 """Raised from slave when read-only mode is enabled"""
75
76
77 class MasterNotFoundError(RedisError):
78 """Raised for sentinel master not found error."""
79
80
81 class SlaveNotFoundError(RedisError):
82 """Raised for sentinel slave not found error."""
83
84
85 class MasterReplyError(RedisError):
86 """Raised by sentinel client for master error replies."""
87
88
89 class SlaveReplyError(RedisError):
90 """Raised by sentinel client for slave error replies."""
91
92
93 class ConnectionClosedError(RedisError):
94 """Raised if connection to server was closed."""
95
96
97 class ConnectionForcedCloseError(ConnectionClosedError):
98 """Raised if connection was closed with .close() method."""
99
100
101 class PoolClosedError(RedisError):
102 """Raised if pool is closed."""
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aioredis/errors.py b/aioredis/errors.py
--- a/aioredis/errors.py
+++ b/aioredis/errors.py
@@ -50,6 +50,12 @@
MATCH_REPLY = ("NOAUTH ", "ERR invalid password")
+class BusyGroupError(ReplyError):
+ """Raised if Consumer Group name already exists."""
+
+ MATCH_REPLY = "BUSYGROUP Consumer Group name already exists"
+
+
class PipelineError(RedisError):
"""Raised if command within pipeline raised error."""
| {"golden_diff": "diff --git a/aioredis/errors.py b/aioredis/errors.py\n--- a/aioredis/errors.py\n+++ b/aioredis/errors.py\n@@ -50,6 +50,12 @@\n MATCH_REPLY = (\"NOAUTH \", \"ERR invalid password\")\n \n \n+class BusyGroupError(ReplyError):\n+ \"\"\"Raised if Consumer Group name already exists.\"\"\"\n+\n+ MATCH_REPLY = \"BUSYGROUP Consumer Group name already exists\"\n+\n+\n class PipelineError(RedisError):\n \"\"\"Raised if command within pipeline raised error.\"\"\"\n", "issue": "Add a BUSYGROUP reply error\nThe XGROUP CREATE command can return a BUSYGROUP error when a group already exists: https://redis.io/commands/xgroup\r\n\r\nI think the `ReplyError` subclass for matching it would look like this:\r\n\r\n```py\r\nclass BusyGroupError(ReplyError):\r\n MATCH_REPLY = \"BUSYGROUP Consumer Group name already exists\"\r\n```\n", "before_files": [{"content": "__all__ = [\n 'RedisError',\n 'ProtocolError',\n 'ReplyError',\n 'MaxClientsError',\n 'AuthError',\n 'PipelineError',\n 'MultiExecError',\n 'WatchVariableError',\n 'ChannelClosedError',\n 'ConnectionClosedError',\n 'ConnectionForcedCloseError',\n 'PoolClosedError',\n 'MasterNotFoundError',\n 'SlaveNotFoundError',\n 'ReadOnlyError',\n ]\n\n\nclass RedisError(Exception):\n \"\"\"Base exception class for aioredis exceptions.\"\"\"\n\n\nclass ProtocolError(RedisError):\n \"\"\"Raised when protocol error occurs.\"\"\"\n\n\nclass ReplyError(RedisError):\n \"\"\"Raised for redis error replies (-ERR).\"\"\"\n\n MATCH_REPLY = None\n\n def __new__(cls, msg, *args):\n for klass in cls.__subclasses__():\n if msg and klass.MATCH_REPLY and msg.startswith(klass.MATCH_REPLY):\n return klass(msg, *args)\n return super().__new__(cls, msg, *args)\n\n\nclass MaxClientsError(ReplyError):\n \"\"\"Raised for redis server when the maximum number of client has been\n reached.\"\"\"\n\n MATCH_REPLY = \"ERR max number of clients reached\"\n\n\nclass AuthError(ReplyError):\n \"\"\"Raised when authentication errors occurs.\"\"\"\n\n MATCH_REPLY = (\"NOAUTH \", \"ERR invalid password\")\n\n\nclass PipelineError(RedisError):\n \"\"\"Raised if command within pipeline raised error.\"\"\"\n\n def __init__(self, errors):\n super().__init__('{} errors:'.format(self.__class__.__name__), errors)\n\n\nclass MultiExecError(PipelineError):\n \"\"\"Raised if command within MULTI/EXEC block caused error.\"\"\"\n\n\nclass WatchVariableError(MultiExecError):\n \"\"\"Raised if watched variable changed (EXEC returns None).\"\"\"\n\n\nclass ChannelClosedError(RedisError):\n \"\"\"Raised when Pub/Sub channel is unsubscribed and messages queue is empty.\n \"\"\"\n\n\nclass ReadOnlyError(RedisError):\n \"\"\"Raised from slave when read-only mode is enabled\"\"\"\n\n\nclass MasterNotFoundError(RedisError):\n \"\"\"Raised for sentinel master not found error.\"\"\"\n\n\nclass SlaveNotFoundError(RedisError):\n \"\"\"Raised for sentinel slave not found error.\"\"\"\n\n\nclass MasterReplyError(RedisError):\n \"\"\"Raised by sentinel client for master error replies.\"\"\"\n\n\nclass SlaveReplyError(RedisError):\n \"\"\"Raised by sentinel client for slave error replies.\"\"\"\n\n\nclass ConnectionClosedError(RedisError):\n \"\"\"Raised if connection to server was closed.\"\"\"\n\n\nclass ConnectionForcedCloseError(ConnectionClosedError):\n \"\"\"Raised if connection was closed with .close() method.\"\"\"\n\n\nclass PoolClosedError(RedisError):\n \"\"\"Raised if pool is closed.\"\"\"\n", "path": "aioredis/errors.py"}], "after_files": [{"content": "__all__ = [\n 'RedisError',\n 
'ProtocolError',\n 'ReplyError',\n 'MaxClientsError',\n 'AuthError',\n 'PipelineError',\n 'MultiExecError',\n 'WatchVariableError',\n 'ChannelClosedError',\n 'ConnectionClosedError',\n 'ConnectionForcedCloseError',\n 'PoolClosedError',\n 'MasterNotFoundError',\n 'SlaveNotFoundError',\n 'ReadOnlyError',\n ]\n\n\nclass RedisError(Exception):\n \"\"\"Base exception class for aioredis exceptions.\"\"\"\n\n\nclass ProtocolError(RedisError):\n \"\"\"Raised when protocol error occurs.\"\"\"\n\n\nclass ReplyError(RedisError):\n \"\"\"Raised for redis error replies (-ERR).\"\"\"\n\n MATCH_REPLY = None\n\n def __new__(cls, msg, *args):\n for klass in cls.__subclasses__():\n if msg and klass.MATCH_REPLY and msg.startswith(klass.MATCH_REPLY):\n return klass(msg, *args)\n return super().__new__(cls, msg, *args)\n\n\nclass MaxClientsError(ReplyError):\n \"\"\"Raised for redis server when the maximum number of client has been\n reached.\"\"\"\n\n MATCH_REPLY = \"ERR max number of clients reached\"\n\n\nclass AuthError(ReplyError):\n \"\"\"Raised when authentication errors occurs.\"\"\"\n\n MATCH_REPLY = (\"NOAUTH \", \"ERR invalid password\")\n\n\nclass BusyGroupError(ReplyError):\n \"\"\"Raised if Consumer Group name already exists.\"\"\"\n\n MATCH_REPLY = \"BUSYGROUP Consumer Group name already exists\"\n\n\nclass PipelineError(RedisError):\n \"\"\"Raised if command within pipeline raised error.\"\"\"\n\n def __init__(self, errors):\n super().__init__('{} errors:'.format(self.__class__.__name__), errors)\n\n\nclass MultiExecError(PipelineError):\n \"\"\"Raised if command within MULTI/EXEC block caused error.\"\"\"\n\n\nclass WatchVariableError(MultiExecError):\n \"\"\"Raised if watched variable changed (EXEC returns None).\"\"\"\n\n\nclass ChannelClosedError(RedisError):\n \"\"\"Raised when Pub/Sub channel is unsubscribed and messages queue is empty.\n \"\"\"\n\n\nclass ReadOnlyError(RedisError):\n \"\"\"Raised from slave when read-only mode is enabled\"\"\"\n\n\nclass MasterNotFoundError(RedisError):\n \"\"\"Raised for sentinel master not found error.\"\"\"\n\n\nclass SlaveNotFoundError(RedisError):\n \"\"\"Raised for sentinel slave not found error.\"\"\"\n\n\nclass MasterReplyError(RedisError):\n \"\"\"Raised by sentinel client for master error replies.\"\"\"\n\n\nclass SlaveReplyError(RedisError):\n \"\"\"Raised by sentinel client for slave error replies.\"\"\"\n\n\nclass ConnectionClosedError(RedisError):\n \"\"\"Raised if connection to server was closed.\"\"\"\n\n\nclass ConnectionForcedCloseError(ConnectionClosedError):\n \"\"\"Raised if connection was closed with .close() method.\"\"\"\n\n\nclass PoolClosedError(RedisError):\n \"\"\"Raised if pool is closed.\"\"\"\n", "path": "aioredis/errors.py"}]} | 1,107 | 118 |
gh_patches_debug_5305 | rasdani/github-patches | git_diff | conan-io__conan-2793 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tools.os_info.detect_windows_subsystem() should not raise
To help us debug your issue please explain:
*conanfile.py*
```
...
def build(self):
...
if tools.os_info.detect_windows_subsystem():
make = "make"
use_win_bash = True
```
command:
```
$ conan create . danitmb/testing
...
ERROR: TBB/4.4.4@danimtb/testing: Error in build() method, line 42
if tools.os_info.detect_windows_subsystem():
ConanException: bash is not in the path
```
Using `cygwin_installer/2.9.0@bincrafters/stable` and `msys2_installer/20161025@bincrafters/stable`, bash is in the path and `tools.os_info.detect_windows_subsystem()` behaves well.
So it would be better not to raise when you only want to check whether you are in a Windows subsystem, and to just return `None` instead.
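In the meantime, a caller-side workaround along these lines (an illustrative sketch only, assuming the usual `from conans import tools` import available in recipes) gives the behaviour being asked for:

```
from conans import tools
from conans.errors import ConanException

def windows_subsystem_or_none():
    """Like tools.os_info.detect_windows_subsystem(), but returns None instead of raising."""
    try:
        return tools.os_info.detect_windows_subsystem()
    except ConanException:  # currently raised when bash is not in the path
        return None
```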
- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).
- [x] I've specified the Conan version, operating system version and any tool that can be relevant: conan 1.2.3
- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/tools/oss.py`
Content:
```
1 import multiprocessing
2 import platform
3 import subprocess
4 import sys
5
6 import os
7
8 from conans.client.tools.env import environment_append
9 from conans.errors import ConanException
10 from conans.model.version import Version
11 from conans.util.log import logger
12 from conans.client.tools import which
13
14 _global_output = None
15
16
17 def args_to_string(args):
18 if not args:
19 return ""
20 if sys.platform == 'win32':
21 return subprocess.list2cmdline(args)
22 else:
23 return " ".join("'" + arg.replace("'", r"'\''") + "'" for arg in args)
24
25
26 def cpu_count():
27 try:
28 env_cpu_count = os.getenv("CONAN_CPU_COUNT", None)
29 return int(env_cpu_count) if env_cpu_count else multiprocessing.cpu_count()
30 except NotImplementedError:
31 _global_output.warn("multiprocessing.cpu_count() not implemented. Defaulting to 1 cpu")
32 return 1 # Safe guess
33
34
35 def detected_architecture():
36 # FIXME: Very weak check but not very common to run conan in other architectures
37 machine = platform.machine()
38 if "64" in machine:
39 return "x86_64"
40 elif "86" in machine:
41 return "x86"
42 elif "armv8" in machine:
43 return "armv8"
44 elif "armv7" in machine:
45 return "armv7"
46
47 return None
48
49 # DETECT OS, VERSION AND DISTRIBUTIONS
50
51
52 class OSInfo(object):
53 """ Usage:
54 (os_info.is_linux) # True/False
55 (os_info.is_windows) # True/False
56 (os_info.is_macos) # True/False
57 (os_info.is_freebsd) # True/False
58 (os_info.is_solaris) # True/False
59
60 (os_info.linux_distro) # debian, ubuntu, fedora, centos...
61
62 (os_info.os_version) # 5.1
63 (os_info.os_version_name) # Windows 7, El Capitan
64
65 if os_info.os_version > "10.1":
66 pass
67 if os_info.os_version == "10.1.0":
68 pass
69 """
70
71 def __init__(self):
72 self.os_version = None
73 self.os_version_name = None
74 self.is_linux = platform.system() == "Linux"
75 self.linux_distro = None
76 self.is_windows = platform.system() == "Windows"
77 self.is_macos = platform.system() == "Darwin"
78 self.is_freebsd = platform.system() == "FreeBSD"
79 self.is_solaris = platform.system() == "SunOS"
80
81 if self.is_linux:
82 import distro
83 self.linux_distro = distro.id()
84 self.os_version = Version(distro.version())
85 version_name = distro.codename()
86 self.os_version_name = version_name if version_name != "n/a" else ""
87 if not self.os_version_name and self.linux_distro == "debian":
88 self.os_version_name = self.get_debian_version_name(self.os_version)
89 elif self.is_windows:
90 self.os_version = self.get_win_os_version()
91 self.os_version_name = self.get_win_version_name(self.os_version)
92 elif self.is_macos:
93 self.os_version = Version(platform.mac_ver()[0])
94 self.os_version_name = self.get_osx_version_name(self.os_version)
95 elif self.is_freebsd:
96 self.os_version = self.get_freebsd_version()
97 self.os_version_name = "FreeBSD %s" % self.os_version
98 elif self.is_solaris:
99 self.os_version = Version(platform.release())
100 self.os_version_name = self.get_solaris_version_name(self.os_version)
101
102 @property
103 def with_apt(self):
104 return self.is_linux and self.linux_distro in \
105 ("debian", "ubuntu", "knoppix", "linuxmint", "raspbian")
106
107 @property
108 def with_yum(self):
109 return self.is_linux and self.linux_distro in \
110 ("centos", "redhat", "fedora", "pidora", "scientific",
111 "xenserver", "amazon", "oracle", "rhel")
112
113 @property
114 def with_pacman(self):
115 if self.is_linux:
116 return self.linux_distro == "arch"
117 elif self.is_windows and which('uname.exe'):
118 uname = subprocess.check_output(['uname.exe', '-s']).decode()
119 return uname.startswith('MSYS_NT') and which('pacman.exe')
120 return False
121
122 @property
123 def with_zypper(self):
124 return self.is_linux and self.linux_distro in \
125 ("opensuse", "sles")
126
127 @staticmethod
128 def get_win_os_version():
129 """
130 Get's the OS major and minor versions. Returns a tuple of
131 (OS_MAJOR, OS_MINOR).
132 """
133 import ctypes
134
135 class _OSVERSIONINFOEXW(ctypes.Structure):
136 _fields_ = [('dwOSVersionInfoSize', ctypes.c_ulong),
137 ('dwMajorVersion', ctypes.c_ulong),
138 ('dwMinorVersion', ctypes.c_ulong),
139 ('dwBuildNumber', ctypes.c_ulong),
140 ('dwPlatformId', ctypes.c_ulong),
141 ('szCSDVersion', ctypes.c_wchar * 128),
142 ('wServicePackMajor', ctypes.c_ushort),
143 ('wServicePackMinor', ctypes.c_ushort),
144 ('wSuiteMask', ctypes.c_ushort),
145 ('wProductType', ctypes.c_byte),
146 ('wReserved', ctypes.c_byte)]
147
148 os_version = _OSVERSIONINFOEXW()
149 os_version.dwOSVersionInfoSize = ctypes.sizeof(os_version)
150 retcode = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version))
151 if retcode != 0:
152 return None
153
154 return Version("%d.%d" % (os_version.dwMajorVersion, os_version.dwMinorVersion))
155
156 @staticmethod
157 def get_debian_version_name(version):
158 if not version:
159 return None
160 elif version.major() == "8.Y.Z":
161 return "jessie"
162 elif version.major() == "7.Y.Z":
163 return "wheezy"
164 elif version.major() == "6.Y.Z":
165 return "squeeze"
166 elif version.major() == "5.Y.Z":
167 return "lenny"
168 elif version.major() == "4.Y.Z":
169 return "etch"
170 elif version.minor() == "3.1.Z":
171 return "sarge"
172 elif version.minor() == "3.0.Z":
173 return "woody"
174
175 @staticmethod
176 def get_win_version_name(version):
177 if not version:
178 return None
179 elif version.major() == "5.Y.Z":
180 return "Windows XP"
181 elif version.minor() == "6.0.Z":
182 return "Windows Vista"
183 elif version.minor() == "6.1.Z":
184 return "Windows 7"
185 elif version.minor() == "6.2.Z":
186 return "Windows 8"
187 elif version.minor() == "6.3.Z":
188 return "Windows 8.1"
189 elif version.minor() == "10.0.Z":
190 return "Windows 10"
191
192 @staticmethod
193 def get_osx_version_name(version):
194 if not version:
195 return None
196 elif version.minor() == "10.13.Z":
197 return "High Sierra"
198 elif version.minor() == "10.12.Z":
199 return "Sierra"
200 elif version.minor() == "10.11.Z":
201 return "El Capitan"
202 elif version.minor() == "10.10.Z":
203 return "Yosemite"
204 elif version.minor() == "10.9.Z":
205 return "Mavericks"
206 elif version.minor() == "10.8.Z":
207 return "Mountain Lion"
208 elif version.minor() == "10.7.Z":
209 return "Lion"
210 elif version.minor() == "10.6.Z":
211 return "Snow Leopard"
212 elif version.minor() == "10.5.Z":
213 return "Leopard"
214 elif version.minor() == "10.4.Z":
215 return "Tiger"
216 elif version.minor() == "10.3.Z":
217 return "Panther"
218 elif version.minor() == "10.2.Z":
219 return "Jaguar"
220 elif version.minor() == "10.1.Z":
221 return "Puma"
222 elif version.minor() == "10.0.Z":
223 return "Cheetha"
224
225 @staticmethod
226 def get_freebsd_version():
227 return platform.release().split("-")[0]
228
229 @staticmethod
230 def get_solaris_version_name(version):
231 if not version:
232 return None
233 elif version.minor() == "5.10":
234 return "Solaris 10"
235 elif version.minor() == "5.11":
236 return "Solaris 11"
237
238 @staticmethod
239 def bash_path():
240 if os.getenv("CONAN_BASH_PATH"):
241 return os.getenv("CONAN_BASH_PATH")
242 return which("bash")
243
244 @staticmethod
245 def uname(options=None):
246 options = " %s" % options if options else ""
247 if platform.system() != "Windows":
248 raise ConanException("Command only for Windows operating system")
249 custom_bash_path = OSInfo.bash_path()
250 if not custom_bash_path:
251 raise ConanException("bash is not in the path")
252
253 command = '"%s" -c "uname%s"' % (custom_bash_path, options)
254 try:
255 # the uname executable is many times located in the same folder as bash.exe
256 with environment_append({"PATH": [os.path.dirname(custom_bash_path)]}):
257 ret = subprocess.check_output(command, shell=True, ).decode().strip().lower()
258 return ret
259 except Exception:
260 return None
261
262 @staticmethod
263 def detect_windows_subsystem():
264 from conans.client.tools.win import CYGWIN, MSYS2, MSYS, WSL
265 output = OSInfo.uname()
266 if not output:
267 return None
268 if "cygwin" in output:
269 return CYGWIN
270 elif "msys" in output or "mingw" in output:
271 output = OSInfo.uname("-or")
272 if output.startswith("2"):
273 return MSYS2
274 elif output.startswith("1"):
275 return MSYS
276 else:
277 return None
278 elif "linux" in output:
279 return WSL
280 else:
281 return None
282
283
284 def cross_building(settings, self_os=None, self_arch=None):
285
286 ret = get_cross_building_settings(settings, self_os, self_arch)
287 build_os, build_arch, host_os, host_arch = ret
288
289 if host_os is not None and (build_os != host_os):
290 return True
291 if host_arch is not None and (build_arch != host_arch):
292 return True
293
294 return False
295
296
297 def get_cross_building_settings(settings, self_os=None, self_arch=None):
298 build_os = self_os or settings.get_safe("os_build") or \
299 {"Darwin": "Macos"}.get(platform.system(), platform.system())
300 build_arch = self_arch or settings.get_safe("arch_build") or detected_architecture()
301 host_os = settings.get_safe("os")
302 host_arch = settings.get_safe("arch")
303
304 return build_os, build_arch, host_os, host_arch
305
306
307 try:
308 os_info = OSInfo()
309 except Exception as exc:
310 logger.error(exc)
311 _global_output.error("Error detecting os_info")
312
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/client/tools/oss.py b/conans/client/tools/oss.py
--- a/conans/client/tools/oss.py
+++ b/conans/client/tools/oss.py
@@ -262,7 +262,10 @@
@staticmethod
def detect_windows_subsystem():
from conans.client.tools.win import CYGWIN, MSYS2, MSYS, WSL
- output = OSInfo.uname()
+ try:
+ output = OSInfo.uname()
+ except ConanException:
+ return None
if not output:
return None
if "cygwin" in output:
| {"golden_diff": "diff --git a/conans/client/tools/oss.py b/conans/client/tools/oss.py\n--- a/conans/client/tools/oss.py\n+++ b/conans/client/tools/oss.py\n@@ -262,7 +262,10 @@\n @staticmethod\n def detect_windows_subsystem():\n from conans.client.tools.win import CYGWIN, MSYS2, MSYS, WSL\n- output = OSInfo.uname()\n+ try:\n+ output = OSInfo.uname()\n+ except ConanException:\n+ return None\n if not output:\n return None\n if \"cygwin\" in output:\n", "issue": "tools.os_info.detect_windows_subsystem() should not raise\nTo help us debug your issue please explain:\r\n\r\n*conanfile.py*\r\n```\r\n...\r\n def build(self):\r\n ...\r\n if tools.os_info.detect_windows_subsystem():\r\n make = \"make\"\r\n use_win_bash = True\r\n```\r\n\r\ncommnad:\r\n```\r\n$ conan create . danitmb/testing\r\n...\r\nERROR: TBB/4.4.4@danimtb/testing: Error in build() method, line 42\r\n if tools.os_info.detect_windows_subsystem():\r\n ConanException: bash is not in the path\r\n```\r\n\r\nUsing `cygwin_installer/2.9.0@bincrafters/stable` and `msys2_installer/20161025@bincrafters/stable` bash is in path and `tools.os_info.detect_windows_subsystem()` behaves well.\r\n\r\nSo it would be better to not raise in case you want to check if you are in a windows subsystem and just return `None`.\r\n\r\n- [x] I've read the [CONTRIBUTING guide](https://raw.githubusercontent.com/conan-io/conan/develop/.github/CONTRIBUTING.md).\r\n- [x] I've specified the Conan version, operating system version and any tool that can be relevant: conan 1.2.3\r\n- [x] I've explained the steps to reproduce the error or the motivation/use case of the question/suggestion.\r\n\r\n\n", "before_files": [{"content": "import multiprocessing\nimport platform\nimport subprocess\nimport sys\n\nimport os\n\nfrom conans.client.tools.env import environment_append\nfrom conans.errors import ConanException\nfrom conans.model.version import Version\nfrom conans.util.log import logger\nfrom conans.client.tools import which\n\n_global_output = None\n\n\ndef args_to_string(args):\n if not args:\n return \"\"\n if sys.platform == 'win32':\n return subprocess.list2cmdline(args)\n else:\n return \" \".join(\"'\" + arg.replace(\"'\", r\"'\\''\") + \"'\" for arg in args)\n\n\ndef cpu_count():\n try:\n env_cpu_count = os.getenv(\"CONAN_CPU_COUNT\", None)\n return int(env_cpu_count) if env_cpu_count else multiprocessing.cpu_count()\n except NotImplementedError:\n _global_output.warn(\"multiprocessing.cpu_count() not implemented. 
Defaulting to 1 cpu\")\n return 1 # Safe guess\n\n\ndef detected_architecture():\n # FIXME: Very weak check but not very common to run conan in other architectures\n machine = platform.machine()\n if \"64\" in machine:\n return \"x86_64\"\n elif \"86\" in machine:\n return \"x86\"\n elif \"armv8\" in machine:\n return \"armv8\"\n elif \"armv7\" in machine:\n return \"armv7\"\n\n return None\n\n# DETECT OS, VERSION AND DISTRIBUTIONS\n\n\nclass OSInfo(object):\n \"\"\" Usage:\n (os_info.is_linux) # True/False\n (os_info.is_windows) # True/False\n (os_info.is_macos) # True/False\n (os_info.is_freebsd) # True/False\n (os_info.is_solaris) # True/False\n\n (os_info.linux_distro) # debian, ubuntu, fedora, centos...\n\n (os_info.os_version) # 5.1\n (os_info.os_version_name) # Windows 7, El Capitan\n\n if os_info.os_version > \"10.1\":\n pass\n if os_info.os_version == \"10.1.0\":\n pass\n \"\"\"\n\n def __init__(self):\n self.os_version = None\n self.os_version_name = None\n self.is_linux = platform.system() == \"Linux\"\n self.linux_distro = None\n self.is_windows = platform.system() == \"Windows\"\n self.is_macos = platform.system() == \"Darwin\"\n self.is_freebsd = platform.system() == \"FreeBSD\"\n self.is_solaris = platform.system() == \"SunOS\"\n\n if self.is_linux:\n import distro\n self.linux_distro = distro.id()\n self.os_version = Version(distro.version())\n version_name = distro.codename()\n self.os_version_name = version_name if version_name != \"n/a\" else \"\"\n if not self.os_version_name and self.linux_distro == \"debian\":\n self.os_version_name = self.get_debian_version_name(self.os_version)\n elif self.is_windows:\n self.os_version = self.get_win_os_version()\n self.os_version_name = self.get_win_version_name(self.os_version)\n elif self.is_macos:\n self.os_version = Version(platform.mac_ver()[0])\n self.os_version_name = self.get_osx_version_name(self.os_version)\n elif self.is_freebsd:\n self.os_version = self.get_freebsd_version()\n self.os_version_name = \"FreeBSD %s\" % self.os_version\n elif self.is_solaris:\n self.os_version = Version(platform.release())\n self.os_version_name = self.get_solaris_version_name(self.os_version)\n\n @property\n def with_apt(self):\n return self.is_linux and self.linux_distro in \\\n (\"debian\", \"ubuntu\", \"knoppix\", \"linuxmint\", \"raspbian\")\n\n @property\n def with_yum(self):\n return self.is_linux and self.linux_distro in \\\n (\"centos\", \"redhat\", \"fedora\", \"pidora\", \"scientific\",\n \"xenserver\", \"amazon\", \"oracle\", \"rhel\")\n\n @property\n def with_pacman(self):\n if self.is_linux:\n return self.linux_distro == \"arch\"\n elif self.is_windows and which('uname.exe'):\n uname = subprocess.check_output(['uname.exe', '-s']).decode()\n return uname.startswith('MSYS_NT') and which('pacman.exe')\n return False\n\n @property\n def with_zypper(self):\n return self.is_linux and self.linux_distro in \\\n (\"opensuse\", \"sles\")\n\n @staticmethod\n def get_win_os_version():\n \"\"\"\n Get's the OS major and minor versions. 
Returns a tuple of\n (OS_MAJOR, OS_MINOR).\n \"\"\"\n import ctypes\n\n class _OSVERSIONINFOEXW(ctypes.Structure):\n _fields_ = [('dwOSVersionInfoSize', ctypes.c_ulong),\n ('dwMajorVersion', ctypes.c_ulong),\n ('dwMinorVersion', ctypes.c_ulong),\n ('dwBuildNumber', ctypes.c_ulong),\n ('dwPlatformId', ctypes.c_ulong),\n ('szCSDVersion', ctypes.c_wchar * 128),\n ('wServicePackMajor', ctypes.c_ushort),\n ('wServicePackMinor', ctypes.c_ushort),\n ('wSuiteMask', ctypes.c_ushort),\n ('wProductType', ctypes.c_byte),\n ('wReserved', ctypes.c_byte)]\n\n os_version = _OSVERSIONINFOEXW()\n os_version.dwOSVersionInfoSize = ctypes.sizeof(os_version)\n retcode = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version))\n if retcode != 0:\n return None\n\n return Version(\"%d.%d\" % (os_version.dwMajorVersion, os_version.dwMinorVersion))\n\n @staticmethod\n def get_debian_version_name(version):\n if not version:\n return None\n elif version.major() == \"8.Y.Z\":\n return \"jessie\"\n elif version.major() == \"7.Y.Z\":\n return \"wheezy\"\n elif version.major() == \"6.Y.Z\":\n return \"squeeze\"\n elif version.major() == \"5.Y.Z\":\n return \"lenny\"\n elif version.major() == \"4.Y.Z\":\n return \"etch\"\n elif version.minor() == \"3.1.Z\":\n return \"sarge\"\n elif version.minor() == \"3.0.Z\":\n return \"woody\"\n\n @staticmethod\n def get_win_version_name(version):\n if not version:\n return None\n elif version.major() == \"5.Y.Z\":\n return \"Windows XP\"\n elif version.minor() == \"6.0.Z\":\n return \"Windows Vista\"\n elif version.minor() == \"6.1.Z\":\n return \"Windows 7\"\n elif version.minor() == \"6.2.Z\":\n return \"Windows 8\"\n elif version.minor() == \"6.3.Z\":\n return \"Windows 8.1\"\n elif version.minor() == \"10.0.Z\":\n return \"Windows 10\"\n\n @staticmethod\n def get_osx_version_name(version):\n if not version:\n return None\n elif version.minor() == \"10.13.Z\":\n return \"High Sierra\"\n elif version.minor() == \"10.12.Z\":\n return \"Sierra\"\n elif version.minor() == \"10.11.Z\":\n return \"El Capitan\"\n elif version.minor() == \"10.10.Z\":\n return \"Yosemite\"\n elif version.minor() == \"10.9.Z\":\n return \"Mavericks\"\n elif version.minor() == \"10.8.Z\":\n return \"Mountain Lion\"\n elif version.minor() == \"10.7.Z\":\n return \"Lion\"\n elif version.minor() == \"10.6.Z\":\n return \"Snow Leopard\"\n elif version.minor() == \"10.5.Z\":\n return \"Leopard\"\n elif version.minor() == \"10.4.Z\":\n return \"Tiger\"\n elif version.minor() == \"10.3.Z\":\n return \"Panther\"\n elif version.minor() == \"10.2.Z\":\n return \"Jaguar\"\n elif version.minor() == \"10.1.Z\":\n return \"Puma\"\n elif version.minor() == \"10.0.Z\":\n return \"Cheetha\"\n\n @staticmethod\n def get_freebsd_version():\n return platform.release().split(\"-\")[0]\n\n @staticmethod\n def get_solaris_version_name(version):\n if not version:\n return None\n elif version.minor() == \"5.10\":\n return \"Solaris 10\"\n elif version.minor() == \"5.11\":\n return \"Solaris 11\"\n\n @staticmethod\n def bash_path():\n if os.getenv(\"CONAN_BASH_PATH\"):\n return os.getenv(\"CONAN_BASH_PATH\")\n return which(\"bash\")\n\n @staticmethod\n def uname(options=None):\n options = \" %s\" % options if options else \"\"\n if platform.system() != \"Windows\":\n raise ConanException(\"Command only for Windows operating system\")\n custom_bash_path = OSInfo.bash_path()\n if not custom_bash_path:\n raise ConanException(\"bash is not in the path\")\n\n command = '\"%s\" -c \"uname%s\"' % (custom_bash_path, options)\n try:\n # the 
uname executable is many times located in the same folder as bash.exe\n with environment_append({\"PATH\": [os.path.dirname(custom_bash_path)]}):\n ret = subprocess.check_output(command, shell=True, ).decode().strip().lower()\n return ret\n except Exception:\n return None\n\n @staticmethod\n def detect_windows_subsystem():\n from conans.client.tools.win import CYGWIN, MSYS2, MSYS, WSL\n output = OSInfo.uname()\n if not output:\n return None\n if \"cygwin\" in output:\n return CYGWIN\n elif \"msys\" in output or \"mingw\" in output:\n output = OSInfo.uname(\"-or\")\n if output.startswith(\"2\"):\n return MSYS2\n elif output.startswith(\"1\"):\n return MSYS\n else:\n return None\n elif \"linux\" in output:\n return WSL\n else:\n return None\n\n\ndef cross_building(settings, self_os=None, self_arch=None):\n\n ret = get_cross_building_settings(settings, self_os, self_arch)\n build_os, build_arch, host_os, host_arch = ret\n\n if host_os is not None and (build_os != host_os):\n return True\n if host_arch is not None and (build_arch != host_arch):\n return True\n\n return False\n\n\ndef get_cross_building_settings(settings, self_os=None, self_arch=None):\n build_os = self_os or settings.get_safe(\"os_build\") or \\\n {\"Darwin\": \"Macos\"}.get(platform.system(), platform.system())\n build_arch = self_arch or settings.get_safe(\"arch_build\") or detected_architecture()\n host_os = settings.get_safe(\"os\")\n host_arch = settings.get_safe(\"arch\")\n\n return build_os, build_arch, host_os, host_arch\n\n\ntry:\n os_info = OSInfo()\nexcept Exception as exc:\n logger.error(exc)\n _global_output.error(\"Error detecting os_info\")\n", "path": "conans/client/tools/oss.py"}], "after_files": [{"content": "import multiprocessing\nimport platform\nimport subprocess\nimport sys\n\nimport os\n\nfrom conans.client.tools.env import environment_append\nfrom conans.errors import ConanException\nfrom conans.model.version import Version\nfrom conans.util.log import logger\nfrom conans.client.tools import which\n\n_global_output = None\n\n\ndef args_to_string(args):\n if not args:\n return \"\"\n if sys.platform == 'win32':\n return subprocess.list2cmdline(args)\n else:\n return \" \".join(\"'\" + arg.replace(\"'\", r\"'\\''\") + \"'\" for arg in args)\n\n\ndef cpu_count():\n try:\n env_cpu_count = os.getenv(\"CONAN_CPU_COUNT\", None)\n return int(env_cpu_count) if env_cpu_count else multiprocessing.cpu_count()\n except NotImplementedError:\n _global_output.warn(\"multiprocessing.cpu_count() not implemented. 
Defaulting to 1 cpu\")\n return 1 # Safe guess\n\n\ndef detected_architecture():\n # FIXME: Very weak check but not very common to run conan in other architectures\n machine = platform.machine()\n if \"64\" in machine:\n return \"x86_64\"\n elif \"86\" in machine:\n return \"x86\"\n elif \"armv8\" in machine:\n return \"armv8\"\n elif \"armv7\" in machine:\n return \"armv7\"\n\n return None\n\n# DETECT OS, VERSION AND DISTRIBUTIONS\n\n\nclass OSInfo(object):\n \"\"\" Usage:\n (os_info.is_linux) # True/False\n (os_info.is_windows) # True/False\n (os_info.is_macos) # True/False\n (os_info.is_freebsd) # True/False\n (os_info.is_solaris) # True/False\n\n (os_info.linux_distro) # debian, ubuntu, fedora, centos...\n\n (os_info.os_version) # 5.1\n (os_info.os_version_name) # Windows 7, El Capitan\n\n if os_info.os_version > \"10.1\":\n pass\n if os_info.os_version == \"10.1.0\":\n pass\n \"\"\"\n\n def __init__(self):\n self.os_version = None\n self.os_version_name = None\n self.is_linux = platform.system() == \"Linux\"\n self.linux_distro = None\n self.is_windows = platform.system() == \"Windows\"\n self.is_macos = platform.system() == \"Darwin\"\n self.is_freebsd = platform.system() == \"FreeBSD\"\n self.is_solaris = platform.system() == \"SunOS\"\n\n if self.is_linux:\n import distro\n self.linux_distro = distro.id()\n self.os_version = Version(distro.version())\n version_name = distro.codename()\n self.os_version_name = version_name if version_name != \"n/a\" else \"\"\n if not self.os_version_name and self.linux_distro == \"debian\":\n self.os_version_name = self.get_debian_version_name(self.os_version)\n elif self.is_windows:\n self.os_version = self.get_win_os_version()\n self.os_version_name = self.get_win_version_name(self.os_version)\n elif self.is_macos:\n self.os_version = Version(platform.mac_ver()[0])\n self.os_version_name = self.get_osx_version_name(self.os_version)\n elif self.is_freebsd:\n self.os_version = self.get_freebsd_version()\n self.os_version_name = \"FreeBSD %s\" % self.os_version\n elif self.is_solaris:\n self.os_version = Version(platform.release())\n self.os_version_name = self.get_solaris_version_name(self.os_version)\n\n @property\n def with_apt(self):\n return self.is_linux and self.linux_distro in \\\n (\"debian\", \"ubuntu\", \"knoppix\", \"linuxmint\", \"raspbian\")\n\n @property\n def with_yum(self):\n return self.is_linux and self.linux_distro in \\\n (\"centos\", \"redhat\", \"fedora\", \"pidora\", \"scientific\",\n \"xenserver\", \"amazon\", \"oracle\", \"rhel\")\n\n @property\n def with_pacman(self):\n if self.is_linux:\n return self.linux_distro == \"arch\"\n elif self.is_windows and which('uname.exe'):\n uname = subprocess.check_output(['uname.exe', '-s']).decode()\n return uname.startswith('MSYS_NT') and which('pacman.exe')\n return False\n\n @property\n def with_zypper(self):\n return self.is_linux and self.linux_distro in \\\n (\"opensuse\", \"sles\")\n\n @staticmethod\n def get_win_os_version():\n \"\"\"\n Get's the OS major and minor versions. 
Returns a tuple of\n (OS_MAJOR, OS_MINOR).\n \"\"\"\n import ctypes\n\n class _OSVERSIONINFOEXW(ctypes.Structure):\n _fields_ = [('dwOSVersionInfoSize', ctypes.c_ulong),\n ('dwMajorVersion', ctypes.c_ulong),\n ('dwMinorVersion', ctypes.c_ulong),\n ('dwBuildNumber', ctypes.c_ulong),\n ('dwPlatformId', ctypes.c_ulong),\n ('szCSDVersion', ctypes.c_wchar * 128),\n ('wServicePackMajor', ctypes.c_ushort),\n ('wServicePackMinor', ctypes.c_ushort),\n ('wSuiteMask', ctypes.c_ushort),\n ('wProductType', ctypes.c_byte),\n ('wReserved', ctypes.c_byte)]\n\n os_version = _OSVERSIONINFOEXW()\n os_version.dwOSVersionInfoSize = ctypes.sizeof(os_version)\n retcode = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version))\n if retcode != 0:\n return None\n\n return Version(\"%d.%d\" % (os_version.dwMajorVersion, os_version.dwMinorVersion))\n\n @staticmethod\n def get_debian_version_name(version):\n if not version:\n return None\n elif version.major() == \"8.Y.Z\":\n return \"jessie\"\n elif version.major() == \"7.Y.Z\":\n return \"wheezy\"\n elif version.major() == \"6.Y.Z\":\n return \"squeeze\"\n elif version.major() == \"5.Y.Z\":\n return \"lenny\"\n elif version.major() == \"4.Y.Z\":\n return \"etch\"\n elif version.minor() == \"3.1.Z\":\n return \"sarge\"\n elif version.minor() == \"3.0.Z\":\n return \"woody\"\n\n @staticmethod\n def get_win_version_name(version):\n if not version:\n return None\n elif version.major() == \"5.Y.Z\":\n return \"Windows XP\"\n elif version.minor() == \"6.0.Z\":\n return \"Windows Vista\"\n elif version.minor() == \"6.1.Z\":\n return \"Windows 7\"\n elif version.minor() == \"6.2.Z\":\n return \"Windows 8\"\n elif version.minor() == \"6.3.Z\":\n return \"Windows 8.1\"\n elif version.minor() == \"10.0.Z\":\n return \"Windows 10\"\n\n @staticmethod\n def get_osx_version_name(version):\n if not version:\n return None\n elif version.minor() == \"10.13.Z\":\n return \"High Sierra\"\n elif version.minor() == \"10.12.Z\":\n return \"Sierra\"\n elif version.minor() == \"10.11.Z\":\n return \"El Capitan\"\n elif version.minor() == \"10.10.Z\":\n return \"Yosemite\"\n elif version.minor() == \"10.9.Z\":\n return \"Mavericks\"\n elif version.minor() == \"10.8.Z\":\n return \"Mountain Lion\"\n elif version.minor() == \"10.7.Z\":\n return \"Lion\"\n elif version.minor() == \"10.6.Z\":\n return \"Snow Leopard\"\n elif version.minor() == \"10.5.Z\":\n return \"Leopard\"\n elif version.minor() == \"10.4.Z\":\n return \"Tiger\"\n elif version.minor() == \"10.3.Z\":\n return \"Panther\"\n elif version.minor() == \"10.2.Z\":\n return \"Jaguar\"\n elif version.minor() == \"10.1.Z\":\n return \"Puma\"\n elif version.minor() == \"10.0.Z\":\n return \"Cheetha\"\n\n @staticmethod\n def get_freebsd_version():\n return platform.release().split(\"-\")[0]\n\n @staticmethod\n def get_solaris_version_name(version):\n if not version:\n return None\n elif version.minor() == \"5.10\":\n return \"Solaris 10\"\n elif version.minor() == \"5.11\":\n return \"Solaris 11\"\n\n @staticmethod\n def bash_path():\n if os.getenv(\"CONAN_BASH_PATH\"):\n return os.getenv(\"CONAN_BASH_PATH\")\n return which(\"bash\")\n\n @staticmethod\n def uname(options=None):\n options = \" %s\" % options if options else \"\"\n if platform.system() != \"Windows\":\n raise ConanException(\"Command only for Windows operating system\")\n custom_bash_path = OSInfo.bash_path()\n if not custom_bash_path:\n raise ConanException(\"bash is not in the path\")\n\n command = '\"%s\" -c \"uname%s\"' % (custom_bash_path, options)\n try:\n # the 
uname executable is many times located in the same folder as bash.exe\n with environment_append({\"PATH\": [os.path.dirname(custom_bash_path)]}):\n ret = subprocess.check_output(command, shell=True, ).decode().strip().lower()\n return ret\n except Exception:\n return None\n\n @staticmethod\n def detect_windows_subsystem():\n from conans.client.tools.win import CYGWIN, MSYS2, MSYS, WSL\n try:\n output = OSInfo.uname()\n except ConanException:\n return None\n if not output:\n return None\n if \"cygwin\" in output:\n return CYGWIN\n elif \"msys\" in output or \"mingw\" in output:\n output = OSInfo.uname(\"-or\")\n if output.startswith(\"2\"):\n return MSYS2\n elif output.startswith(\"1\"):\n return MSYS\n else:\n return None\n elif \"linux\" in output:\n return WSL\n else:\n return None\n\n\ndef cross_building(settings, self_os=None, self_arch=None):\n\n ret = get_cross_building_settings(settings, self_os, self_arch)\n build_os, build_arch, host_os, host_arch = ret\n\n if host_os is not None and (build_os != host_os):\n return True\n if host_arch is not None and (build_arch != host_arch):\n return True\n\n return False\n\n\ndef get_cross_building_settings(settings, self_os=None, self_arch=None):\n build_os = self_os or settings.get_safe(\"os_build\") or \\\n {\"Darwin\": \"Macos\"}.get(platform.system(), platform.system())\n build_arch = self_arch or settings.get_safe(\"arch_build\") or detected_architecture()\n host_os = settings.get_safe(\"os\")\n host_arch = settings.get_safe(\"arch\")\n\n return build_os, build_arch, host_os, host_arch\n\n\ntry:\n os_info = OSInfo()\nexcept Exception as exc:\n logger.error(exc)\n _global_output.error(\"Error detecting os_info\")\n", "path": "conans/client/tools/oss.py"}]} | 3,961 | 140 |
gh_patches_debug_1898 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1813 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OpenTelemetry distro as a default distro for OpenTelemetry Instrumentation
The `opentelemetry-instrumentation` auto instrumentation doesn't work without installing `opentelemetry-distro`, as the component initialisation is done in the distro package. How does a regular user know about this, and shouldn't opentelemetry-distro be the default, with an option to let users choose another distro?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/getting_started/otlpcollector_example.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # otcollector.py
16 import time
17
18 from opentelemetry import trace
19 from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
20 OTLPSpanExporter,
21 )
22 from opentelemetry.sdk.trace import TracerProvider
23 from opentelemetry.sdk.trace.export import BatchSpanProcessor
24
25 span_exporter = OTLPSpanExporter(
26 # optional
27 # endpoint:="myCollectorURL:4317",
28 # credentials=ChannelCredentials(credentials),
29 # headers=(("metadata", "metadata")),
30 )
31 tracer_provider = TracerProvider()
32 trace.set_tracer_provider(tracer_provider)
33 span_processor = BatchSpanProcessor(span_exporter)
34 tracer_provider.add_span_processor(span_processor)
35
36 # Configure the tracer to use the collector exporter
37 tracer = trace.get_tracer_provider().get_tracer(__name__)
38
39 with tracer.start_as_current_span("foo"):
40 print("Hello world!")
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/getting_started/otlpcollector_example.py b/docs/getting_started/otlpcollector_example.py
--- a/docs/getting_started/otlpcollector_example.py
+++ b/docs/getting_started/otlpcollector_example.py
@@ -24,7 +24,7 @@
span_exporter = OTLPSpanExporter(
# optional
- # endpoint:="myCollectorURL:4317",
+ # endpoint="myCollectorURL:4317",
# credentials=ChannelCredentials(credentials),
# headers=(("metadata", "metadata")),
)
| {"golden_diff": "diff --git a/docs/getting_started/otlpcollector_example.py b/docs/getting_started/otlpcollector_example.py\n--- a/docs/getting_started/otlpcollector_example.py\n+++ b/docs/getting_started/otlpcollector_example.py\n@@ -24,7 +24,7 @@\n \n span_exporter = OTLPSpanExporter(\n # optional\n- # endpoint:=\"myCollectorURL:4317\",\n+ # endpoint=\"myCollectorURL:4317\",\n # credentials=ChannelCredentials(credentials),\n # headers=((\"metadata\", \"metadata\")),\n )\n", "issue": "OpenTelemetry distro as a default distro for OpenTelemetry Instrumentation\nThe `opentelemetry-instrumentation` auto instrumentation doesn't work without installing `opentelemetry-distro` as the components initialisation is done in distro package. How does a regular user know about this and shouldn't openetemetry distro be the default and can give an option to let user use others? \n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# otcollector.py\nimport time\n\nfrom opentelemetry import trace\nfrom opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (\n OTLPSpanExporter,\n)\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor\n\nspan_exporter = OTLPSpanExporter(\n # optional\n # endpoint:=\"myCollectorURL:4317\",\n # credentials=ChannelCredentials(credentials),\n # headers=((\"metadata\", \"metadata\")),\n)\ntracer_provider = TracerProvider()\ntrace.set_tracer_provider(tracer_provider)\nspan_processor = BatchSpanProcessor(span_exporter)\ntracer_provider.add_span_processor(span_processor)\n\n# Configure the tracer to use the collector exporter\ntracer = trace.get_tracer_provider().get_tracer(__name__)\n\nwith tracer.start_as_current_span(\"foo\"):\n print(\"Hello world!\")\n", "path": "docs/getting_started/otlpcollector_example.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# otcollector.py\nimport time\n\nfrom opentelemetry import trace\nfrom opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (\n OTLPSpanExporter,\n)\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor\n\nspan_exporter = OTLPSpanExporter(\n # optional\n # endpoint=\"myCollectorURL:4317\",\n # credentials=ChannelCredentials(credentials),\n # headers=((\"metadata\", \"metadata\")),\n)\ntracer_provider = 
TracerProvider()\ntrace.set_tracer_provider(tracer_provider)\nspan_processor = BatchSpanProcessor(span_exporter)\ntracer_provider.add_span_processor(span_processor)\n\n# Configure the tracer to use the collector exporter\ntracer = trace.get_tracer_provider().get_tracer(__name__)\n\nwith tracer.start_as_current_span(\"foo\"):\n print(\"Hello world!\")\n", "path": "docs/getting_started/otlpcollector_example.py"}]} | 735 | 127 |
gh_patches_debug_22463 | rasdani/github-patches | git_diff | feast-dev__feast-3514 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
feast ui does not work on proxy subpath
## Expected Behavior
Feast UI should work when it is served behind a proxy, on a subpath e.g. `/feast-ui`
## Current Behavior
Parts of the feast UI work behind a subpath, but not entirely (nothing is displayed, just the feast logo with a "404" text - refer to screenshot). No requests in the network tab of the web browser are hitting 404.

## Steps to reproduce
Serve feast UI as you would e.g. `feature_store.serve_ui()`, optionally passing in the `root_path` parameter (it does not help).
Set up an nginx pod with the following configuration (i.e. the nginx pod should have `/etc/nginx/conf.d/default.conf` with the following contents - `dummy_project` is the project name, and `http://feast-ui-service:8080` is where the feast UI can be accessed from your nginx pod / container):
```
server {
listen 80 default_server;
location = /feast-ui/ {
rewrite (.*) /feast-ui/p/dummy_project permanent;
}
location /feast-ui/ {
proxy_pass http://feast-ui-service:8080/;
}
location / {
proxy_pass http://feast-ui-service:8080/;
}
}
```
This configuration works on localhost when nginx can listen on the root path `/`. However, note that the URL after all the redirects is wrong (it does not have the prefix).
- The first block is required to force a redirect to the `/p/{project_name}`. Without this, the page will display 404 as above.
- The second block is required to strip away `/feast-ui` so the UI app does not receive that path that it is not aware of
- The third block is a trick to make this setup work in a local environment, because the app itself will redirect the user back to `/p/dummy_project` (without the prefix), which we then proxy into the feast UI app. However, in an actual environment, this setup does not work, because when the url does not contain the `/feast-ui` prefix, the ingress will not route it to the nginx pod, so the nginx pod cannot proxy the connection to the right place.
Ideally, if the feast ui app is capable of being served on a subpath, only the second `location` block should be required in the nginx configuration. The first and third `location` blocks are workarounds.
### Specifications
- Version: 0.29.0
## Possible Solution
The app should redirect to relative and not absolute paths
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/ui_server.py`
Content:
```
1 import json
2 import threading
3 from typing import Callable, Optional
4
5 import pkg_resources
6 import uvicorn
7 from fastapi import FastAPI, Response
8 from fastapi.middleware.cors import CORSMiddleware
9 from fastapi.staticfiles import StaticFiles
10
11 import feast
12
13
14 def get_app(
15 store: "feast.FeatureStore",
16 get_registry_dump: Callable,
17 project_id: str,
18 registry_ttl_secs: int,
19 host: str,
20 port: int,
21 ):
22 app = FastAPI()
23
24 app.add_middleware(
25 CORSMiddleware,
26 allow_origins=["*"],
27 allow_credentials=True,
28 allow_methods=["*"],
29 allow_headers=["*"],
30 )
31
32 # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app is shutting down
33 registry_proto = None
34 shutting_down = False
35 active_timer: Optional[threading.Timer] = None
36
37 def async_refresh():
38 store.refresh_registry()
39 nonlocal registry_proto
40 registry_proto = store.registry.proto()
41 if shutting_down:
42 return
43 nonlocal active_timer
44 active_timer = threading.Timer(registry_ttl_secs, async_refresh)
45 active_timer.start()
46
47 @app.on_event("shutdown")
48 def shutdown_event():
49 nonlocal shutting_down
50 shutting_down = True
51 if active_timer:
52 active_timer.cancel()
53
54 async_refresh()
55
56 ui_dir = pkg_resources.resource_filename(__name__, "ui/build/")
57 # Initialize with the projects-list.json file
58 with open(ui_dir + "projects-list.json", mode="w") as f:
59 projects_dict = {
60 "projects": [
61 {
62 "name": "Project",
63 "description": "Test project",
64 "id": project_id,
65 "registryPath": "/registry",
66 }
67 ]
68 }
69 f.write(json.dumps(projects_dict))
70
71 @app.get("/registry")
72 def read_registry():
73 return Response(
74 content=registry_proto.SerializeToString(),
75 media_type="application/octet-stream",
76 )
77
78 # For all other paths (such as paths that would otherwise be handled by react router), pass to React
79 @app.api_route("/p/{path_name:path}", methods=["GET"])
80 def catch_all():
81 filename = ui_dir + "index.html"
82
83 with open(filename) as f:
84 content = f.read()
85
86 return Response(content, media_type="text/html")
87
88 app.mount(
89 "/",
90 StaticFiles(directory=ui_dir, html=True),
91 name="site",
92 )
93
94 return app
95
96
97 def start_server(
98 store: "feast.FeatureStore",
99 host: str,
100 port: int,
101 get_registry_dump: Callable,
102 project_id: str,
103 registry_ttl_sec: int,
104 root_path: str = "",
105 ):
106 app = get_app(
107 store,
108 get_registry_dump,
109 project_id,
110 registry_ttl_sec,
111 host,
112 port,
113 )
114 assert root_path is not None
115 uvicorn.run(app, host=host, port=port, root_path=root_path)
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/feast/ui_server.py b/sdk/python/feast/ui_server.py
--- a/sdk/python/feast/ui_server.py
+++ b/sdk/python/feast/ui_server.py
@@ -13,11 +13,9 @@
def get_app(
store: "feast.FeatureStore",
- get_registry_dump: Callable,
project_id: str,
registry_ttl_secs: int,
- host: str,
- port: int,
+ root_path: str = "",
):
app = FastAPI()
@@ -62,7 +60,7 @@
"name": "Project",
"description": "Test project",
"id": project_id,
- "registryPath": "/registry",
+ "registryPath": f"{root_path}/registry",
}
]
}
@@ -105,11 +103,8 @@
):
app = get_app(
store,
- get_registry_dump,
project_id,
registry_ttl_sec,
- host,
- port,
+ root_path,
)
- assert root_path is not None
- uvicorn.run(app, host=host, port=port, root_path=root_path)
+ uvicorn.run(app, host=host, port=port)
| {"golden_diff": "diff --git a/sdk/python/feast/ui_server.py b/sdk/python/feast/ui_server.py\n--- a/sdk/python/feast/ui_server.py\n+++ b/sdk/python/feast/ui_server.py\n@@ -13,11 +13,9 @@\n \n def get_app(\n store: \"feast.FeatureStore\",\n- get_registry_dump: Callable,\n project_id: str,\n registry_ttl_secs: int,\n- host: str,\n- port: int,\n+ root_path: str = \"\",\n ):\n app = FastAPI()\n \n@@ -62,7 +60,7 @@\n \"name\": \"Project\",\n \"description\": \"Test project\",\n \"id\": project_id,\n- \"registryPath\": \"/registry\",\n+ \"registryPath\": f\"{root_path}/registry\",\n }\n ]\n }\n@@ -105,11 +103,8 @@\n ):\n app = get_app(\n store,\n- get_registry_dump,\n project_id,\n registry_ttl_sec,\n- host,\n- port,\n+ root_path,\n )\n- assert root_path is not None\n- uvicorn.run(app, host=host, port=port, root_path=root_path)\n+ uvicorn.run(app, host=host, port=port)\n", "issue": "feast ui does not work on proxy subpath\n## Expected Behavior \r\n\r\nFeast UI should work when it is served behind a proxy, on a subpath e.g. `/feast-ui`\r\n\r\n## Current Behavior\r\n\r\nParts of the feast UI works behind a subpath, but not entirely (nothing is displayed, just the feast logo with a \"404\" text - refer to screenshot). No requests in the network tab of the web browser are hitting 404.\r\n\r\n\r\n\r\n## Steps to reproduce\r\n\r\nServe feast UI as you would e.g. `feature_store.serve_ui()`, optionally passing in the `root_path` parameter (it does not help).\r\n\r\nSet up an nginx pod with the following configuration (i.e. the nginx pod should have `/etc/nginx/conf.d/default.conf` with the following contents - `dummy_project` is the project name, and `http://feast-ui-service:8080` is where the feast UI can be accessed from your nginx pod / container):\r\n\r\n```\r\nserver {\r\n listen 80 default_server;\r\n\r\n location = /feast-ui/ {\r\n rewrite (.*) /feast-ui/p/dummy_project permanent;\r\n }\r\n\r\n location /feast-ui/ {\r\n proxy_pass http://feast-ui-service:8080/;\r\n }\r\n\r\n location / {\r\n proxy_pass http://feast-ui-service:8080/;\r\n }\r\n}\r\n```\r\n\r\nThis configuration works on localhost when nginx can listen on the root path `/`. However, note that the URL after all the redirects is wrong (it does not have the prefix).\r\n\r\n- The first block is required to force a redirect to the `/p/{project_name}`. Without this, the page will display 404 as above.\r\n- The second block is required to strip away `/feast-ui` so the UI app does not receive that path that it is not aware of\r\n- The third block is a trick to make this setup work in a local environment, because the app itself will redirect the user back to `/p/dummy_project` (without the prefix), which we then proxy into the feast UI app. However, in an actual environment, this setup does not work, because when the url does not contain the `/feast-ui` prefix, the ingress will not route it to the nginx pod, so the nginx pod cannot proxy the connection to the right place.\r\n\r\nIdeally, if the feast ui app is capable of being served on a subpath, only the second `location` block should be required in the nginx configuration. 
The first and third `location` blocks are workarounds.\r\n\r\n### Specifications\r\n\r\n- Version: 0.29.0\r\n\r\n## Possible Solution\r\n\r\nThe app should redirect to relative and not absolute paths\n", "before_files": [{"content": "import json\nimport threading\nfrom typing import Callable, Optional\n\nimport pkg_resources\nimport uvicorn\nfrom fastapi import FastAPI, Response\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.staticfiles import StaticFiles\n\nimport feast\n\n\ndef get_app(\n store: \"feast.FeatureStore\",\n get_registry_dump: Callable,\n project_id: str,\n registry_ttl_secs: int,\n host: str,\n port: int,\n):\n app = FastAPI()\n\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app is shutting down\n registry_proto = None\n shutting_down = False\n active_timer: Optional[threading.Timer] = None\n\n def async_refresh():\n store.refresh_registry()\n nonlocal registry_proto\n registry_proto = store.registry.proto()\n if shutting_down:\n return\n nonlocal active_timer\n active_timer = threading.Timer(registry_ttl_secs, async_refresh)\n active_timer.start()\n\n @app.on_event(\"shutdown\")\n def shutdown_event():\n nonlocal shutting_down\n shutting_down = True\n if active_timer:\n active_timer.cancel()\n\n async_refresh()\n\n ui_dir = pkg_resources.resource_filename(__name__, \"ui/build/\")\n # Initialize with the projects-list.json file\n with open(ui_dir + \"projects-list.json\", mode=\"w\") as f:\n projects_dict = {\n \"projects\": [\n {\n \"name\": \"Project\",\n \"description\": \"Test project\",\n \"id\": project_id,\n \"registryPath\": \"/registry\",\n }\n ]\n }\n f.write(json.dumps(projects_dict))\n\n @app.get(\"/registry\")\n def read_registry():\n return Response(\n content=registry_proto.SerializeToString(),\n media_type=\"application/octet-stream\",\n )\n\n # For all other paths (such as paths that would otherwise be handled by react router), pass to React\n @app.api_route(\"/p/{path_name:path}\", methods=[\"GET\"])\n def catch_all():\n filename = ui_dir + \"index.html\"\n\n with open(filename) as f:\n content = f.read()\n\n return Response(content, media_type=\"text/html\")\n\n app.mount(\n \"/\",\n StaticFiles(directory=ui_dir, html=True),\n name=\"site\",\n )\n\n return app\n\n\ndef start_server(\n store: \"feast.FeatureStore\",\n host: str,\n port: int,\n get_registry_dump: Callable,\n project_id: str,\n registry_ttl_sec: int,\n root_path: str = \"\",\n):\n app = get_app(\n store,\n get_registry_dump,\n project_id,\n registry_ttl_sec,\n host,\n port,\n )\n assert root_path is not None\n uvicorn.run(app, host=host, port=port, root_path=root_path)\n", "path": "sdk/python/feast/ui_server.py"}], "after_files": [{"content": "import json\nimport threading\nfrom typing import Callable, Optional\n\nimport pkg_resources\nimport uvicorn\nfrom fastapi import FastAPI, Response\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.staticfiles import StaticFiles\n\nimport feast\n\n\ndef get_app(\n store: \"feast.FeatureStore\",\n project_id: str,\n registry_ttl_secs: int,\n root_path: str = \"\",\n):\n app = FastAPI()\n\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app 
is shutting down\n registry_proto = None\n shutting_down = False\n active_timer: Optional[threading.Timer] = None\n\n def async_refresh():\n store.refresh_registry()\n nonlocal registry_proto\n registry_proto = store.registry.proto()\n if shutting_down:\n return\n nonlocal active_timer\n active_timer = threading.Timer(registry_ttl_secs, async_refresh)\n active_timer.start()\n\n @app.on_event(\"shutdown\")\n def shutdown_event():\n nonlocal shutting_down\n shutting_down = True\n if active_timer:\n active_timer.cancel()\n\n async_refresh()\n\n ui_dir = pkg_resources.resource_filename(__name__, \"ui/build/\")\n # Initialize with the projects-list.json file\n with open(ui_dir + \"projects-list.json\", mode=\"w\") as f:\n projects_dict = {\n \"projects\": [\n {\n \"name\": \"Project\",\n \"description\": \"Test project\",\n \"id\": project_id,\n \"registryPath\": f\"{root_path}/registry\",\n }\n ]\n }\n f.write(json.dumps(projects_dict))\n\n @app.get(\"/registry\")\n def read_registry():\n return Response(\n content=registry_proto.SerializeToString(),\n media_type=\"application/octet-stream\",\n )\n\n # For all other paths (such as paths that would otherwise be handled by react router), pass to React\n @app.api_route(\"/p/{path_name:path}\", methods=[\"GET\"])\n def catch_all():\n filename = ui_dir + \"index.html\"\n\n with open(filename) as f:\n content = f.read()\n\n return Response(content, media_type=\"text/html\")\n\n app.mount(\n \"/\",\n StaticFiles(directory=ui_dir, html=True),\n name=\"site\",\n )\n\n return app\n\n\ndef start_server(\n store: \"feast.FeatureStore\",\n host: str,\n port: int,\n get_registry_dump: Callable,\n project_id: str,\n registry_ttl_sec: int,\n root_path: str = \"\",\n):\n app = get_app(\n store,\n project_id,\n registry_ttl_sec,\n root_path,\n )\n uvicorn.run(app, host=host, port=port)\n", "path": "sdk/python/feast/ui_server.py"}]} | 1,779 | 281 |
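Editorial aside on the row above: the accepted patch makes the Feast UI subpath-aware by threading a `root_path` prefix into `get_app`, so that URLs returned to the browser (such as `registryPath`) keep the proxy prefix instead of pointing at the bare root. The sketch below illustrates that pattern in isolation; it is not Feast's actual code, and everything beyond the names visible in the diff (the `/projects-list` route, the example prefix) is assumed for illustration.

```python
# Minimal sketch, assuming fastapi and uvicorn are installed: build
# client-facing URLs with a root_path prefix so the app stays usable
# behind a reverse proxy that serves it under a subpath (e.g. /feast-ui).
import uvicorn
from fastapi import FastAPI
from fastapi.responses import JSONResponse


def get_app(root_path: str = "") -> FastAPI:
    app = FastAPI()

    @app.get("/projects-list")
    def projects_list():
        # Every URL handed back to the browser carries the prefix, so the
        # client never redirects to a path the ingress cannot route.
        return JSONResponse({"registryPath": f"{root_path}/registry"})

    return app


if __name__ == "__main__":
    uvicorn.run(get_app(root_path="/feast-ui"), host="0.0.0.0", port=8080)
```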
gh_patches_debug_18421 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2512 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
More 504s: on the results framework page
@Geerts reports on Skype: 504 Gateway timeout hunter strikes again: http://rsr.test.akvo.org/rest/v1/indicator_period_data_framework/?format=json&period__indicator__result__project=2780
Via: http://rsr.test.akvo.org/en/myrsr/results/2780/#results,13323,5679
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/indicator_period_data.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment
9
10 from ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,
11 IndicatorPeriodDataCommentSerializer)
12 from ..viewsets import PublicProjectViewSet
13
14 from django.http import HttpResponseForbidden
15
16 from rest_framework import status
17 from rest_framework.decorators import api_view, permission_classes
18 from rest_framework.response import Response
19
20
21 class IndicatorPeriodDataViewSet(PublicProjectViewSet):
22 """
23 """
24 queryset = IndicatorPeriodData.objects.all()
25 serializer_class = IndicatorPeriodDataSerializer
26
27 project_relation = 'period__indicator__result__project__'
28
29
30 class IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):
31 """
32 """
33 queryset = IndicatorPeriodData.objects.all()
34 serializer_class = IndicatorPeriodDataFrameworkSerializer
35 project_relation = 'period__indicator__result__project__'
36
37
38 class IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):
39 """
40 """
41 queryset = IndicatorPeriodDataComment.objects.all()
42 serializer_class = IndicatorPeriodDataCommentSerializer
43 project_relation = 'data__period__indicator__result__project__'
44
45
46 @api_view(['POST'])
47 def indicator_upload_file(request, pk=None):
48 """
49 Special API call for directly uploading a file.
50
51 :param request; A Django request object.
52 :param pk; The primary key of an IndicatorPeriodData instance.
53 """
54 update = IndicatorPeriodData.objects.get(pk=pk)
55 upload_file = request.data['file']
56
57 # Permissions
58 user = getattr(request, 'user', None)
59 if not user:
60 return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)
61
62 # TODO: Check if user is allowed to upload a file
63 # if not user.has_perm('rsr.change_project', update.period.indicator.result.project):
64 # return Response({'error': 'User has no permission to place an update'},
65 # status=status.HTTP_403_FORBIDDEN)
66
67 try:
68 file_type = request.POST.copy()['type']
69 if file_type == 'photo':
70 update.photo = upload_file
71 update.save(update_fields=['photo'])
72 return Response({'file': update.photo.url})
73 elif file_type == 'file':
74 update.file = upload_file
75 update.save(update_fields=['file'])
76 return Response({'file': update.file.url})
77 except Exception as e:
78 return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)
79
```
Path: `akvo/rest/views/partnership.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import Partnership
9
10 from ..serializers import PartnershipSerializer, PartnershipBasicSerializer
11 from ..viewsets import PublicProjectViewSet
12
13
14 class PartnershipViewSet(PublicProjectViewSet):
15 """
16 """
17 queryset = Partnership.objects.all()
18 serializer_class = PartnershipSerializer
19
20 def get_queryset(self):
21 """Allow filtering on partner_type."""
22 partner_type = self.request.query_params.get('partner_type', None)
23 if partner_type and partner_type in Partnership.PARTNER_TYPES_TO_ROLES_MAP.keys():
24 self.queryset = self.queryset.filter(
25 iati_organisation_role=Partnership.PARTNER_TYPES_TO_ROLES_MAP[partner_type]
26 ).distinct()
27 return super(PartnershipViewSet, self).get_queryset()
28
29
30 class PartnershipMoreLinkViewSet(PublicProjectViewSet):
31 """
32 Specific endpoint for the '+X partners' links in RSR. Contains the name, long name and logo of
33 an organisation and the partnership role.
34 """
35 queryset = Partnership.objects.all()
36 serializer_class = PartnershipBasicSerializer
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/indicator_period_data.py b/akvo/rest/views/indicator_period_data.py
--- a/akvo/rest/views/indicator_period_data.py
+++ b/akvo/rest/views/indicator_period_data.py
@@ -30,7 +30,13 @@
class IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):
"""
"""
- queryset = IndicatorPeriodData.objects.all()
+ queryset = IndicatorPeriodData.objects.select_related(
+ 'period',
+ 'user'
+ ).prefetch_related(
+ 'comments',
+ 'comments__user'
+ ).all()
serializer_class = IndicatorPeriodDataFrameworkSerializer
project_relation = 'period__indicator__result__project__'
diff --git a/akvo/rest/views/partnership.py b/akvo/rest/views/partnership.py
--- a/akvo/rest/views/partnership.py
+++ b/akvo/rest/views/partnership.py
@@ -14,7 +14,7 @@
class PartnershipViewSet(PublicProjectViewSet):
"""
"""
- queryset = Partnership.objects.all()
+ queryset = Partnership.objects.select_related('organisation', 'project').all()
serializer_class = PartnershipSerializer
def get_queryset(self):
| {"golden_diff": "diff --git a/akvo/rest/views/indicator_period_data.py b/akvo/rest/views/indicator_period_data.py\n--- a/akvo/rest/views/indicator_period_data.py\n+++ b/akvo/rest/views/indicator_period_data.py\n@@ -30,7 +30,13 @@\n class IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n- queryset = IndicatorPeriodData.objects.all()\n+ queryset = IndicatorPeriodData.objects.select_related(\n+ 'period',\n+ 'user'\n+ ).prefetch_related(\n+ 'comments',\n+ 'comments__user'\n+ ).all()\n serializer_class = IndicatorPeriodDataFrameworkSerializer\n project_relation = 'period__indicator__result__project__'\n \ndiff --git a/akvo/rest/views/partnership.py b/akvo/rest/views/partnership.py\n--- a/akvo/rest/views/partnership.py\n+++ b/akvo/rest/views/partnership.py\n@@ -14,7 +14,7 @@\n class PartnershipViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n- queryset = Partnership.objects.all()\n+ queryset = Partnership.objects.select_related('organisation', 'project').all()\n serializer_class = PartnershipSerializer\n \n def get_queryset(self):\n", "issue": "More 504s: on the results framework page\n@Geerts reports on Skype: 504 Gateway timeout hunter strikes again: http://rsr.test.akvo.org/rest/v1/indicator_period_data_framework/?format=json&period__indicator__result__project=2780\r\n\r\nVia: http://rsr.test.akvo.org/en/myrsr/results/2780/#results,13323,5679\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment\n\nfrom ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,\n IndicatorPeriodDataCommentSerializer)\nfrom ..viewsets import PublicProjectViewSet\n\nfrom django.http import HttpResponseForbidden\n\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\n\n\nclass IndicatorPeriodDataViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = IndicatorPeriodDataSerializer\n\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = IndicatorPeriodDataFrameworkSerializer\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodDataComment.objects.all()\n serializer_class = IndicatorPeriodDataCommentSerializer\n project_relation = 'data__period__indicator__result__project__'\n\n\n@api_view(['POST'])\ndef indicator_upload_file(request, pk=None):\n \"\"\"\n Special API call for directly uploading a file.\n\n :param request; A Django request object.\n :param pk; The primary key of an IndicatorPeriodData instance.\n \"\"\"\n update = IndicatorPeriodData.objects.get(pk=pk)\n upload_file = request.data['file']\n\n # Permissions\n user = getattr(request, 'user', None)\n if not user:\n return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)\n\n # TODO: Check if user is allowed to upload a file\n # if not user.has_perm('rsr.change_project', 
update.period.indicator.result.project):\n # return Response({'error': 'User has no permission to place an update'},\n # status=status.HTTP_403_FORBIDDEN)\n\n try:\n file_type = request.POST.copy()['type']\n if file_type == 'photo':\n update.photo = upload_file\n update.save(update_fields=['photo'])\n return Response({'file': update.photo.url})\n elif file_type == 'file':\n update.file = upload_file\n update.save(update_fields=['file'])\n return Response({'file': update.file.url})\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n", "path": "akvo/rest/views/indicator_period_data.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import Partnership\n\nfrom ..serializers import PartnershipSerializer, PartnershipBasicSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass PartnershipViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = Partnership.objects.all()\n serializer_class = PartnershipSerializer\n\n def get_queryset(self):\n \"\"\"Allow filtering on partner_type.\"\"\"\n partner_type = self.request.query_params.get('partner_type', None)\n if partner_type and partner_type in Partnership.PARTNER_TYPES_TO_ROLES_MAP.keys():\n self.queryset = self.queryset.filter(\n iati_organisation_role=Partnership.PARTNER_TYPES_TO_ROLES_MAP[partner_type]\n ).distinct()\n return super(PartnershipViewSet, self).get_queryset()\n\n\nclass PartnershipMoreLinkViewSet(PublicProjectViewSet):\n \"\"\"\n Specific endpoint for the '+X partners' links in RSR. 
Contains the name, long name and logo of\n an organisation and the partnership role.\n \"\"\"\n queryset = Partnership.objects.all()\n serializer_class = PartnershipBasicSerializer\n", "path": "akvo/rest/views/partnership.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment\n\nfrom ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,\n IndicatorPeriodDataCommentSerializer)\nfrom ..viewsets import PublicProjectViewSet\n\nfrom django.http import HttpResponseForbidden\n\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\n\n\nclass IndicatorPeriodDataViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = IndicatorPeriodDataSerializer\n\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.select_related(\n 'period',\n 'user'\n ).prefetch_related(\n 'comments',\n 'comments__user'\n ).all()\n serializer_class = IndicatorPeriodDataFrameworkSerializer\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodDataComment.objects.all()\n serializer_class = IndicatorPeriodDataCommentSerializer\n project_relation = 'data__period__indicator__result__project__'\n\n\n@api_view(['POST'])\ndef indicator_upload_file(request, pk=None):\n \"\"\"\n Special API call for directly uploading a file.\n\n :param request; A Django request object.\n :param pk; The primary key of an IndicatorPeriodData instance.\n \"\"\"\n update = IndicatorPeriodData.objects.get(pk=pk)\n upload_file = request.data['file']\n\n # Permissions\n user = getattr(request, 'user', None)\n if not user:\n return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)\n\n # TODO: Check if user is allowed to upload a file\n # if not user.has_perm('rsr.change_project', update.period.indicator.result.project):\n # return Response({'error': 'User has no permission to place an update'},\n # status=status.HTTP_403_FORBIDDEN)\n\n try:\n file_type = request.POST.copy()['type']\n if file_type == 'photo':\n update.photo = upload_file\n update.save(update_fields=['photo'])\n return Response({'file': update.photo.url})\n elif file_type == 'file':\n update.file = upload_file\n update.save(update_fields=['file'])\n return Response({'file': update.file.url})\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n", "path": "akvo/rest/views/indicator_period_data.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import Partnership\n\nfrom ..serializers import PartnershipSerializer, PartnershipBasicSerializer\nfrom ..viewsets import 
PublicProjectViewSet\n\n\nclass PartnershipViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = Partnership.objects.select_related('organisation', 'project').all()\n serializer_class = PartnershipSerializer\n\n def get_queryset(self):\n \"\"\"Allow filtering on partner_type.\"\"\"\n partner_type = self.request.query_params.get('partner_type', None)\n if partner_type and partner_type in Partnership.PARTNER_TYPES_TO_ROLES_MAP.keys():\n self.queryset = self.queryset.filter(\n iati_organisation_role=Partnership.PARTNER_TYPES_TO_ROLES_MAP[partner_type]\n ).distinct()\n return super(PartnershipViewSet, self).get_queryset()\n\n\nclass PartnershipMoreLinkViewSet(PublicProjectViewSet):\n \"\"\"\n Specific endpoint for the '+X partners' links in RSR. Contains the name, long name and logo of\n an organisation and the partnership role.\n \"\"\"\n queryset = Partnership.objects.all()\n serializer_class = PartnershipBasicSerializer\n", "path": "akvo/rest/views/partnership.py"}]} | 1,483 | 269 |
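Editorial aside on the row above: the accepted patch trades many small queries for a few joined ones by adding `select_related`/`prefetch_related` to the querysets the serializers walk, which is the usual cure for N+1-driven gateway timeouts. The sketch below shows the general Django pattern with made-up model names; it assumes a configured Django project and is not Akvo RSR's actual schema.

```python
# Minimal sketch with illustrative models (assumes a configured Django
# project). select_related() JOINs single-valued relations up front;
# prefetch_related() batches multi-valued ones in a second query.
from django.db import models


class Period(models.Model):
    label = models.CharField(max_length=50)


class PeriodData(models.Model):
    period = models.ForeignKey(Period, on_delete=models.CASCADE)


def n_plus_one():
    # 1 query for PeriodData, plus 1 extra query per row for .period
    return [d.period.label for d in PeriodData.objects.all()]


def joined():
    # 1 JOINed query covers everything the serializer will touch
    return [
        d.period.label
        for d in PeriodData.objects.select_related("period")
    ]
```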
gh_patches_debug_33321 | rasdani/github-patches | git_diff | fedora-infra__bodhi-3173 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support Sourceware bugs in Fedora enhanced markdown
Many key projects reside on sourceware.org, including glibc, gdb, binutils, elfutils, libabigail, and systemtap.
Could you please add markdown support for sourceware.org bugzilla (https://sourceware.org/bugzilla/)?
I suggest a unified markup of SWBZ#XXXX or SW#XXXX for all projects on the main sourceware bugzilla instance.
Likewise, GCC compiler bugs are also hosted on sourceware but use a distinct Bugzilla instance (https://gcc.gnu.org/bugzilla/).
I suggest a markup of GCC#XXXX for gcc bugs.
Thank you!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/server/ffmarkdown.py`
Content:
```
1 # Copyright © 2014-2019 Red Hat, Inc. and others.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
18 # USA.
19 """
20 Fedora-flavored Markdown.
21
22 Author: Ralph Bean <[email protected]>
23 """
24
25 from markdown.extensions import Extension
26 import markdown.inlinepatterns
27 import markdown.postprocessors
28 import markdown.util
29 import pyramid.threadlocal
30
31 from bodhi import MENTION_RE
32
33
34 BUGZILLA_RE = r'([a-zA-Z]+)(#[0-9]{5,})'
35
36
37 def user_url(name):
38 """
39 Return a URL to the given username.
40
41 Args:
42 name (basestring): The username of the user we want a URL for.
43 Returns:
44 basestring: A URL to the requested user.
45 """
46 request = pyramid.threadlocal.get_current_request()
47 return request.route_url('user', name=name)
48
49
50 def bug_url(tracker, idx):
51 """
52 Return the URL for the given bug.
53
54 Args:
55 tracker (basestring): Which bug tracker is being referenced. May be any of 'fedora',
56 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', or 'rhbz'.
57 idx (basestring or int): The bug number.
58 Returns:
59 basestring: The URL of the given bug.
60 Raises:
61 KeyError: If the given tracker is not supported by this function.
62 """
63 try:
64 return {
65 'fedora': "https://bugzilla.redhat.com/show_bug.cgi?id=%s",
66 'gnome': "https://bugzilla.gnome.org/show_bug.cgi?id=%s",
67 'kde': "https://bugs.kde.org/show_bug.cgi?id=%s",
68 'mozilla': "https://bugzilla.mozilla.org/show_bug.cgi?id=%s",
69 'pear': "http://pear.php.net/bugs/bug.php?id=%s",
70 'perl': "https://rt.cpan.org/Public/Bug/Display.html?id=%s",
71 'php': "https://bugs.php.net/bug.php?id=%s",
72 'python': "https://bugs.python.org/issue%s",
73 'rh': "https://bugzilla.redhat.com/show_bug.cgi?id=%s",
74 'rhbz': "https://bugzilla.redhat.com/show_bug.cgi?id=%s"}[tracker.lower()] % idx
75
76 except KeyError:
77 return None
78
79
80 class MentionPattern(markdown.inlinepatterns.Pattern):
81 """Match username mentions and point to their profiles."""
82
83 def handleMatch(self, m):
84 """
85 Build and return an Element that links to the matched User's profile.
86
87 Args:
88 m (re.MatchObject): The regex match on the username.
89 Return:
90 xml.etree.Element: An html anchor referencing the user's profile.
91 """
92 el = markdown.util.etree.Element("a")
93 name = markdown.util.AtomicString(m.group(2))
94 el.set('href', user_url(name[1:]))
95 el.text = name
96 return el
97
98
99 class BugzillaPattern(markdown.inlinepatterns.Pattern):
100 """Match bug tracker patterns."""
101
102 def handleMatch(self, m):
103 """
104 Build and return an Element that links to the referenced bug.
105
106 Args:
107 m (re.MatchObject): The regex match on the bug.
108 Returns:
109 xml.etree.Element: An html anchor referencing the matched bug.
110 """
111 tracker = markdown.util.AtomicString(m.group(2))
112 idx = markdown.util.AtomicString(m.group(3))
113 url = bug_url(tracker, idx[1:])
114
115 if url is None:
116 return tracker + idx
117
118 el = markdown.util.etree.Element("a")
119 el.set('href', url)
120 el.text = idx
121 return el
122
123
124 class SurroundProcessor(markdown.postprocessors.Postprocessor):
125 """A postprocessor to surround the text with a markdown <div>."""
126
127 def run(self, text):
128 """
129 Return text wrapped in a <div> with a markdown class.
130
131 Args:
132 text (str): The text to wrap in a <div>.
133 Returns:
134 str: The text wrapped in a <div>.
135 """
136 return "<div class='markdown'>" + text + "</div>"
137
138
139 class BodhiExtension(Extension):
140 """Bodhi's markdown Extension."""
141
142 def extendMarkdown(self, md, md_globals):
143 """
144 Extend markdown to add our patterns and postprocessor.
145
146 Args:
147 md (Markdown): An instance of the Markdown class.
148 md_globals (dict): Contains all the various global variables within the markdown module.
149 """
150 md.inlinePatterns.add('mention', MentionPattern(MENTION_RE, md), '_end')
151 md.inlinePatterns.add('bugzilla', BugzillaPattern(BUGZILLA_RE, md), '_end')
152 md.postprocessors.add('surround', SurroundProcessor(md), '_end')
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bodhi/server/ffmarkdown.py b/bodhi/server/ffmarkdown.py
--- a/bodhi/server/ffmarkdown.py
+++ b/bodhi/server/ffmarkdown.py
@@ -53,7 +53,8 @@
Args:
tracker (basestring): Which bug tracker is being referenced. May be any of 'fedora',
- 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', or 'rhbz'.
+ 'gcc', 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', 'rhbz'
+ or 'sourceware'.
idx (basestring or int): The bug number.
Returns:
basestring: The URL of the given bug.
@@ -61,17 +62,21 @@
KeyError: If the given tracker is not supported by this function.
"""
try:
- return {
+ trackers = {
'fedora': "https://bugzilla.redhat.com/show_bug.cgi?id=%s",
+ 'gcc': "https://gcc.gnu.org/bugzilla/show_bug.cgi?id=%s",
'gnome': "https://bugzilla.gnome.org/show_bug.cgi?id=%s",
'kde': "https://bugs.kde.org/show_bug.cgi?id=%s",
'mozilla': "https://bugzilla.mozilla.org/show_bug.cgi?id=%s",
- 'pear': "http://pear.php.net/bugs/bug.php?id=%s",
+ 'pear': "https://pear.php.net/bugs/bug.php?id=%s",
'perl': "https://rt.cpan.org/Public/Bug/Display.html?id=%s",
'php': "https://bugs.php.net/bug.php?id=%s",
'python': "https://bugs.python.org/issue%s",
'rh': "https://bugzilla.redhat.com/show_bug.cgi?id=%s",
- 'rhbz': "https://bugzilla.redhat.com/show_bug.cgi?id=%s"}[tracker.lower()] % idx
+ 'rhbz': "https://bugzilla.redhat.com/show_bug.cgi?id=%s",
+ 'sourceware': "https://sourceware.org/bugzilla/show_bug.cgi?id=%s"}
+
+ return trackers[tracker.lower()] % idx
except KeyError:
return None
| {"golden_diff": "diff --git a/bodhi/server/ffmarkdown.py b/bodhi/server/ffmarkdown.py\n--- a/bodhi/server/ffmarkdown.py\n+++ b/bodhi/server/ffmarkdown.py\n@@ -53,7 +53,8 @@\n \n Args:\n tracker (basestring): Which bug tracker is being referenced. May be any of 'fedora',\n- 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', or 'rhbz'.\n+ 'gcc', 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', 'rhbz'\n+ or 'sourceware'.\n idx (basestring or int): The bug number.\n Returns:\n basestring: The URL of the given bug.\n@@ -61,17 +62,21 @@\n KeyError: If the given tracker is not supported by this function.\n \"\"\"\n try:\n- return {\n+ trackers = {\n 'fedora': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n+ 'gcc': \"https://gcc.gnu.org/bugzilla/show_bug.cgi?id=%s\",\n 'gnome': \"https://bugzilla.gnome.org/show_bug.cgi?id=%s\",\n 'kde': \"https://bugs.kde.org/show_bug.cgi?id=%s\",\n 'mozilla': \"https://bugzilla.mozilla.org/show_bug.cgi?id=%s\",\n- 'pear': \"http://pear.php.net/bugs/bug.php?id=%s\",\n+ 'pear': \"https://pear.php.net/bugs/bug.php?id=%s\",\n 'perl': \"https://rt.cpan.org/Public/Bug/Display.html?id=%s\",\n 'php': \"https://bugs.php.net/bug.php?id=%s\",\n 'python': \"https://bugs.python.org/issue%s\",\n 'rh': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n- 'rhbz': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\"}[tracker.lower()] % idx\n+ 'rhbz': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n+ 'sourceware': \"https://sourceware.org/bugzilla/show_bug.cgi?id=%s\"}\n+\n+ return trackers[tracker.lower()] % idx\n \n except KeyError:\n return None\n", "issue": "Support Sourceware bugs in Fedora enhanced markdown\nMany key projects reside on sourceware.org including glibc, gdb, binutils, elfutils, libabigail, systemtap etc.\r\n\r\nCould you please add markdown support for sourceware.org bugzilla (https://sourceware.org/bugzilla/)?\r\n\r\nI suggest a unified markup of SWBZ#XXXX or SW#XXXX for all projects on the main sourceware bugzilla instance.\r\n\r\nLikewise gcc compiler bugs are also on sourceware but use a distinct instance (https://gcc.gnu.org/bugzilla/)\r\n\r\nI suggest a markup of GCC#XXXX for gcc bugs.\r\n\r\nThank you!\n", "before_files": [{"content": "# Copyright \u00a9 2014-2019 Red Hat, Inc. and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,\n# USA.\n\"\"\"\nFedora-flavored Markdown.\n\nAuthor: Ralph Bean <[email protected]>\n\"\"\"\n\nfrom markdown.extensions import Extension\nimport markdown.inlinepatterns\nimport markdown.postprocessors\nimport markdown.util\nimport pyramid.threadlocal\n\nfrom bodhi import MENTION_RE\n\n\nBUGZILLA_RE = r'([a-zA-Z]+)(#[0-9]{5,})'\n\n\ndef user_url(name):\n \"\"\"\n Return a URL to the given username.\n\n Args:\n name (basestring): The username of the user we want a URL for.\n Returns:\n basestring: A URL to the requested user.\n \"\"\"\n request = pyramid.threadlocal.get_current_request()\n return request.route_url('user', name=name)\n\n\ndef bug_url(tracker, idx):\n \"\"\"\n Return the URL for the given bug.\n\n Args:\n tracker (basestring): Which bug tracker is being referenced. May be any of 'fedora',\n 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', or 'rhbz'.\n idx (basestring or int): The bug number.\n Returns:\n basestring: The URL of the given bug.\n Raises:\n KeyError: If the given tracker is not supported by this function.\n \"\"\"\n try:\n return {\n 'fedora': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n 'gnome': \"https://bugzilla.gnome.org/show_bug.cgi?id=%s\",\n 'kde': \"https://bugs.kde.org/show_bug.cgi?id=%s\",\n 'mozilla': \"https://bugzilla.mozilla.org/show_bug.cgi?id=%s\",\n 'pear': \"http://pear.php.net/bugs/bug.php?id=%s\",\n 'perl': \"https://rt.cpan.org/Public/Bug/Display.html?id=%s\",\n 'php': \"https://bugs.php.net/bug.php?id=%s\",\n 'python': \"https://bugs.python.org/issue%s\",\n 'rh': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n 'rhbz': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\"}[tracker.lower()] % idx\n\n except KeyError:\n return None\n\n\nclass MentionPattern(markdown.inlinepatterns.Pattern):\n \"\"\"Match username mentions and point to their profiles.\"\"\"\n\n def handleMatch(self, m):\n \"\"\"\n Build and return an Element that links to the matched User's profile.\n\n Args:\n m (re.MatchObject): The regex match on the username.\n Return:\n xml.etree.Element: An html anchor referencing the user's profile.\n \"\"\"\n el = markdown.util.etree.Element(\"a\")\n name = markdown.util.AtomicString(m.group(2))\n el.set('href', user_url(name[1:]))\n el.text = name\n return el\n\n\nclass BugzillaPattern(markdown.inlinepatterns.Pattern):\n \"\"\"Match bug tracker patterns.\"\"\"\n\n def handleMatch(self, m):\n \"\"\"\n Build and return an Element that links to the referenced bug.\n\n Args:\n m (re.MatchObject): The regex match on the bug.\n Returns:\n xml.etree.Element: An html anchor referencing the matched bug.\n \"\"\"\n tracker = markdown.util.AtomicString(m.group(2))\n idx = markdown.util.AtomicString(m.group(3))\n url = bug_url(tracker, idx[1:])\n\n if url is None:\n return tracker + idx\n\n el = markdown.util.etree.Element(\"a\")\n el.set('href', url)\n el.text = idx\n return el\n\n\nclass SurroundProcessor(markdown.postprocessors.Postprocessor):\n \"\"\"A postprocessor to surround the text with a markdown <div>.\"\"\"\n\n def run(self, text):\n \"\"\"\n Return text wrapped in a <div> with a markdown class.\n\n Args:\n text (str): The text to wrap in a <div>.\n Returns:\n str: The text wrapped in a <div>.\n \"\"\"\n return \"<div class='markdown'>\" + text 
+ \"</div>\"\n\n\nclass BodhiExtension(Extension):\n \"\"\"Bodhi's markdown Extension.\"\"\"\n\n def extendMarkdown(self, md, md_globals):\n \"\"\"\n Extend markdown to add our patterns and postprocessor.\n\n Args:\n md (Markdown): An instance of the Markdown class.\n md_globals (dict): Contains all the various global variables within the markdown module.\n \"\"\"\n md.inlinePatterns.add('mention', MentionPattern(MENTION_RE, md), '_end')\n md.inlinePatterns.add('bugzilla', BugzillaPattern(BUGZILLA_RE, md), '_end')\n md.postprocessors.add('surround', SurroundProcessor(md), '_end')\n", "path": "bodhi/server/ffmarkdown.py"}], "after_files": [{"content": "# Copyright \u00a9 2014-2019 Red Hat, Inc. and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,\n# USA.\n\"\"\"\nFedora-flavored Markdown.\n\nAuthor: Ralph Bean <[email protected]>\n\"\"\"\n\nfrom markdown.extensions import Extension\nimport markdown.inlinepatterns\nimport markdown.postprocessors\nimport markdown.util\nimport pyramid.threadlocal\n\nfrom bodhi import MENTION_RE\n\n\nBUGZILLA_RE = r'([a-zA-Z]+)(#[0-9]{5,})'\n\n\ndef user_url(name):\n \"\"\"\n Return a URL to the given username.\n\n Args:\n name (basestring): The username of the user we want a URL for.\n Returns:\n basestring: A URL to the requested user.\n \"\"\"\n request = pyramid.threadlocal.get_current_request()\n return request.route_url('user', name=name)\n\n\ndef bug_url(tracker, idx):\n \"\"\"\n Return the URL for the given bug.\n\n Args:\n tracker (basestring): Which bug tracker is being referenced. 
May be any of 'fedora',\n 'gcc', 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', 'rhbz'\n or 'sourceware'.\n idx (basestring or int): The bug number.\n Returns:\n basestring: The URL of the given bug.\n Raises:\n KeyError: If the given tracker is not supported by this function.\n \"\"\"\n try:\n trackers = {\n 'fedora': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n 'gcc': \"https://gcc.gnu.org/bugzilla/show_bug.cgi?id=%s\",\n 'gnome': \"https://bugzilla.gnome.org/show_bug.cgi?id=%s\",\n 'kde': \"https://bugs.kde.org/show_bug.cgi?id=%s\",\n 'mozilla': \"https://bugzilla.mozilla.org/show_bug.cgi?id=%s\",\n 'pear': \"https://pear.php.net/bugs/bug.php?id=%s\",\n 'perl': \"https://rt.cpan.org/Public/Bug/Display.html?id=%s\",\n 'php': \"https://bugs.php.net/bug.php?id=%s\",\n 'python': \"https://bugs.python.org/issue%s\",\n 'rh': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n 'rhbz': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n 'sourceware': \"https://sourceware.org/bugzilla/show_bug.cgi?id=%s\"}\n\n return trackers[tracker.lower()] % idx\n\n except KeyError:\n return None\n\n\nclass MentionPattern(markdown.inlinepatterns.Pattern):\n \"\"\"Match username mentions and point to their profiles.\"\"\"\n\n def handleMatch(self, m):\n \"\"\"\n Build and return an Element that links to the matched User's profile.\n\n Args:\n m (re.MatchObject): The regex match on the username.\n Return:\n xml.etree.Element: An html anchor referencing the user's profile.\n \"\"\"\n el = markdown.util.etree.Element(\"a\")\n name = markdown.util.AtomicString(m.group(2))\n el.set('href', user_url(name[1:]))\n el.text = name\n return el\n\n\nclass BugzillaPattern(markdown.inlinepatterns.Pattern):\n \"\"\"Match bug tracker patterns.\"\"\"\n\n def handleMatch(self, m):\n \"\"\"\n Build and return an Element that links to the referenced bug.\n\n Args:\n m (re.MatchObject): The regex match on the bug.\n Returns:\n xml.etree.Element: An html anchor referencing the matched bug.\n \"\"\"\n tracker = markdown.util.AtomicString(m.group(2))\n idx = markdown.util.AtomicString(m.group(3))\n url = bug_url(tracker, idx[1:])\n\n if url is None:\n return tracker + idx\n\n el = markdown.util.etree.Element(\"a\")\n el.set('href', url)\n el.text = idx\n return el\n\n\nclass SurroundProcessor(markdown.postprocessors.Postprocessor):\n \"\"\"A postprocessor to surround the text with a markdown <div>.\"\"\"\n\n def run(self, text):\n \"\"\"\n Return text wrapped in a <div> with a markdown class.\n\n Args:\n text (str): The text to wrap in a <div>.\n Returns:\n str: The text wrapped in a <div>.\n \"\"\"\n return \"<div class='markdown'>\" + text + \"</div>\"\n\n\nclass BodhiExtension(Extension):\n \"\"\"Bodhi's markdown Extension.\"\"\"\n\n def extendMarkdown(self, md, md_globals):\n \"\"\"\n Extend markdown to add our patterns and postprocessor.\n\n Args:\n md (Markdown): An instance of the Markdown class.\n md_globals (dict): Contains all the various global variables within the markdown module.\n \"\"\"\n md.inlinePatterns.add('mention', MentionPattern(MENTION_RE, md), '_end')\n md.inlinePatterns.add('bugzilla', BugzillaPattern(BUGZILLA_RE, md), '_end')\n md.postprocessors.add('surround', SurroundProcessor(md), '_end')\n", "path": "bodhi/server/ffmarkdown.py"}]} | 1,957 | 524 |
gh_patches_debug_60611 | rasdani/github-patches | git_diff | cloudtools__troposphere-605 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: IAM Role title should not be validated
The title specified in troposphere is not the actual physical resource name and should not be validated as such (https://github.com/cloudtools/troposphere/blob/fe72f7d3f7b0711a22173c1240134173aafef574/troposphere/iam.py#L75-L77).
The next snippet was created today using `troposphere==1.5.0` and `boto3==1.2.6`:
``` python
>>> import boto3
>>> import troposphere
>>> import troposphere.iam
>>> import time
>>>
>>> long_title = 'VeryLongName{}'.format('Z' * 100)
>>> print len(long_title)
112
>>>
>>> # create a role
...
>>> role = troposphere.iam.Role(
... long_title,
... AssumeRolePolicyDocument={
... "Statement": [{
... "Action": ["sts:AssumeRole"],
... "Effect": "Allow",
... "Principal": {"Service": ["ec2.amazonaws.com"]}
... }]
... })
Traceback (most recent call last):
File "<stdin>", line 8, in <module>
File "/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/__init__.py", line 44, in __init__
self.validate_title()
File "/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/iam.py", line 77, in validate_title
iam_role_name(self.title)
File "/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/validators.py", line 98, in iam_role_name
raise ValueError('IAM Role Name may not exceed 64 characters')
ValueError: IAM Role Name may not exceed 64 characters
>>>
>>>
>>> # delete validator
...
>>> del troposphere.iam.Role.validate_title
>>> # try again
...
>>> role = troposphere.iam.Role(
... long_title,
... AssumeRolePolicyDocument={
... "Statement": [{
... "Action": ["sts:AssumeRole"],
... "Effect": "Allow",
... "Principal": {"Service": ["ec2.amazonaws.com"]}
... }]
... })
>>> template = troposphere.Template()
>>> template.add_resource(role)
<troposphere.iam.Role object at 0x10ee02990>
>>> print template.to_json()
{
"Resources": {
"VeryLongNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ": {
"Properties": {
"AssumeRolePolicyDocument": {
"Statement": [
{
"Action": [
"sts:AssumeRole"
],
"Effect": "Allow",
"Principal": {
"Service": [
"ec2.amazonaws.com"
]
}
}
]
}
},
"Type": "AWS::IAM::Role"
}
}
}
>>> client = boto3.client('cloudformation', 'us-east-1')
>>> stack = client.create_stack(
... StackName='testTroposphere',
... TemplateBody=template.to_json(),
... Capabilities=['CAPABILITY_IAM'])
>>>
>>> while client.describe_stacks(StackName=stack['StackId'])['Stacks'][0]['StackStatus'] != 'CREATE_COMPLETE':
... import time
... time.sleep(1)
...
>>> resources = client.describe_stack_resources(StackName=stack['StackId'])
>>> for r in resources['StackResources']:
... physical_id = r['PhysicalResourceId']
... print("{} ({} chars)".format(physical_id, len(physical_id)))
...
testTroposphere-VeryLongNameZZZZZZZZZZZZZZZZZZZZZZ-PTHEM9FPNX28 (63 chars)
```
The snippet above shows that the physical id was chosen by CloudFormation and isn't just a trimmed version of the title (it includes a random part too).
--- END ISSUE ---
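For quick reference, here is a minimal sketch of the behaviour the reporter is asking for: the resource title acts only as the CloudFormation logical ID, while the 64-character IAM limit applies to the optional `RoleName` property, which CloudFormation generates when omitted. The snippet assumes a troposphere build without the title length check (i.e. the patched behaviour) and uses placeholder names.
```python
# Minimal sketch (assumes the title validator has been removed, as requested):
# the long title is only the CloudFormation logical ID; the IAM name limit
# applies to the optional RoleName property, generated by CloudFormation when omitted.
import troposphere
import troposphere.iam

role = troposphere.iam.Role(
    "VeryLongLogicalId" + "Z" * 100,  # logical ID, not the physical IAM role name
    AssumeRolePolicyDocument={
        "Statement": [{
            "Action": ["sts:AssumeRole"],
            "Effect": "Allow",
            "Principal": {"Service": ["ec2.amazonaws.com"]},
        }]
    },
    # RoleName="my-short-role-name",  # only this value is bound by the 64-char IAM limit
)

template = troposphere.Template()
template.add_resource(role)
print(template.to_json())
```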
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/iam.py`
Content:
```
1 # Copyright (c) 2012-2013, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSObject, AWSProperty
7 from .validators import integer, boolean, status
8 from .validators import iam_path, iam_role_name, iam_group_name, iam_user_name
9
10 try:
11 from awacs.aws import Policy
12 policytypes = (dict, Policy)
13 except ImportError:
14 policytypes = dict,
15
16
17 Active = "Active"
18 Inactive = "Inactive"
19
20
21 class AccessKey(AWSObject):
22 resource_type = "AWS::IAM::AccessKey"
23
24 props = {
25 'Serial': (integer, False),
26 'Status': (status, False),
27 'UserName': (basestring, True),
28 }
29
30
31 class PolicyType(AWSObject):
32 resource_type = "AWS::IAM::Policy"
33
34 props = {
35 'Groups': ([basestring], False),
36 'PolicyDocument': (policytypes, True),
37 'PolicyName': (basestring, True),
38 'Roles': ([basestring], False),
39 'Users': ([basestring], False),
40 }
41
42
43 class Policy(AWSProperty):
44 props = {
45 'PolicyDocument': (policytypes, True),
46 'PolicyName': (basestring, True),
47 }
48
49 PolicyProperty = Policy
50
51
52 class Group(AWSObject):
53 def validate_title(self):
54 iam_group_name(self.title)
55
56 resource_type = "AWS::IAM::Group"
57
58 props = {
59 'GroupName': (iam_group_name, False),
60 'ManagedPolicyArns': ([basestring], False),
61 'Path': (iam_path, False),
62 'Policies': ([Policy], False),
63 }
64
65
66 class InstanceProfile(AWSObject):
67 resource_type = "AWS::IAM::InstanceProfile"
68
69 props = {
70 'Path': (iam_path, False),
71 'Roles': (list, True),
72 }
73
74
75 class Role(AWSObject):
76 def validate_title(self):
77 iam_role_name(self.title)
78
79 resource_type = "AWS::IAM::Role"
80
81 props = {
82 'AssumeRolePolicyDocument': (policytypes, True),
83 'ManagedPolicyArns': ([basestring], False),
84 'Path': (iam_path, False),
85 'Policies': ([Policy], False),
86 'RoleName': (iam_role_name, False),
87 }
88
89
90 class LoginProfile(AWSProperty):
91 props = {
92 'Password': (basestring, True),
93 'PasswordResetRequired': (boolean, False),
94 }
95
96
97 class User(AWSObject):
98 resource_type = "AWS::IAM::User"
99
100 props = {
101 'Path': (iam_path, False),
102 'Groups': ([basestring], False),
103 'ManagedPolicyArns': ([basestring], False),
104 'LoginProfile': (LoginProfile, False),
105 'Policies': ([Policy], False),
106 'UserName': (iam_user_name, False),
107 }
108
109
110 class UserToGroupAddition(AWSObject):
111 resource_type = "AWS::IAM::UserToGroupAddition"
112
113 props = {
114 'GroupName': (basestring, True),
115 'Users': (list, True),
116 }
117
118
119 class ManagedPolicy(AWSObject):
120 resource_type = "AWS::IAM::ManagedPolicy"
121
122 props = {
123 'Description': (basestring, False),
124 'Groups': ([basestring], False),
125 'Path': (iam_path, False),
126 'PolicyDocument': (policytypes, True),
127 'Roles': ([basestring], False),
128 'Users': ([basestring], False),
129 }
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/troposphere/iam.py b/troposphere/iam.py
--- a/troposphere/iam.py
+++ b/troposphere/iam.py
@@ -50,9 +50,6 @@
class Group(AWSObject):
- def validate_title(self):
- iam_group_name(self.title)
-
resource_type = "AWS::IAM::Group"
props = {
@@ -73,9 +70,6 @@
class Role(AWSObject):
- def validate_title(self):
- iam_role_name(self.title)
-
resource_type = "AWS::IAM::Role"
props = {
| {"golden_diff": "diff --git a/troposphere/iam.py b/troposphere/iam.py\n--- a/troposphere/iam.py\n+++ b/troposphere/iam.py\n@@ -50,9 +50,6 @@\n \n \n class Group(AWSObject):\n- def validate_title(self):\n- iam_group_name(self.title)\n-\n resource_type = \"AWS::IAM::Group\"\n \n props = {\n@@ -73,9 +70,6 @@\n \n \n class Role(AWSObject):\n- def validate_title(self):\n- iam_role_name(self.title)\n-\n resource_type = \"AWS::IAM::Role\"\n \n props = {\n", "issue": "BUG: IAM Role title should not be validated\nThe title specified in troposphere is not the actual physical resource name and should be validated as such (https://github.com/cloudtools/troposphere/blob/fe72f7d3f7b0711a22173c1240134173aafef574/troposphere/iam.py#L75-L77).\n\nThe next snippet was created today using `troposphere==1.5.0`and `boto3==1.2.6`:\n\n``` python\n>>> import boto3\n>>> import troposphere\n>>> import troposphere.iam\n>>> import time\n>>>\n>>> long_title = 'VeryLongName{}'.format('Z' * 100)\n>>> print len(long_title)\n112\n>>>\n>>> # create a role\n...\n>>> role = troposphere.iam.Role(\n... long_title,\n... AssumeRolePolicyDocument={\n... \"Statement\": [{\n... \"Action\": [\"sts:AssumeRole\"],\n... \"Effect\": \"Allow\",\n... \"Principal\": {\"Service\": [\"ec2.amazonaws.com\"]}\n... }]\n... })\nTraceback (most recent call last):\n File \"<stdin>\", line 8, in <module>\n File \"/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/__init__.py\", line 44, in __init__\n self.validate_title()\n File \"/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/iam.py\", line 77, in validate_title\n iam_role_name(self.title)\n File \"/Users/hugo/.virtualenvs/tmp-5ce4367de56b6bde/lib/python2.7/site-packages/troposphere/validators.py\", line 98, in iam_role_name\n raise ValueError('IAM Role Name may not exceed 64 characters')\nValueError: IAM Role Name may not exceed 64 characters\n>>>\n>>>\n>>> # delete validator\n...\n>>> del troposphere.iam.Role.validate_title\n>>> # try again\n...\n>>> role = troposphere.iam.Role(\n... long_title,\n... AssumeRolePolicyDocument={\n... \"Statement\": [{\n... \"Action\": [\"sts:AssumeRole\"],\n... \"Effect\": \"Allow\",\n... \"Principal\": {\"Service\": [\"ec2.amazonaws.com\"]}\n... }]\n... })\n>>> template = troposphere.Template()\n>>> template.add_resource(role)\n<troposphere.iam.Role object at 0x10ee02990>\n>>> print template.to_json()\n{\n \"Resources\": {\n \"VeryLongNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ\": {\n \"Properties\": {\n \"AssumeRolePolicyDocument\": {\n \"Statement\": [\n {\n \"Action\": [\n \"sts:AssumeRole\"\n ],\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": [\n \"ec2.amazonaws.com\"\n ]\n }\n }\n ]\n }\n },\n \"Type\": \"AWS::IAM::Role\"\n }\n }\n}\n>>> client = boto3.client('cloudformation', 'us-east-1')\n>>> stack = client.create_stack(\n... StackName='testTroposphere',\n... TemplateBody=template.to_json(),\n... Capabilities=['CAPABILITY_IAM'])\n>>>\n>>> while client.describe_stacks(StackName=stack['StackId'])['Stacks'][0]['StackStatus'] != 'CREATE_COMPLETE':\n... import time\n... time.sleep(1)\n...\n>>> resources = client.describe_stack_resources(StackName=stack['StackId'])\n>>> for r in resources['StackResources']:\n... physical_id = r['PhysicalResourceId']\n... 
print(\"{} ({} chars)\".format(physical_id, len(physical_id)))\n...\ntestTroposphere-VeryLongNameZZZZZZZZZZZZZZZZZZZZZZ-PTHEM9FPNX28 (63 chars)\n```\n\nThe snippet above shows that the physical id was chosen by CloudFormation and isn't just a trimmed version of the title (it includes a random part too).\n\n", "before_files": [{"content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSObject, AWSProperty\nfrom .validators import integer, boolean, status\nfrom .validators import iam_path, iam_role_name, iam_group_name, iam_user_name\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\nActive = \"Active\"\nInactive = \"Inactive\"\n\n\nclass AccessKey(AWSObject):\n resource_type = \"AWS::IAM::AccessKey\"\n\n props = {\n 'Serial': (integer, False),\n 'Status': (status, False),\n 'UserName': (basestring, True),\n }\n\n\nclass PolicyType(AWSObject):\n resource_type = \"AWS::IAM::Policy\"\n\n props = {\n 'Groups': ([basestring], False),\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n\n\nclass Policy(AWSProperty):\n props = {\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n }\n\nPolicyProperty = Policy\n\n\nclass Group(AWSObject):\n def validate_title(self):\n iam_group_name(self.title)\n\n resource_type = \"AWS::IAM::Group\"\n\n props = {\n 'GroupName': (iam_group_name, False),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n }\n\n\nclass InstanceProfile(AWSObject):\n resource_type = \"AWS::IAM::InstanceProfile\"\n\n props = {\n 'Path': (iam_path, False),\n 'Roles': (list, True),\n }\n\n\nclass Role(AWSObject):\n def validate_title(self):\n iam_role_name(self.title)\n\n resource_type = \"AWS::IAM::Role\"\n\n props = {\n 'AssumeRolePolicyDocument': (policytypes, True),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n\n\nclass LoginProfile(AWSProperty):\n props = {\n 'Password': (basestring, True),\n 'PasswordResetRequired': (boolean, False),\n }\n\n\nclass User(AWSObject):\n resource_type = \"AWS::IAM::User\"\n\n props = {\n 'Path': (iam_path, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyArns': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n\n\nclass UserToGroupAddition(AWSObject):\n resource_type = \"AWS::IAM::UserToGroupAddition\"\n\n props = {\n 'GroupName': (basestring, True),\n 'Users': (list, True),\n }\n\n\nclass ManagedPolicy(AWSObject):\n resource_type = \"AWS::IAM::ManagedPolicy\"\n\n props = {\n 'Description': (basestring, False),\n 'Groups': ([basestring], False),\n 'Path': (iam_path, False),\n 'PolicyDocument': (policytypes, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n", "path": "troposphere/iam.py"}], "after_files": [{"content": "# Copyright (c) 2012-2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . 
import AWSObject, AWSProperty\nfrom .validators import integer, boolean, status\nfrom .validators import iam_path, iam_role_name, iam_group_name, iam_user_name\n\ntry:\n from awacs.aws import Policy\n policytypes = (dict, Policy)\nexcept ImportError:\n policytypes = dict,\n\n\nActive = \"Active\"\nInactive = \"Inactive\"\n\n\nclass AccessKey(AWSObject):\n resource_type = \"AWS::IAM::AccessKey\"\n\n props = {\n 'Serial': (integer, False),\n 'Status': (status, False),\n 'UserName': (basestring, True),\n }\n\n\nclass PolicyType(AWSObject):\n resource_type = \"AWS::IAM::Policy\"\n\n props = {\n 'Groups': ([basestring], False),\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n\n\nclass Policy(AWSProperty):\n props = {\n 'PolicyDocument': (policytypes, True),\n 'PolicyName': (basestring, True),\n }\n\nPolicyProperty = Policy\n\n\nclass Group(AWSObject):\n resource_type = \"AWS::IAM::Group\"\n\n props = {\n 'GroupName': (iam_group_name, False),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n }\n\n\nclass InstanceProfile(AWSObject):\n resource_type = \"AWS::IAM::InstanceProfile\"\n\n props = {\n 'Path': (iam_path, False),\n 'Roles': (list, True),\n }\n\n\nclass Role(AWSObject):\n resource_type = \"AWS::IAM::Role\"\n\n props = {\n 'AssumeRolePolicyDocument': (policytypes, True),\n 'ManagedPolicyArns': ([basestring], False),\n 'Path': (iam_path, False),\n 'Policies': ([Policy], False),\n 'RoleName': (iam_role_name, False),\n }\n\n\nclass LoginProfile(AWSProperty):\n props = {\n 'Password': (basestring, True),\n 'PasswordResetRequired': (boolean, False),\n }\n\n\nclass User(AWSObject):\n resource_type = \"AWS::IAM::User\"\n\n props = {\n 'Path': (iam_path, False),\n 'Groups': ([basestring], False),\n 'ManagedPolicyArns': ([basestring], False),\n 'LoginProfile': (LoginProfile, False),\n 'Policies': ([Policy], False),\n 'UserName': (iam_user_name, False),\n }\n\n\nclass UserToGroupAddition(AWSObject):\n resource_type = \"AWS::IAM::UserToGroupAddition\"\n\n props = {\n 'GroupName': (basestring, True),\n 'Users': (list, True),\n }\n\n\nclass ManagedPolicy(AWSObject):\n resource_type = \"AWS::IAM::ManagedPolicy\"\n\n props = {\n 'Description': (basestring, False),\n 'Groups': ([basestring], False),\n 'Path': (iam_path, False),\n 'PolicyDocument': (policytypes, True),\n 'Roles': ([basestring], False),\n 'Users': ([basestring], False),\n }\n", "path": "troposphere/iam.py"}]} | 2,296 | 138 |
gh_patches_debug_12568 | rasdani/github-patches | git_diff | Kinto__kinto-474 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Do not require cliquet master branch in dev
As discussed with @Natim @almet
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/config/__init__.py`
Content:
```
1 import os
2 import binascii
3 import codecs
4 from kinto import logger
5
6 HERE = os.path.abspath(os.path.dirname(__file__))
7
8
9 def render_template(template, destination, **kwargs):
10 template = os.path.join(HERE, template)
11 folder = os.path.dirname(destination)
12
13 if not os.path.exists(folder):
14 os.makedirs(folder)
15
16 logger.info("Created config {}".format(os.path.abspath(destination)))
17
18 with codecs.open(template, 'r', encoding='utf-8') as f:
19 raw_template = f.read()
20 rendered = raw_template.format(**kwargs)
21 with codecs.open(destination, 'w+', encoding='utf-8') as output:
22 output.write(rendered)
23
24
25 def init(config_file, backend):
26 values = {}
27 values['secret'] = binascii.b2a_hex(os.urandom(32))
28
29 values['storage_backend'] = "cliquet.storage.%s" % backend
30 values['cache_backend'] = "cliquet.cache.%s" % backend
31 values['permission_backend'] = "cliquet.permission.%s" % backend
32
33 if backend == 'postgresql':
34 postgresql_url = "postgres://postgres:postgres@localhost/postgres"
35 values['storage_url'] = postgresql_url
36 values['cache_url'] = postgresql_url
37 values['permission_url'] = postgresql_url
38
39 elif backend == 'redis':
40 redis_url = "redis://localhost:6379"
41 values['storage_url'] = redis_url + "/1"
42 values['cache_url'] = redis_url + "/2"
43 values['permission_url'] = redis_url + "/3"
44
45 else:
46 values['storage_url'] = ''
47 values['cache_url'] = ''
48 values['permission_url'] = ''
49
50 render_template("kinto.tpl", config_file, **values)
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py
--- a/kinto/config/__init__.py
+++ b/kinto/config/__init__.py
@@ -1,6 +1,8 @@
import os
-import binascii
import codecs
+
+from cliquet import utils as cliquet_utils
+
from kinto import logger
HERE = os.path.abspath(os.path.dirname(__file__))
@@ -24,7 +26,8 @@
def init(config_file, backend):
values = {}
- values['secret'] = binascii.b2a_hex(os.urandom(32))
+
+ values['secret'] = cliquet_utils.random_bytes_hex(32)
values['storage_backend'] = "cliquet.storage.%s" % backend
values['cache_backend'] = "cliquet.cache.%s" % backend
| {"golden_diff": "diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py\n--- a/kinto/config/__init__.py\n+++ b/kinto/config/__init__.py\n@@ -1,6 +1,8 @@\n import os\n-import binascii\n import codecs\n+\n+from cliquet import utils as cliquet_utils\n+\n from kinto import logger\n \n HERE = os.path.abspath(os.path.dirname(__file__))\n@@ -24,7 +26,8 @@\n \n def init(config_file, backend):\n values = {}\n- values['secret'] = binascii.b2a_hex(os.urandom(32))\n+\n+ values['secret'] = cliquet_utils.random_bytes_hex(32)\n \n values['storage_backend'] = \"cliquet.storage.%s\" % backend\n values['cache_backend'] = \"cliquet.cache.%s\" % backend\n", "issue": "Do not require cliquet master branch in dev\nAs discussed with @Natim @almet \n\n", "before_files": [{"content": "import os\nimport binascii\nimport codecs\nfrom kinto import logger\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef render_template(template, destination, **kwargs):\n template = os.path.join(HERE, template)\n folder = os.path.dirname(destination)\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n logger.info(\"Created config {}\".format(os.path.abspath(destination)))\n\n with codecs.open(template, 'r', encoding='utf-8') as f:\n raw_template = f.read()\n rendered = raw_template.format(**kwargs)\n with codecs.open(destination, 'w+', encoding='utf-8') as output:\n output.write(rendered)\n\n\ndef init(config_file, backend):\n values = {}\n values['secret'] = binascii.b2a_hex(os.urandom(32))\n\n values['storage_backend'] = \"cliquet.storage.%s\" % backend\n values['cache_backend'] = \"cliquet.cache.%s\" % backend\n values['permission_backend'] = \"cliquet.permission.%s\" % backend\n\n if backend == 'postgresql':\n postgresql_url = \"postgres://postgres:postgres@localhost/postgres\"\n values['storage_url'] = postgresql_url\n values['cache_url'] = postgresql_url\n values['permission_url'] = postgresql_url\n\n elif backend == 'redis':\n redis_url = \"redis://localhost:6379\"\n values['storage_url'] = redis_url + \"/1\"\n values['cache_url'] = redis_url + \"/2\"\n values['permission_url'] = redis_url + \"/3\"\n\n else:\n values['storage_url'] = ''\n values['cache_url'] = ''\n values['permission_url'] = ''\n\n render_template(\"kinto.tpl\", config_file, **values)\n", "path": "kinto/config/__init__.py"}], "after_files": [{"content": "import os\nimport codecs\n\nfrom cliquet import utils as cliquet_utils\n\nfrom kinto import logger\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef render_template(template, destination, **kwargs):\n template = os.path.join(HERE, template)\n folder = os.path.dirname(destination)\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n logger.info(\"Created config {}\".format(os.path.abspath(destination)))\n\n with codecs.open(template, 'r', encoding='utf-8') as f:\n raw_template = f.read()\n rendered = raw_template.format(**kwargs)\n with codecs.open(destination, 'w+', encoding='utf-8') as output:\n output.write(rendered)\n\n\ndef init(config_file, backend):\n values = {}\n\n values['secret'] = cliquet_utils.random_bytes_hex(32)\n\n values['storage_backend'] = \"cliquet.storage.%s\" % backend\n values['cache_backend'] = \"cliquet.cache.%s\" % backend\n values['permission_backend'] = \"cliquet.permission.%s\" % backend\n\n if backend == 'postgresql':\n postgresql_url = \"postgres://postgres:postgres@localhost/postgres\"\n values['storage_url'] = postgresql_url\n values['cache_url'] = postgresql_url\n values['permission_url'] = postgresql_url\n\n elif backend == 
'redis':\n redis_url = \"redis://localhost:6379\"\n values['storage_url'] = redis_url + \"/1\"\n values['cache_url'] = redis_url + \"/2\"\n values['permission_url'] = redis_url + \"/3\"\n\n else:\n values['storage_url'] = ''\n values['cache_url'] = ''\n values['permission_url'] = ''\n\n render_template(\"kinto.tpl\", config_file, **values)\n", "path": "kinto/config/__init__.py"}]} | 768 | 189 |
gh_patches_debug_19880 | rasdani/github-patches | git_diff | deepset-ai__haystack-7422 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[1.x] Support json mode for OpenAI
**Is your feature request related to a problem? Please describe.**
OpenAI supports [json mode](https://platform.openai.com/docs/guides/text-generation/json-mode) to constrain the model to only output valid json.
**Describe the solution you'd like**
Be able to set `model_kwargs={"response_format": { "type": "json_object" }}` in the `PromptNode` to have it only generate valid json.
**Additional context**
Implementation-wise, this might be as simple as adding the keyword to the OAI [invocation context kwargs list](https://github.com/deepset-ai/haystack/blob/f389dcf8c97d6bafda9609440c3fff61ed804a90/haystack/nodes/prompt/invocation_layer/open_ai.py#L77), but I'm not fully sure.
--- END ISSUE ---
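For illustration, a sketch of how the requested kwargs could be passed once the invocation layer forwards them. The `PromptNode` wiring follows the issue's own suggestion; the model name and API key are placeholders, `response_format` requires an OpenAI model that actually supports JSON mode, and `seed` is shown only because the fix forwards it alongside `response_format`.
```python
# Illustrative only: shows the model_kwargs the issue asks to support.
# Assumes a Haystack 1.x build where the OpenAI invocation layer forwards
# "response_format" (and optionally "seed") to the OpenAI API.
from haystack.nodes import PromptNode

prompt_node = PromptNode(
    model_name_or_path="gpt-3.5-turbo",   # placeholder; must be a model with JSON mode
    api_key="YOUR_OPENAI_API_KEY",        # placeholder
    model_kwargs={
        "response_format": {"type": "json_object"},  # constrain output to valid JSON
        "seed": 42,                                   # optional, for more stable sampling
    },
)

result = prompt_node("Return a JSON object with keys 'city' and 'country' for Paris.")
print(result)
```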
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/nodes/prompt/invocation_layer/open_ai.py`
Content:
```
1 from typing import List, Union, Dict, Optional, cast, Any
2 import json
3 import logging
4
5 import sseclient
6
7 from haystack.errors import OpenAIError
8 from haystack.nodes.prompt.invocation_layer.utils import has_azure_parameters
9 from haystack.utils.openai_utils import (
10 _openai_text_completion_tokenization_details,
11 load_openai_tokenizer,
12 _check_openai_finish_reason,
13 check_openai_async_policy_violation,
14 check_openai_policy_violation,
15 openai_async_request,
16 openai_request,
17 )
18 from haystack.nodes.prompt.invocation_layer.base import PromptModelInvocationLayer
19 from haystack.nodes.prompt.invocation_layer.handlers import TokenStreamingHandler, DefaultTokenStreamingHandler
20
21 logger = logging.getLogger(__name__)
22
23
24 class OpenAIInvocationLayer(PromptModelInvocationLayer):
25 """
26 PromptModelInvocationLayer implementation for OpenAI's GPT-3 InstructGPT models. Invocations are made using REST API.
27 See [OpenAI GPT-3](https://platform.openai.com/docs/models/gpt-3) for more details.
28
29 Note: kwargs other than init parameter names are ignored to enable reflective construction of the class
30 as many variants of PromptModelInvocationLayer are possible and they may have different parameters.
31 """
32
33 def __init__(
34 self,
35 api_key: str,
36 model_name_or_path: str = "gpt-3.5-turbo-instruct",
37 max_length: Optional[int] = 100,
38 api_base: str = "https://api.openai.com/v1",
39 openai_organization: Optional[str] = None,
40 timeout: Optional[float] = None,
41 **kwargs,
42 ):
43 """
44 Creates an instance of OpenAIInvocationLayer for OpenAI's GPT-3 InstructGPT models.
45
46 :param model_name_or_path: The name or path of the underlying model.
47 :param max_length: The maximum number of tokens the output text can have.
48 :param api_key: The OpenAI API key.
49 :param api_base: The OpenAI API Base url, defaults to `https://api.openai.com/v1`.
50 :param openai_organization: The OpenAI-Organization ID, defaults to `None`. For more details, see see OpenAI
51 [documentation](https://platform.openai.com/docs/api-reference/requesting-organization).
52 :param kwargs: Additional keyword arguments passed to the underlying model. Due to reflective construction of
53 all PromptModelInvocationLayer instances, this instance of OpenAIInvocationLayer might receive some unrelated
54 kwargs. Only the kwargs relevant to OpenAIInvocationLayer are considered. The list of OpenAI-relevant
55 kwargs includes: suffix, temperature, top_p, presence_penalty, frequency_penalty, best_of, n, max_tokens,
56 logit_bias, stop, echo, and logprobs. For more details about these kwargs, see OpenAI
57 [documentation](https://platform.openai.com/docs/api-reference/completions/create).
58 Note: additional model argument moderate_content will filter input and generated answers for potentially
59 sensitive content using the [OpenAI Moderation API](https://platform.openai.com/docs/guides/moderation)
60 if set. If the input or answers are flagged, an empty list is returned in place of the answers.
61 """
62 super().__init__(model_name_or_path)
63 if not isinstance(api_key, str) or len(api_key) == 0:
64 raise OpenAIError(
65 f"api_key {api_key} must be a valid OpenAI key. Visit https://openai.com/api/ to get one."
66 )
67 self.api_key = api_key
68 self.api_base = api_base
69 self.openai_organization = openai_organization
70 self.timeout = timeout
71
72 # 16 is the default length for answers from OpenAI shown in the docs
73 # here, https://platform.openai.com/docs/api-reference/completions/create.
74 # max_length must be set otherwise OpenAIInvocationLayer._ensure_token_limit will fail.
75 self.max_length = max_length or 16
76
77 # Due to reflective construction of all invocation layers we might receive some
78 # unknown kwargs, so we need to take only the relevant.
79 # For more details refer to OpenAI documentation
80 self.model_input_kwargs = {
81 key: kwargs[key]
82 for key in [
83 "suffix",
84 "max_tokens",
85 "temperature",
86 "top_p",
87 "n",
88 "logprobs",
89 "echo",
90 "stop",
91 "presence_penalty",
92 "frequency_penalty",
93 "best_of",
94 "logit_bias",
95 "stream",
96 "stream_handler",
97 "moderate_content",
98 ]
99 if key in kwargs
100 }
101
102 tokenizer_name, max_tokens_limit = _openai_text_completion_tokenization_details(
103 model_name=self.model_name_or_path
104 )
105 self.max_tokens_limit = max_tokens_limit
106 self._tokenizer = load_openai_tokenizer(tokenizer_name=tokenizer_name)
107
108 @property
109 def url(self) -> str:
110 return f"{self.api_base}/completions"
111
112 @property
113 def headers(self) -> Dict[str, str]:
114 headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
115 if self.openai_organization:
116 headers["OpenAI-Organization"] = self.openai_organization
117 return headers
118
119 def _prepare_invoke(self, *args, **kwargs):
120 prompt = kwargs.get("prompt")
121 if not prompt:
122 raise ValueError(
123 f"No prompt provided. Model {self.model_name_or_path} requires prompt."
124 f"Make sure to provide prompt in kwargs."
125 )
126 # either stream is True (will use default handler) or stream_handler is provided
127 kwargs_with_defaults = self.model_input_kwargs
128 if kwargs:
129 # we use keyword stop_words but OpenAI uses stop
130 if "stop_words" in kwargs:
131 kwargs["stop"] = kwargs.pop("stop_words")
132 if "top_k" in kwargs:
133 top_k = kwargs.pop("top_k")
134 kwargs["n"] = top_k
135 kwargs["best_of"] = top_k
136 kwargs_with_defaults.update(kwargs)
137 stream = (
138 kwargs_with_defaults.get("stream", False) or kwargs_with_defaults.get("stream_handler", None) is not None
139 )
140 moderation = kwargs_with_defaults.get("moderate_content", False)
141 base_payload = { # payload common to all OpenAI models
142 "model": self.model_name_or_path,
143 "max_tokens": kwargs_with_defaults.get("max_tokens", self.max_length),
144 "temperature": kwargs_with_defaults.get("temperature", 0.7),
145 "top_p": kwargs_with_defaults.get("top_p", 1),
146 "n": kwargs_with_defaults.get("n", 1),
147 "stream": stream,
148 "stop": kwargs_with_defaults.get("stop", None),
149 "presence_penalty": kwargs_with_defaults.get("presence_penalty", 0),
150 "frequency_penalty": kwargs_with_defaults.get("frequency_penalty", 0),
151 "logit_bias": kwargs_with_defaults.get("logit_bias", {}),
152 }
153
154 return (prompt, base_payload, kwargs_with_defaults, stream, moderation)
155
156 def invoke(self, *args, **kwargs):
157 """
158 Invokes a prompt on the model. Based on the model, it takes in a prompt (or either a prompt or a list of messages)
159 and returns a list of responses using a REST invocation.
160
161 :return: The responses are being returned.
162
163 Note: Only kwargs relevant to OpenAI are passed to OpenAI rest API. Others kwargs are ignored.
164 For more details, see OpenAI [documentation](https://platform.openai.com/docs/api-reference/completions/create).
165 """
166 prompt, base_payload, kwargs_with_defaults, stream, moderation = self._prepare_invoke(*args, **kwargs)
167
168 if moderation and check_openai_policy_violation(input=prompt, headers=self.headers):
169 logger.info("Prompt '%s' will not be sent to OpenAI due to potential policy violation.", prompt)
170 return []
171
172 extra_payload = {
173 "prompt": prompt,
174 "suffix": kwargs_with_defaults.get("suffix", None),
175 "logprobs": kwargs_with_defaults.get("logprobs", None),
176 "echo": kwargs_with_defaults.get("echo", False),
177 "best_of": kwargs_with_defaults.get("best_of", 1),
178 }
179 payload = {**base_payload, **extra_payload}
180 if not stream:
181 res = openai_request(url=self.url, headers=self.headers, payload=payload)
182 _check_openai_finish_reason(result=res, payload=payload)
183 responses = [ans["text"].strip() for ans in res["choices"]]
184 else:
185 response = openai_request(
186 url=self.url, headers=self.headers, payload=payload, read_response=False, stream=True
187 )
188 handler: TokenStreamingHandler = kwargs_with_defaults.pop("stream_handler", DefaultTokenStreamingHandler())
189 responses = self._process_streaming_response(response=response, stream_handler=handler)
190
191 if moderation and check_openai_policy_violation(input=responses, headers=self.headers):
192 logger.info("Response '%s' will not be returned due to potential policy violation.", responses)
193 return []
194
195 return responses
196
197 async def ainvoke(self, *args, **kwargs):
198 """
199 asyncio version of the `invoke` method.
200 """
201 prompt, base_payload, kwargs_with_defaults, stream, moderation = self._prepare_invoke(*args, **kwargs)
202 if moderation and await check_openai_async_policy_violation(input=prompt, headers=self.headers):
203 logger.info("Prompt '%s' will not be sent to OpenAI due to potential policy violation.", prompt)
204 return []
205
206 extra_payload = {
207 "prompt": prompt,
208 "suffix": kwargs_with_defaults.get("suffix", None),
209 "logprobs": kwargs_with_defaults.get("logprobs", None),
210 "echo": kwargs_with_defaults.get("echo", False),
211 "best_of": kwargs_with_defaults.get("best_of", 1),
212 }
213 payload = {**base_payload, **extra_payload}
214 if not stream:
215 res = await openai_async_request(url=self.url, headers=self.headers, payload=payload)
216 _check_openai_finish_reason(result=res, payload=payload)
217 responses = [ans["text"].strip() for ans in res["choices"]]
218 else:
219 response = await openai_async_request(
220 url=self.url, headers=self.headers, payload=payload, read_response=False, stream=True
221 )
222 handler: TokenStreamingHandler = kwargs_with_defaults.pop("stream_handler", DefaultTokenStreamingHandler())
223 responses = self._process_streaming_response(response=response, stream_handler=handler)
224
225 if moderation and await check_openai_async_policy_violation(input=responses, headers=self.headers):
226 logger.info("Response '%s' will not be returned due to potential policy violation.", responses)
227 return []
228
229 return responses
230
231 def _process_streaming_response(self, response, stream_handler: TokenStreamingHandler):
232 client = sseclient.SSEClient(response)
233 tokens: List[str] = []
234 try:
235 for event in client.events():
236 if event.data != TokenStreamingHandler.DONE_MARKER:
237 event_data = json.loads(event.data)
238 token: str = self._extract_token(event_data)
239 if token:
240 tokens.append(stream_handler(token, event_data=event_data["choices"]))
241 finally:
242 client.close()
243 return ["".join(tokens)] # return a list of strings just like non-streaming
244
245 def _extract_token(self, event_data: Dict[str, Any]):
246 return event_data["choices"][0]["text"]
247
248 def _ensure_token_limit(self, prompt: Union[str, List[Dict[str, str]]]) -> Union[str, List[Dict[str, str]]]:
249 """Ensure that the length of the prompt and answer is within the max tokens limit of the model.
250 If needed, truncate the prompt text so that it fits within the limit.
251
252 :param prompt: Prompt text to be sent to the generative model.
253 """
254 n_prompt_tokens = len(self._tokenizer.encode(cast(str, prompt)))
255 n_answer_tokens = self.max_length
256 if (n_prompt_tokens + n_answer_tokens) <= self.max_tokens_limit:
257 return prompt
258
259 logger.warning(
260 "The prompt has been truncated from %s tokens to %s tokens so that the prompt length and "
261 "answer length (%s tokens) fit within the max token limit (%s tokens). "
262 "Reduce the length of the prompt to prevent it from being cut off.",
263 n_prompt_tokens,
264 self.max_tokens_limit - n_answer_tokens,
265 n_answer_tokens,
266 self.max_tokens_limit,
267 )
268
269 tokenized_payload = self._tokenizer.encode(prompt)
270 decoded_string = self._tokenizer.decode(tokenized_payload[: self.max_tokens_limit - n_answer_tokens])
271 return decoded_string
272
273 @classmethod
274 def supports(cls, model_name_or_path: str, **kwargs) -> bool:
275 valid_model = model_name_or_path in ["ada", "babbage", "davinci", "curie", "gpt-3.5-turbo-instruct"] or any(
276 m in model_name_or_path for m in ["-ada-", "-babbage-", "-davinci-", "-curie-"]
277 )
278 return valid_model and not has_azure_parameters(**kwargs)
279
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/nodes/prompt/invocation_layer/open_ai.py b/haystack/nodes/prompt/invocation_layer/open_ai.py
--- a/haystack/nodes/prompt/invocation_layer/open_ai.py
+++ b/haystack/nodes/prompt/invocation_layer/open_ai.py
@@ -95,6 +95,8 @@
"stream",
"stream_handler",
"moderate_content",
+ "seed",
+ "response_format",
]
if key in kwargs
}
@@ -150,6 +152,12 @@
"frequency_penalty": kwargs_with_defaults.get("frequency_penalty", 0),
"logit_bias": kwargs_with_defaults.get("logit_bias", {}),
}
+ response_format = kwargs_with_defaults.get("response_format", None)
+ if response_format:
+ base_payload["response_format"] = response_format
+ seed = kwargs_with_defaults.get("seed", None)
+ if seed:
+ base_payload["seed"] = seed
return (prompt, base_payload, kwargs_with_defaults, stream, moderation)
| {"golden_diff": "diff --git a/haystack/nodes/prompt/invocation_layer/open_ai.py b/haystack/nodes/prompt/invocation_layer/open_ai.py\n--- a/haystack/nodes/prompt/invocation_layer/open_ai.py\n+++ b/haystack/nodes/prompt/invocation_layer/open_ai.py\n@@ -95,6 +95,8 @@\n \"stream\",\n \"stream_handler\",\n \"moderate_content\",\n+ \"seed\",\n+ \"response_format\",\n ]\n if key in kwargs\n }\n@@ -150,6 +152,12 @@\n \"frequency_penalty\": kwargs_with_defaults.get(\"frequency_penalty\", 0),\n \"logit_bias\": kwargs_with_defaults.get(\"logit_bias\", {}),\n }\n+ response_format = kwargs_with_defaults.get(\"response_format\", None)\n+ if response_format:\n+ base_payload[\"response_format\"] = response_format\n+ seed = kwargs_with_defaults.get(\"seed\", None)\n+ if seed:\n+ base_payload[\"seed\"] = seed\n \n return (prompt, base_payload, kwargs_with_defaults, stream, moderation)\n", "issue": "[1.x] Support json mode for OpenAI\n**Is your feature request related to a problem? Please describe.**\r\nOpenAI supports [json mode](https://platform.openai.com/docs/guides/text-generation/json-mode) to constrain the model to only output valid json.\r\n\r\n**Describe the solution you'd like**\r\nBe able to set `model_kwargs={\"response_format\": { \"type\": \"json_object\" }}` in the `PromptNode` to have it only generate valid json.\r\n\r\n**Additional context**\r\nImplementation-wise might be as simple as adding the keyword to the OAI [invocation context kwargs list](https://github.com/deepset-ai/haystack/blob/f389dcf8c97d6bafda9609440c3fff61ed804a90/haystack/nodes/prompt/invocation_layer/open_ai.py#L77). But not fully sure.\r\n\n", "before_files": [{"content": "from typing import List, Union, Dict, Optional, cast, Any\nimport json\nimport logging\n\nimport sseclient\n\nfrom haystack.errors import OpenAIError\nfrom haystack.nodes.prompt.invocation_layer.utils import has_azure_parameters\nfrom haystack.utils.openai_utils import (\n _openai_text_completion_tokenization_details,\n load_openai_tokenizer,\n _check_openai_finish_reason,\n check_openai_async_policy_violation,\n check_openai_policy_violation,\n openai_async_request,\n openai_request,\n)\nfrom haystack.nodes.prompt.invocation_layer.base import PromptModelInvocationLayer\nfrom haystack.nodes.prompt.invocation_layer.handlers import TokenStreamingHandler, DefaultTokenStreamingHandler\n\nlogger = logging.getLogger(__name__)\n\n\nclass OpenAIInvocationLayer(PromptModelInvocationLayer):\n \"\"\"\n PromptModelInvocationLayer implementation for OpenAI's GPT-3 InstructGPT models. 
Invocations are made using REST API.\n See [OpenAI GPT-3](https://platform.openai.com/docs/models/gpt-3) for more details.\n\n Note: kwargs other than init parameter names are ignored to enable reflective construction of the class\n as many variants of PromptModelInvocationLayer are possible and they may have different parameters.\n \"\"\"\n\n def __init__(\n self,\n api_key: str,\n model_name_or_path: str = \"gpt-3.5-turbo-instruct\",\n max_length: Optional[int] = 100,\n api_base: str = \"https://api.openai.com/v1\",\n openai_organization: Optional[str] = None,\n timeout: Optional[float] = None,\n **kwargs,\n ):\n \"\"\"\n Creates an instance of OpenAIInvocationLayer for OpenAI's GPT-3 InstructGPT models.\n\n :param model_name_or_path: The name or path of the underlying model.\n :param max_length: The maximum number of tokens the output text can have.\n :param api_key: The OpenAI API key.\n :param api_base: The OpenAI API Base url, defaults to `https://api.openai.com/v1`.\n :param openai_organization: The OpenAI-Organization ID, defaults to `None`. For more details, see see OpenAI\n [documentation](https://platform.openai.com/docs/api-reference/requesting-organization).\n :param kwargs: Additional keyword arguments passed to the underlying model. Due to reflective construction of\n all PromptModelInvocationLayer instances, this instance of OpenAIInvocationLayer might receive some unrelated\n kwargs. Only the kwargs relevant to OpenAIInvocationLayer are considered. The list of OpenAI-relevant\n kwargs includes: suffix, temperature, top_p, presence_penalty, frequency_penalty, best_of, n, max_tokens,\n logit_bias, stop, echo, and logprobs. For more details about these kwargs, see OpenAI\n [documentation](https://platform.openai.com/docs/api-reference/completions/create).\n Note: additional model argument moderate_content will filter input and generated answers for potentially\n sensitive content using the [OpenAI Moderation API](https://platform.openai.com/docs/guides/moderation)\n if set. If the input or answers are flagged, an empty list is returned in place of the answers.\n \"\"\"\n super().__init__(model_name_or_path)\n if not isinstance(api_key, str) or len(api_key) == 0:\n raise OpenAIError(\n f\"api_key {api_key} must be a valid OpenAI key. 
Visit https://openai.com/api/ to get one.\"\n )\n self.api_key = api_key\n self.api_base = api_base\n self.openai_organization = openai_organization\n self.timeout = timeout\n\n # 16 is the default length for answers from OpenAI shown in the docs\n # here, https://platform.openai.com/docs/api-reference/completions/create.\n # max_length must be set otherwise OpenAIInvocationLayer._ensure_token_limit will fail.\n self.max_length = max_length or 16\n\n # Due to reflective construction of all invocation layers we might receive some\n # unknown kwargs, so we need to take only the relevant.\n # For more details refer to OpenAI documentation\n self.model_input_kwargs = {\n key: kwargs[key]\n for key in [\n \"suffix\",\n \"max_tokens\",\n \"temperature\",\n \"top_p\",\n \"n\",\n \"logprobs\",\n \"echo\",\n \"stop\",\n \"presence_penalty\",\n \"frequency_penalty\",\n \"best_of\",\n \"logit_bias\",\n \"stream\",\n \"stream_handler\",\n \"moderate_content\",\n ]\n if key in kwargs\n }\n\n tokenizer_name, max_tokens_limit = _openai_text_completion_tokenization_details(\n model_name=self.model_name_or_path\n )\n self.max_tokens_limit = max_tokens_limit\n self._tokenizer = load_openai_tokenizer(tokenizer_name=tokenizer_name)\n\n @property\n def url(self) -> str:\n return f\"{self.api_base}/completions\"\n\n @property\n def headers(self) -> Dict[str, str]:\n headers = {\"Authorization\": f\"Bearer {self.api_key}\", \"Content-Type\": \"application/json\"}\n if self.openai_organization:\n headers[\"OpenAI-Organization\"] = self.openai_organization\n return headers\n\n def _prepare_invoke(self, *args, **kwargs):\n prompt = kwargs.get(\"prompt\")\n if not prompt:\n raise ValueError(\n f\"No prompt provided. Model {self.model_name_or_path} requires prompt.\"\n f\"Make sure to provide prompt in kwargs.\"\n )\n # either stream is True (will use default handler) or stream_handler is provided\n kwargs_with_defaults = self.model_input_kwargs\n if kwargs:\n # we use keyword stop_words but OpenAI uses stop\n if \"stop_words\" in kwargs:\n kwargs[\"stop\"] = kwargs.pop(\"stop_words\")\n if \"top_k\" in kwargs:\n top_k = kwargs.pop(\"top_k\")\n kwargs[\"n\"] = top_k\n kwargs[\"best_of\"] = top_k\n kwargs_with_defaults.update(kwargs)\n stream = (\n kwargs_with_defaults.get(\"stream\", False) or kwargs_with_defaults.get(\"stream_handler\", None) is not None\n )\n moderation = kwargs_with_defaults.get(\"moderate_content\", False)\n base_payload = { # payload common to all OpenAI models\n \"model\": self.model_name_or_path,\n \"max_tokens\": kwargs_with_defaults.get(\"max_tokens\", self.max_length),\n \"temperature\": kwargs_with_defaults.get(\"temperature\", 0.7),\n \"top_p\": kwargs_with_defaults.get(\"top_p\", 1),\n \"n\": kwargs_with_defaults.get(\"n\", 1),\n \"stream\": stream,\n \"stop\": kwargs_with_defaults.get(\"stop\", None),\n \"presence_penalty\": kwargs_with_defaults.get(\"presence_penalty\", 0),\n \"frequency_penalty\": kwargs_with_defaults.get(\"frequency_penalty\", 0),\n \"logit_bias\": kwargs_with_defaults.get(\"logit_bias\", {}),\n }\n\n return (prompt, base_payload, kwargs_with_defaults, stream, moderation)\n\n def invoke(self, *args, **kwargs):\n \"\"\"\n Invokes a prompt on the model. Based on the model, it takes in a prompt (or either a prompt or a list of messages)\n and returns a list of responses using a REST invocation.\n\n :return: The responses are being returned.\n\n Note: Only kwargs relevant to OpenAI are passed to OpenAI rest API. 
Others kwargs are ignored.\n For more details, see OpenAI [documentation](https://platform.openai.com/docs/api-reference/completions/create).\n \"\"\"\n prompt, base_payload, kwargs_with_defaults, stream, moderation = self._prepare_invoke(*args, **kwargs)\n\n if moderation and check_openai_policy_violation(input=prompt, headers=self.headers):\n logger.info(\"Prompt '%s' will not be sent to OpenAI due to potential policy violation.\", prompt)\n return []\n\n extra_payload = {\n \"prompt\": prompt,\n \"suffix\": kwargs_with_defaults.get(\"suffix\", None),\n \"logprobs\": kwargs_with_defaults.get(\"logprobs\", None),\n \"echo\": kwargs_with_defaults.get(\"echo\", False),\n \"best_of\": kwargs_with_defaults.get(\"best_of\", 1),\n }\n payload = {**base_payload, **extra_payload}\n if not stream:\n res = openai_request(url=self.url, headers=self.headers, payload=payload)\n _check_openai_finish_reason(result=res, payload=payload)\n responses = [ans[\"text\"].strip() for ans in res[\"choices\"]]\n else:\n response = openai_request(\n url=self.url, headers=self.headers, payload=payload, read_response=False, stream=True\n )\n handler: TokenStreamingHandler = kwargs_with_defaults.pop(\"stream_handler\", DefaultTokenStreamingHandler())\n responses = self._process_streaming_response(response=response, stream_handler=handler)\n\n if moderation and check_openai_policy_violation(input=responses, headers=self.headers):\n logger.info(\"Response '%s' will not be returned due to potential policy violation.\", responses)\n return []\n\n return responses\n\n async def ainvoke(self, *args, **kwargs):\n \"\"\"\n asyncio version of the `invoke` method.\n \"\"\"\n prompt, base_payload, kwargs_with_defaults, stream, moderation = self._prepare_invoke(*args, **kwargs)\n if moderation and await check_openai_async_policy_violation(input=prompt, headers=self.headers):\n logger.info(\"Prompt '%s' will not be sent to OpenAI due to potential policy violation.\", prompt)\n return []\n\n extra_payload = {\n \"prompt\": prompt,\n \"suffix\": kwargs_with_defaults.get(\"suffix\", None),\n \"logprobs\": kwargs_with_defaults.get(\"logprobs\", None),\n \"echo\": kwargs_with_defaults.get(\"echo\", False),\n \"best_of\": kwargs_with_defaults.get(\"best_of\", 1),\n }\n payload = {**base_payload, **extra_payload}\n if not stream:\n res = await openai_async_request(url=self.url, headers=self.headers, payload=payload)\n _check_openai_finish_reason(result=res, payload=payload)\n responses = [ans[\"text\"].strip() for ans in res[\"choices\"]]\n else:\n response = await openai_async_request(\n url=self.url, headers=self.headers, payload=payload, read_response=False, stream=True\n )\n handler: TokenStreamingHandler = kwargs_with_defaults.pop(\"stream_handler\", DefaultTokenStreamingHandler())\n responses = self._process_streaming_response(response=response, stream_handler=handler)\n\n if moderation and await check_openai_async_policy_violation(input=responses, headers=self.headers):\n logger.info(\"Response '%s' will not be returned due to potential policy violation.\", responses)\n return []\n\n return responses\n\n def _process_streaming_response(self, response, stream_handler: TokenStreamingHandler):\n client = sseclient.SSEClient(response)\n tokens: List[str] = []\n try:\n for event in client.events():\n if event.data != TokenStreamingHandler.DONE_MARKER:\n event_data = json.loads(event.data)\n token: str = self._extract_token(event_data)\n if token:\n tokens.append(stream_handler(token, event_data=event_data[\"choices\"]))\n finally:\n 
client.close()\n return [\"\".join(tokens)] # return a list of strings just like non-streaming\n\n def _extract_token(self, event_data: Dict[str, Any]):\n return event_data[\"choices\"][0][\"text\"]\n\n def _ensure_token_limit(self, prompt: Union[str, List[Dict[str, str]]]) -> Union[str, List[Dict[str, str]]]:\n \"\"\"Ensure that the length of the prompt and answer is within the max tokens limit of the model.\n If needed, truncate the prompt text so that it fits within the limit.\n\n :param prompt: Prompt text to be sent to the generative model.\n \"\"\"\n n_prompt_tokens = len(self._tokenizer.encode(cast(str, prompt)))\n n_answer_tokens = self.max_length\n if (n_prompt_tokens + n_answer_tokens) <= self.max_tokens_limit:\n return prompt\n\n logger.warning(\n \"The prompt has been truncated from %s tokens to %s tokens so that the prompt length and \"\n \"answer length (%s tokens) fit within the max token limit (%s tokens). \"\n \"Reduce the length of the prompt to prevent it from being cut off.\",\n n_prompt_tokens,\n self.max_tokens_limit - n_answer_tokens,\n n_answer_tokens,\n self.max_tokens_limit,\n )\n\n tokenized_payload = self._tokenizer.encode(prompt)\n decoded_string = self._tokenizer.decode(tokenized_payload[: self.max_tokens_limit - n_answer_tokens])\n return decoded_string\n\n @classmethod\n def supports(cls, model_name_or_path: str, **kwargs) -> bool:\n valid_model = model_name_or_path in [\"ada\", \"babbage\", \"davinci\", \"curie\", \"gpt-3.5-turbo-instruct\"] or any(\n m in model_name_or_path for m in [\"-ada-\", \"-babbage-\", \"-davinci-\", \"-curie-\"]\n )\n return valid_model and not has_azure_parameters(**kwargs)\n", "path": "haystack/nodes/prompt/invocation_layer/open_ai.py"}], "after_files": [{"content": "from typing import List, Union, Dict, Optional, cast, Any\nimport json\nimport logging\n\nimport sseclient\n\nfrom haystack.errors import OpenAIError\nfrom haystack.nodes.prompt.invocation_layer.utils import has_azure_parameters\nfrom haystack.utils.openai_utils import (\n _openai_text_completion_tokenization_details,\n load_openai_tokenizer,\n _check_openai_finish_reason,\n check_openai_async_policy_violation,\n check_openai_policy_violation,\n openai_async_request,\n openai_request,\n)\nfrom haystack.nodes.prompt.invocation_layer.base import PromptModelInvocationLayer\nfrom haystack.nodes.prompt.invocation_layer.handlers import TokenStreamingHandler, DefaultTokenStreamingHandler\n\nlogger = logging.getLogger(__name__)\n\n\nclass OpenAIInvocationLayer(PromptModelInvocationLayer):\n \"\"\"\n PromptModelInvocationLayer implementation for OpenAI's GPT-3 InstructGPT models. 
Invocations are made using REST API.\n See [OpenAI GPT-3](https://platform.openai.com/docs/models/gpt-3) for more details.\n\n Note: kwargs other than init parameter names are ignored to enable reflective construction of the class\n as many variants of PromptModelInvocationLayer are possible and they may have different parameters.\n \"\"\"\n\n def __init__(\n self,\n api_key: str,\n model_name_or_path: str = \"gpt-3.5-turbo-instruct\",\n max_length: Optional[int] = 100,\n api_base: str = \"https://api.openai.com/v1\",\n openai_organization: Optional[str] = None,\n timeout: Optional[float] = None,\n **kwargs,\n ):\n \"\"\"\n Creates an instance of OpenAIInvocationLayer for OpenAI's GPT-3 InstructGPT models.\n\n :param model_name_or_path: The name or path of the underlying model.\n :param max_length: The maximum number of tokens the output text can have.\n :param api_key: The OpenAI API key.\n :param api_base: The OpenAI API Base url, defaults to `https://api.openai.com/v1`.\n :param openai_organization: The OpenAI-Organization ID, defaults to `None`. For more details, see see OpenAI\n [documentation](https://platform.openai.com/docs/api-reference/requesting-organization).\n :param kwargs: Additional keyword arguments passed to the underlying model. Due to reflective construction of\n all PromptModelInvocationLayer instances, this instance of OpenAIInvocationLayer might receive some unrelated\n kwargs. Only the kwargs relevant to OpenAIInvocationLayer are considered. The list of OpenAI-relevant\n kwargs includes: suffix, temperature, top_p, presence_penalty, frequency_penalty, best_of, n, max_tokens,\n logit_bias, stop, echo, and logprobs. For more details about these kwargs, see OpenAI\n [documentation](https://platform.openai.com/docs/api-reference/completions/create).\n Note: additional model argument moderate_content will filter input and generated answers for potentially\n sensitive content using the [OpenAI Moderation API](https://platform.openai.com/docs/guides/moderation)\n if set. If the input or answers are flagged, an empty list is returned in place of the answers.\n \"\"\"\n super().__init__(model_name_or_path)\n if not isinstance(api_key, str) or len(api_key) == 0:\n raise OpenAIError(\n f\"api_key {api_key} must be a valid OpenAI key. 
Visit https://openai.com/api/ to get one.\"\n )\n self.api_key = api_key\n self.api_base = api_base\n self.openai_organization = openai_organization\n self.timeout = timeout\n\n # 16 is the default length for answers from OpenAI shown in the docs\n # here, https://platform.openai.com/docs/api-reference/completions/create.\n # max_length must be set otherwise OpenAIInvocationLayer._ensure_token_limit will fail.\n self.max_length = max_length or 16\n\n # Due to reflective construction of all invocation layers we might receive some\n # unknown kwargs, so we need to take only the relevant.\n # For more details refer to OpenAI documentation\n self.model_input_kwargs = {\n key: kwargs[key]\n for key in [\n \"suffix\",\n \"max_tokens\",\n \"temperature\",\n \"top_p\",\n \"n\",\n \"logprobs\",\n \"echo\",\n \"stop\",\n \"presence_penalty\",\n \"frequency_penalty\",\n \"best_of\",\n \"logit_bias\",\n \"stream\",\n \"stream_handler\",\n \"moderate_content\",\n \"seed\",\n \"response_format\",\n ]\n if key in kwargs\n }\n\n tokenizer_name, max_tokens_limit = _openai_text_completion_tokenization_details(\n model_name=self.model_name_or_path\n )\n self.max_tokens_limit = max_tokens_limit\n self._tokenizer = load_openai_tokenizer(tokenizer_name=tokenizer_name)\n\n @property\n def url(self) -> str:\n return f\"{self.api_base}/completions\"\n\n @property\n def headers(self) -> Dict[str, str]:\n headers = {\"Authorization\": f\"Bearer {self.api_key}\", \"Content-Type\": \"application/json\"}\n if self.openai_organization:\n headers[\"OpenAI-Organization\"] = self.openai_organization\n return headers\n\n def _prepare_invoke(self, *args, **kwargs):\n prompt = kwargs.get(\"prompt\")\n if not prompt:\n raise ValueError(\n f\"No prompt provided. Model {self.model_name_or_path} requires prompt.\"\n f\"Make sure to provide prompt in kwargs.\"\n )\n # either stream is True (will use default handler) or stream_handler is provided\n kwargs_with_defaults = self.model_input_kwargs\n if kwargs:\n # we use keyword stop_words but OpenAI uses stop\n if \"stop_words\" in kwargs:\n kwargs[\"stop\"] = kwargs.pop(\"stop_words\")\n if \"top_k\" in kwargs:\n top_k = kwargs.pop(\"top_k\")\n kwargs[\"n\"] = top_k\n kwargs[\"best_of\"] = top_k\n kwargs_with_defaults.update(kwargs)\n stream = (\n kwargs_with_defaults.get(\"stream\", False) or kwargs_with_defaults.get(\"stream_handler\", None) is not None\n )\n moderation = kwargs_with_defaults.get(\"moderate_content\", False)\n base_payload = { # payload common to all OpenAI models\n \"model\": self.model_name_or_path,\n \"max_tokens\": kwargs_with_defaults.get(\"max_tokens\", self.max_length),\n \"temperature\": kwargs_with_defaults.get(\"temperature\", 0.7),\n \"top_p\": kwargs_with_defaults.get(\"top_p\", 1),\n \"n\": kwargs_with_defaults.get(\"n\", 1),\n \"stream\": stream,\n \"stop\": kwargs_with_defaults.get(\"stop\", None),\n \"presence_penalty\": kwargs_with_defaults.get(\"presence_penalty\", 0),\n \"frequency_penalty\": kwargs_with_defaults.get(\"frequency_penalty\", 0),\n \"logit_bias\": kwargs_with_defaults.get(\"logit_bias\", {}),\n }\n response_format = kwargs_with_defaults.get(\"response_format\", None)\n if response_format:\n base_payload[\"response_format\"] = response_format\n seed = kwargs_with_defaults.get(\"seed\", None)\n if seed:\n base_payload[\"seed\"] = seed\n\n return (prompt, base_payload, kwargs_with_defaults, stream, moderation)\n\n def invoke(self, *args, **kwargs):\n \"\"\"\n Invokes a prompt on the model. 
Based on the model, it takes in a prompt (or either a prompt or a list of messages)\n and returns a list of responses using a REST invocation.\n\n :return: The responses are being returned.\n\n Note: Only kwargs relevant to OpenAI are passed to OpenAI rest API. Others kwargs are ignored.\n For more details, see OpenAI [documentation](https://platform.openai.com/docs/api-reference/completions/create).\n \"\"\"\n prompt, base_payload, kwargs_with_defaults, stream, moderation = self._prepare_invoke(*args, **kwargs)\n\n if moderation and check_openai_policy_violation(input=prompt, headers=self.headers):\n logger.info(\"Prompt '%s' will not be sent to OpenAI due to potential policy violation.\", prompt)\n return []\n\n extra_payload = {\n \"prompt\": prompt,\n \"suffix\": kwargs_with_defaults.get(\"suffix\", None),\n \"logprobs\": kwargs_with_defaults.get(\"logprobs\", None),\n \"echo\": kwargs_with_defaults.get(\"echo\", False),\n \"best_of\": kwargs_with_defaults.get(\"best_of\", 1),\n }\n payload = {**base_payload, **extra_payload}\n if not stream:\n res = openai_request(url=self.url, headers=self.headers, payload=payload)\n _check_openai_finish_reason(result=res, payload=payload)\n responses = [ans[\"text\"].strip() for ans in res[\"choices\"]]\n else:\n response = openai_request(\n url=self.url, headers=self.headers, payload=payload, read_response=False, stream=True\n )\n handler: TokenStreamingHandler = kwargs_with_defaults.pop(\"stream_handler\", DefaultTokenStreamingHandler())\n responses = self._process_streaming_response(response=response, stream_handler=handler)\n\n if moderation and check_openai_policy_violation(input=responses, headers=self.headers):\n logger.info(\"Response '%s' will not be returned due to potential policy violation.\", responses)\n return []\n\n return responses\n\n async def ainvoke(self, *args, **kwargs):\n \"\"\"\n asyncio version of the `invoke` method.\n \"\"\"\n prompt, base_payload, kwargs_with_defaults, stream, moderation = self._prepare_invoke(*args, **kwargs)\n if moderation and await check_openai_async_policy_violation(input=prompt, headers=self.headers):\n logger.info(\"Prompt '%s' will not be sent to OpenAI due to potential policy violation.\", prompt)\n return []\n\n extra_payload = {\n \"prompt\": prompt,\n \"suffix\": kwargs_with_defaults.get(\"suffix\", None),\n \"logprobs\": kwargs_with_defaults.get(\"logprobs\", None),\n \"echo\": kwargs_with_defaults.get(\"echo\", False),\n \"best_of\": kwargs_with_defaults.get(\"best_of\", 1),\n }\n payload = {**base_payload, **extra_payload}\n if not stream:\n res = await openai_async_request(url=self.url, headers=self.headers, payload=payload)\n _check_openai_finish_reason(result=res, payload=payload)\n responses = [ans[\"text\"].strip() for ans in res[\"choices\"]]\n else:\n response = await openai_async_request(\n url=self.url, headers=self.headers, payload=payload, read_response=False, stream=True\n )\n handler: TokenStreamingHandler = kwargs_with_defaults.pop(\"stream_handler\", DefaultTokenStreamingHandler())\n responses = self._process_streaming_response(response=response, stream_handler=handler)\n\n if moderation and await check_openai_async_policy_violation(input=responses, headers=self.headers):\n logger.info(\"Response '%s' will not be returned due to potential policy violation.\", responses)\n return []\n\n return responses\n\n def _process_streaming_response(self, response, stream_handler: TokenStreamingHandler):\n client = sseclient.SSEClient(response)\n tokens: List[str] = []\n try:\n for 
event in client.events():\n if event.data != TokenStreamingHandler.DONE_MARKER:\n event_data = json.loads(event.data)\n token: str = self._extract_token(event_data)\n if token:\n tokens.append(stream_handler(token, event_data=event_data[\"choices\"]))\n finally:\n client.close()\n return [\"\".join(tokens)] # return a list of strings just like non-streaming\n\n def _extract_token(self, event_data: Dict[str, Any]):\n return event_data[\"choices\"][0][\"text\"]\n\n def _ensure_token_limit(self, prompt: Union[str, List[Dict[str, str]]]) -> Union[str, List[Dict[str, str]]]:\n \"\"\"Ensure that the length of the prompt and answer is within the max tokens limit of the model.\n If needed, truncate the prompt text so that it fits within the limit.\n\n :param prompt: Prompt text to be sent to the generative model.\n \"\"\"\n n_prompt_tokens = len(self._tokenizer.encode(cast(str, prompt)))\n n_answer_tokens = self.max_length\n if (n_prompt_tokens + n_answer_tokens) <= self.max_tokens_limit:\n return prompt\n\n logger.warning(\n \"The prompt has been truncated from %s tokens to %s tokens so that the prompt length and \"\n \"answer length (%s tokens) fit within the max token limit (%s tokens). \"\n \"Reduce the length of the prompt to prevent it from being cut off.\",\n n_prompt_tokens,\n self.max_tokens_limit - n_answer_tokens,\n n_answer_tokens,\n self.max_tokens_limit,\n )\n\n tokenized_payload = self._tokenizer.encode(prompt)\n decoded_string = self._tokenizer.decode(tokenized_payload[: self.max_tokens_limit - n_answer_tokens])\n return decoded_string\n\n @classmethod\n def supports(cls, model_name_or_path: str, **kwargs) -> bool:\n valid_model = model_name_or_path in [\"ada\", \"babbage\", \"davinci\", \"curie\", \"gpt-3.5-turbo-instruct\"] or any(\n m in model_name_or_path for m in [\"-ada-\", \"-babbage-\", \"-davinci-\", \"-curie-\"]\n )\n return valid_model and not has_azure_parameters(**kwargs)\n", "path": "haystack/nodes/prompt/invocation_layer/open_ai.py"}]} | 4,060 | 242 |
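
The record above patches `haystack/nodes/prompt/invocation_layer/open_ai.py` so that the optional `seed` and `response_format` kwargs are accepted and forwarded to the completions payload only when the caller supplies them. The sketch below illustrates that conditional pass-through pattern on its own; `build_completion_payload` and its defaults are illustrative names for this note, not part of Haystack's API.

```python
from typing import Any, Dict


def build_completion_payload(model: str, prompt: str, **kwargs: Any) -> Dict[str, Any]:
    """Build a completions payload, adding optional knobs only when supplied."""
    payload: Dict[str, Any] = {
        "model": model,
        "prompt": prompt,
        "max_tokens": kwargs.get("max_tokens", 16),
        "temperature": kwargs.get("temperature", 0.7),
    }
    # Optional keys are included only if the caller set them, so endpoints that
    # do not understand `seed` or `response_format` never receive those fields.
    for key in ("seed", "response_format"):
        if kwargs.get(key) is not None:
            payload[key] = kwargs[key]
    return payload


# Example: build_completion_payload("gpt-3.5-turbo-instruct", "Hello", seed=42)
```
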
gh_patches_debug_21239 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-3068 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Celery queue names inconsistent
**Is your feature request related to a problem? Please describe.**
In the admin interface, the Celery Status tab shows all the current queues.
They seemed to be only ever-increasing recently, so I had to look into why that happened.
After wasting a lot of time, I figured out how to properly get into Flower (and how to start it...)
Here, I discovered that the Celery worker was ignoring all but four of the queues, so I had to manually add them.
This did not really seem to be a problem. However, when I entered `broadcasts`, as that is what the queue is named in the admin interface, nothing happened. An investigation later I found out that the queue was actually called `broadcast`, singular.
**Describe the solution you'd like**
So, please fix that. Either change the name of the queue, or change it in the admin interface, so that someone can look at the admin interface to know what the queue names are...
**Describe alternatives you've considered**
N/A
**Additional context**
N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/admin/celery_status.py`
Content:
```
1 """ celery status """
2 import json
3
4 from django.contrib.auth.decorators import login_required, permission_required
5 from django.http import HttpResponse
6 from django.template.response import TemplateResponse
7 from django.utils.decorators import method_decorator
8 from django.views import View
9 from django.views.decorators.http import require_GET
10 from django import forms
11 import redis
12
13 from celerywyrm import settings
14 from bookwyrm.tasks import (
15 app as celery,
16 LOW,
17 MEDIUM,
18 HIGH,
19 STREAMS,
20 IMAGES,
21 SUGGESTED_USERS,
22 EMAIL,
23 CONNECTORS,
24 LISTS,
25 INBOX,
26 IMPORTS,
27 IMPORT_TRIGGERED,
28 BROADCAST,
29 MISC,
30 )
31
32 r = redis.from_url(settings.REDIS_BROKER_URL)
33
34 # pylint: disable= no-self-use
35 @method_decorator(login_required, name="dispatch")
36 @method_decorator(
37 permission_required("bookwyrm.edit_instance_settings", raise_exception=True),
38 name="dispatch",
39 )
40 class CeleryStatus(View):
41 """Are your tasks running? Well you'd better go catch them"""
42
43 def get(self, request):
44 """See workers and active tasks"""
45 errors = []
46 try:
47 inspect = celery.control.inspect()
48 stats = inspect.stats()
49 active_tasks = inspect.active()
50 # pylint: disable=broad-except
51 except Exception as err:
52 stats = active_tasks = None
53 errors.append(err)
54
55 try:
56 queues = {
57 LOW: r.llen(LOW),
58 MEDIUM: r.llen(MEDIUM),
59 HIGH: r.llen(HIGH),
60 STREAMS: r.llen(STREAMS),
61 IMAGES: r.llen(IMAGES),
62 SUGGESTED_USERS: r.llen(SUGGESTED_USERS),
63 EMAIL: r.llen(EMAIL),
64 CONNECTORS: r.llen(CONNECTORS),
65 LISTS: r.llen(LISTS),
66 INBOX: r.llen(INBOX),
67 IMPORTS: r.llen(IMPORTS),
68 IMPORT_TRIGGERED: r.llen(IMPORT_TRIGGERED),
69 BROADCAST: r.llen(BROADCAST),
70 MISC: r.llen(MISC),
71 }
72 # pylint: disable=broad-except
73 except Exception as err:
74 queues = None
75 errors.append(err)
76
77 form = ClearCeleryForm()
78
79 data = {
80 "stats": stats,
81 "active_tasks": active_tasks,
82 "queues": queues,
83 "form": form,
84 "errors": errors,
85 }
86 return TemplateResponse(request, "settings/celery.html", data)
87
88 def post(self, request):
89 """Submit form to clear queues"""
90 form = ClearCeleryForm(request.POST)
91 if form.is_valid():
92 if len(celery.control.ping()) != 0:
93 return HttpResponse(
94 "Refusing to delete tasks while Celery worker is active"
95 )
96 pipeline = r.pipeline()
97 for queue in form.cleaned_data["queues"]:
98 for task in r.lrange(queue, 0, -1):
99 task_json = json.loads(task)
100 if task_json["headers"]["task"] in form.cleaned_data["tasks"]:
101 pipeline.lrem(queue, 0, task)
102 results = pipeline.execute()
103
104 return HttpResponse(f"Deleted {sum(results)} tasks")
105
106
107 class ClearCeleryForm(forms.Form):
108 """Form to clear queues"""
109
110 queues = forms.MultipleChoiceField(
111 label="Queues",
112 choices=[
113 (LOW, "Low prioirty"),
114 (MEDIUM, "Medium priority"),
115 (HIGH, "High priority"),
116 (STREAMS, "Streams"),
117 (IMAGES, "Images"),
118 (SUGGESTED_USERS, "Suggested users"),
119 (EMAIL, "Email"),
120 (CONNECTORS, "Connectors"),
121 (LISTS, "Lists"),
122 (INBOX, "Inbox"),
123 (IMPORTS, "Imports"),
124 (IMPORT_TRIGGERED, "Import triggered"),
125 (BROADCAST, "Broadcasts"),
126 (MISC, "Misc"),
127 ],
128 widget=forms.CheckboxSelectMultiple,
129 )
130 tasks = forms.MultipleChoiceField(
131 label="Tasks", choices=[], widget=forms.CheckboxSelectMultiple
132 )
133
134 def __init__(self, *args, **kwargs):
135 super().__init__(*args, **kwargs)
136 celery.loader.import_default_modules()
137 self.fields["tasks"].choices = sorted(
138 [(k, k) for k in celery.tasks.keys() if not k.startswith("celery.")]
139 )
140
141
142 @require_GET
143 # pylint: disable=unused-argument
144 def celery_ping(request):
145 """Just tells you if Celery is on or not"""
146 try:
147 ping = celery.control.inspect().ping()
148 if ping:
149 return HttpResponse()
150 # pylint: disable=broad-except
151 except Exception:
152 pass
153
154 return HttpResponse(status=500)
155
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/views/admin/celery_status.py b/bookwyrm/views/admin/celery_status.py
--- a/bookwyrm/views/admin/celery_status.py
+++ b/bookwyrm/views/admin/celery_status.py
@@ -110,20 +110,20 @@
queues = forms.MultipleChoiceField(
label="Queues",
choices=[
- (LOW, "Low prioirty"),
+ (LOW, "Low priority"),
(MEDIUM, "Medium priority"),
(HIGH, "High priority"),
- (STREAMS, "Streams"),
- (IMAGES, "Images"),
- (SUGGESTED_USERS, "Suggested users"),
- (EMAIL, "Email"),
+ (BROADCAST, "Broadcast"),
(CONNECTORS, "Connectors"),
- (LISTS, "Lists"),
- (INBOX, "Inbox"),
+ (EMAIL, "Email"),
+ (IMAGES, "Images"),
(IMPORTS, "Imports"),
(IMPORT_TRIGGERED, "Import triggered"),
- (BROADCAST, "Broadcasts"),
+ (INBOX, "Inbox"),
+ (LISTS, "Lists"),
(MISC, "Misc"),
+ (STREAMS, "Streams"),
+ (SUGGESTED_USERS, "Suggested users"),
],
widget=forms.CheckboxSelectMultiple,
)
| {"golden_diff": "diff --git a/bookwyrm/views/admin/celery_status.py b/bookwyrm/views/admin/celery_status.py\n--- a/bookwyrm/views/admin/celery_status.py\n+++ b/bookwyrm/views/admin/celery_status.py\n@@ -110,20 +110,20 @@\n queues = forms.MultipleChoiceField(\n label=\"Queues\",\n choices=[\n- (LOW, \"Low prioirty\"),\n+ (LOW, \"Low priority\"),\n (MEDIUM, \"Medium priority\"),\n (HIGH, \"High priority\"),\n- (STREAMS, \"Streams\"),\n- (IMAGES, \"Images\"),\n- (SUGGESTED_USERS, \"Suggested users\"),\n- (EMAIL, \"Email\"),\n+ (BROADCAST, \"Broadcast\"),\n (CONNECTORS, \"Connectors\"),\n- (LISTS, \"Lists\"),\n- (INBOX, \"Inbox\"),\n+ (EMAIL, \"Email\"),\n+ (IMAGES, \"Images\"),\n (IMPORTS, \"Imports\"),\n (IMPORT_TRIGGERED, \"Import triggered\"),\n- (BROADCAST, \"Broadcasts\"),\n+ (INBOX, \"Inbox\"),\n+ (LISTS, \"Lists\"),\n (MISC, \"Misc\"),\n+ (STREAMS, \"Streams\"),\n+ (SUGGESTED_USERS, \"Suggested users\"),\n ],\n widget=forms.CheckboxSelectMultiple,\n )\n", "issue": " Celery queue names inconsistent\n**Is your feature request related to a problem? Please describe.**\r\nIn the admin interface, the Celery Status tab shows all the current queues.\r\nThey seemed to be only ever-increasing recently, so I had to look into why that happened.\r\nAfter a lot of wasting of time, I figured out how to properly get into Flower, (and how to start it...)\r\nHere, I discovered that the Celery worker was ignoring all but four of the queues, so I had to manually add them.\r\nThis did not really seem to be a problem. However, when I entered `broadcasts`, as that is what the queue is named in the admin interface, nothing happened. An investigation later I found out that the queue was actually called `broadcast`, singular.\r\n\r\n**Describe the solution you'd like**\r\nSo, please fix that. Either change the name of the queue, or change it in the admin interface, so that someone can look at the admin interface to know what the queue names are...\r\n\r\n**Describe alternatives you've considered**\r\nN/A\r\n\r\n**Additional context**\r\nN/A\r\n\n", "before_files": [{"content": "\"\"\" celery status \"\"\"\nimport json\n\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.http import HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.decorators.http import require_GET\nfrom django import forms\nimport redis\n\nfrom celerywyrm import settings\nfrom bookwyrm.tasks import (\n app as celery,\n LOW,\n MEDIUM,\n HIGH,\n STREAMS,\n IMAGES,\n SUGGESTED_USERS,\n EMAIL,\n CONNECTORS,\n LISTS,\n INBOX,\n IMPORTS,\n IMPORT_TRIGGERED,\n BROADCAST,\n MISC,\n)\n\nr = redis.from_url(settings.REDIS_BROKER_URL)\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_instance_settings\", raise_exception=True),\n name=\"dispatch\",\n)\nclass CeleryStatus(View):\n \"\"\"Are your tasks running? 
Well you'd better go catch them\"\"\"\n\n def get(self, request):\n \"\"\"See workers and active tasks\"\"\"\n errors = []\n try:\n inspect = celery.control.inspect()\n stats = inspect.stats()\n active_tasks = inspect.active()\n # pylint: disable=broad-except\n except Exception as err:\n stats = active_tasks = None\n errors.append(err)\n\n try:\n queues = {\n LOW: r.llen(LOW),\n MEDIUM: r.llen(MEDIUM),\n HIGH: r.llen(HIGH),\n STREAMS: r.llen(STREAMS),\n IMAGES: r.llen(IMAGES),\n SUGGESTED_USERS: r.llen(SUGGESTED_USERS),\n EMAIL: r.llen(EMAIL),\n CONNECTORS: r.llen(CONNECTORS),\n LISTS: r.llen(LISTS),\n INBOX: r.llen(INBOX),\n IMPORTS: r.llen(IMPORTS),\n IMPORT_TRIGGERED: r.llen(IMPORT_TRIGGERED),\n BROADCAST: r.llen(BROADCAST),\n MISC: r.llen(MISC),\n }\n # pylint: disable=broad-except\n except Exception as err:\n queues = None\n errors.append(err)\n\n form = ClearCeleryForm()\n\n data = {\n \"stats\": stats,\n \"active_tasks\": active_tasks,\n \"queues\": queues,\n \"form\": form,\n \"errors\": errors,\n }\n return TemplateResponse(request, \"settings/celery.html\", data)\n\n def post(self, request):\n \"\"\"Submit form to clear queues\"\"\"\n form = ClearCeleryForm(request.POST)\n if form.is_valid():\n if len(celery.control.ping()) != 0:\n return HttpResponse(\n \"Refusing to delete tasks while Celery worker is active\"\n )\n pipeline = r.pipeline()\n for queue in form.cleaned_data[\"queues\"]:\n for task in r.lrange(queue, 0, -1):\n task_json = json.loads(task)\n if task_json[\"headers\"][\"task\"] in form.cleaned_data[\"tasks\"]:\n pipeline.lrem(queue, 0, task)\n results = pipeline.execute()\n\n return HttpResponse(f\"Deleted {sum(results)} tasks\")\n\n\nclass ClearCeleryForm(forms.Form):\n \"\"\"Form to clear queues\"\"\"\n\n queues = forms.MultipleChoiceField(\n label=\"Queues\",\n choices=[\n (LOW, \"Low prioirty\"),\n (MEDIUM, \"Medium priority\"),\n (HIGH, \"High priority\"),\n (STREAMS, \"Streams\"),\n (IMAGES, \"Images\"),\n (SUGGESTED_USERS, \"Suggested users\"),\n (EMAIL, \"Email\"),\n (CONNECTORS, \"Connectors\"),\n (LISTS, \"Lists\"),\n (INBOX, \"Inbox\"),\n (IMPORTS, \"Imports\"),\n (IMPORT_TRIGGERED, \"Import triggered\"),\n (BROADCAST, \"Broadcasts\"),\n (MISC, \"Misc\"),\n ],\n widget=forms.CheckboxSelectMultiple,\n )\n tasks = forms.MultipleChoiceField(\n label=\"Tasks\", choices=[], widget=forms.CheckboxSelectMultiple\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n celery.loader.import_default_modules()\n self.fields[\"tasks\"].choices = sorted(\n [(k, k) for k in celery.tasks.keys() if not k.startswith(\"celery.\")]\n )\n\n\n@require_GET\n# pylint: disable=unused-argument\ndef celery_ping(request):\n \"\"\"Just tells you if Celery is on or not\"\"\"\n try:\n ping = celery.control.inspect().ping()\n if ping:\n return HttpResponse()\n # pylint: disable=broad-except\n except Exception:\n pass\n\n return HttpResponse(status=500)\n", "path": "bookwyrm/views/admin/celery_status.py"}], "after_files": [{"content": "\"\"\" celery status \"\"\"\nimport json\n\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.http import HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.decorators.http import require_GET\nfrom django import forms\nimport redis\n\nfrom celerywyrm import settings\nfrom bookwyrm.tasks import (\n app as celery,\n LOW,\n MEDIUM,\n HIGH,\n STREAMS,\n IMAGES,\n 
SUGGESTED_USERS,\n EMAIL,\n CONNECTORS,\n LISTS,\n INBOX,\n IMPORTS,\n IMPORT_TRIGGERED,\n BROADCAST,\n MISC,\n)\n\nr = redis.from_url(settings.REDIS_BROKER_URL)\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_instance_settings\", raise_exception=True),\n name=\"dispatch\",\n)\nclass CeleryStatus(View):\n \"\"\"Are your tasks running? Well you'd better go catch them\"\"\"\n\n def get(self, request):\n \"\"\"See workers and active tasks\"\"\"\n errors = []\n try:\n inspect = celery.control.inspect()\n stats = inspect.stats()\n active_tasks = inspect.active()\n # pylint: disable=broad-except\n except Exception as err:\n stats = active_tasks = None\n errors.append(err)\n\n try:\n queues = {\n LOW: r.llen(LOW),\n MEDIUM: r.llen(MEDIUM),\n HIGH: r.llen(HIGH),\n STREAMS: r.llen(STREAMS),\n IMAGES: r.llen(IMAGES),\n SUGGESTED_USERS: r.llen(SUGGESTED_USERS),\n EMAIL: r.llen(EMAIL),\n CONNECTORS: r.llen(CONNECTORS),\n LISTS: r.llen(LISTS),\n INBOX: r.llen(INBOX),\n IMPORTS: r.llen(IMPORTS),\n IMPORT_TRIGGERED: r.llen(IMPORT_TRIGGERED),\n BROADCAST: r.llen(BROADCAST),\n MISC: r.llen(MISC),\n }\n # pylint: disable=broad-except\n except Exception as err:\n queues = None\n errors.append(err)\n\n form = ClearCeleryForm()\n\n data = {\n \"stats\": stats,\n \"active_tasks\": active_tasks,\n \"queues\": queues,\n \"form\": form,\n \"errors\": errors,\n }\n return TemplateResponse(request, \"settings/celery.html\", data)\n\n def post(self, request):\n \"\"\"Submit form to clear queues\"\"\"\n form = ClearCeleryForm(request.POST)\n if form.is_valid():\n if len(celery.control.ping()) != 0:\n return HttpResponse(\n \"Refusing to delete tasks while Celery worker is active\"\n )\n pipeline = r.pipeline()\n for queue in form.cleaned_data[\"queues\"]:\n for task in r.lrange(queue, 0, -1):\n task_json = json.loads(task)\n if task_json[\"headers\"][\"task\"] in form.cleaned_data[\"tasks\"]:\n pipeline.lrem(queue, 0, task)\n results = pipeline.execute()\n\n return HttpResponse(f\"Deleted {sum(results)} tasks\")\n\n\nclass ClearCeleryForm(forms.Form):\n \"\"\"Form to clear queues\"\"\"\n\n queues = forms.MultipleChoiceField(\n label=\"Queues\",\n choices=[\n (LOW, \"Low priority\"),\n (MEDIUM, \"Medium priority\"),\n (HIGH, \"High priority\"),\n (BROADCAST, \"Broadcast\"),\n (CONNECTORS, \"Connectors\"),\n (EMAIL, \"Email\"),\n (IMAGES, \"Images\"),\n (IMPORTS, \"Imports\"),\n (IMPORT_TRIGGERED, \"Import triggered\"),\n (INBOX, \"Inbox\"),\n (LISTS, \"Lists\"),\n (MISC, \"Misc\"),\n (STREAMS, \"Streams\"),\n (SUGGESTED_USERS, \"Suggested users\"),\n ],\n widget=forms.CheckboxSelectMultiple,\n )\n tasks = forms.MultipleChoiceField(\n label=\"Tasks\", choices=[], widget=forms.CheckboxSelectMultiple\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n celery.loader.import_default_modules()\n self.fields[\"tasks\"].choices = sorted(\n [(k, k) for k in celery.tasks.keys() if not k.startswith(\"celery.\")]\n )\n\n\n@require_GET\n# pylint: disable=unused-argument\ndef celery_ping(request):\n \"\"\"Just tells you if Celery is on or not\"\"\"\n try:\n ping = celery.control.inspect().ping()\n if ping:\n return HttpResponse()\n # pylint: disable=broad-except\n except Exception:\n pass\n\n return HttpResponse(status=500)\n", "path": "bookwyrm/views/admin/celery_status.py"}]} | 1,878 | 306 |
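
The accepted patch above fixes the `Broadcasts`/`broadcast` label mismatch by hand and alphabetizes the remaining choices. A defensive alternative, shown below only as a sketch (the constant values are assumptions and BookWyrm's admin form is not actually written this way), is to derive the form choices from the queue-name constants so the labels can never drift from the real Celery queue names:

```python
# Illustrative stand-ins for the constants BookWyrm defines in bookwyrm.tasks;
# the string values here are assumed for the sketch.
BROADCAST = "broadcast"
HIGH = "high_priority"
LOW = "low_priority"
MEDIUM = "medium_priority"
MISC = "misc"

QUEUES = [LOW, MEDIUM, HIGH, BROADCAST, MISC]


def queue_choices(queues=QUEUES):
    """Return sorted (value, label) pairs whose labels match the real queue names."""
    return sorted((name, name.replace("_", " ").capitalize()) for name in queues)


print(queue_choices())
# [('broadcast', 'Broadcast'), ('high_priority', 'High priority'), ...]
```

Choices generated this way would also keep any newly added queue visible in the admin form automatically.
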
gh_patches_debug_33749 | rasdani/github-patches | git_diff | scverse__scanpy-2089 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use session_info instead of sinfo
[`sinfo` has been replaced](https://pypi.org/project/sinfo/) with [`session_info`](https://gitlab.com/joelostblom/session_info), which is definitely a better name. We should switch over to using this.
I think we'll be calling it like: `import session_info; session_info.show(dependencies=True, html=False, **extra_kwargs)`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scanpy/logging.py`
Content:
```
1 """Logging and Profiling
2 """
3 import io
4 import logging
5 import sys
6 from functools import update_wrapper, partial
7 from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET
8 from datetime import datetime, timedelta, timezone
9 from typing import Optional
10
11 import anndata.logging
12 from sinfo import sinfo
13
14
15 HINT = (INFO + DEBUG) // 2
16 logging.addLevelName(HINT, 'HINT')
17
18
19 class _RootLogger(logging.RootLogger):
20 def __init__(self, level):
21 super().__init__(level)
22 self.propagate = False
23 _RootLogger.manager = logging.Manager(self)
24
25 def log(
26 self,
27 level: int,
28 msg: str,
29 *,
30 extra: Optional[dict] = None,
31 time: datetime = None,
32 deep: Optional[str] = None,
33 ) -> datetime:
34 from . import settings
35
36 now = datetime.now(timezone.utc)
37 time_passed: timedelta = None if time is None else now - time
38 extra = {
39 **(extra or {}),
40 'deep': deep if settings.verbosity.level < level else None,
41 'time_passed': time_passed,
42 }
43 super().log(level, msg, extra=extra)
44 return now
45
46 def critical(self, msg, *, time=None, deep=None, extra=None) -> datetime:
47 return self.log(CRITICAL, msg, time=time, deep=deep, extra=extra)
48
49 def error(self, msg, *, time=None, deep=None, extra=None) -> datetime:
50 return self.log(ERROR, msg, time=time, deep=deep, extra=extra)
51
52 def warning(self, msg, *, time=None, deep=None, extra=None) -> datetime:
53 return self.log(WARNING, msg, time=time, deep=deep, extra=extra)
54
55 def info(self, msg, *, time=None, deep=None, extra=None) -> datetime:
56 return self.log(INFO, msg, time=time, deep=deep, extra=extra)
57
58 def hint(self, msg, *, time=None, deep=None, extra=None) -> datetime:
59 return self.log(HINT, msg, time=time, deep=deep, extra=extra)
60
61 def debug(self, msg, *, time=None, deep=None, extra=None) -> datetime:
62 return self.log(DEBUG, msg, time=time, deep=deep, extra=extra)
63
64
65 def _set_log_file(settings):
66 file = settings.logfile
67 name = settings.logpath
68 root = settings._root_logger
69 h = logging.StreamHandler(file) if name is None else logging.FileHandler(name)
70 h.setFormatter(_LogFormatter())
71 h.setLevel(root.level)
72 if len(root.handlers) == 1:
73 root.removeHandler(root.handlers[0])
74 elif len(root.handlers) > 1:
75 raise RuntimeError('Scanpy’s root logger somehow got more than one handler')
76 root.addHandler(h)
77
78
79 def _set_log_level(settings, level: int):
80 root = settings._root_logger
81 root.setLevel(level)
82 (h,) = root.handlers # may only be 1
83 h.setLevel(level)
84
85
86 class _LogFormatter(logging.Formatter):
87 def __init__(
88 self, fmt='{levelname}: {message}', datefmt='%Y-%m-%d %H:%M', style='{'
89 ):
90 super().__init__(fmt, datefmt, style)
91
92 def format(self, record: logging.LogRecord):
93 format_orig = self._style._fmt
94 if record.levelno == INFO:
95 self._style._fmt = '{message}'
96 elif record.levelno == HINT:
97 self._style._fmt = '--> {message}'
98 elif record.levelno == DEBUG:
99 self._style._fmt = ' {message}'
100 if record.time_passed:
101 # strip microseconds
102 if record.time_passed.microseconds:
103 record.time_passed = timedelta(
104 seconds=int(record.time_passed.total_seconds())
105 )
106 if '{time_passed}' in record.msg:
107 record.msg = record.msg.replace(
108 '{time_passed}', str(record.time_passed)
109 )
110 else:
111 self._style._fmt += ' ({time_passed})'
112 if record.deep:
113 record.msg = f'{record.msg}: {record.deep}'
114 result = logging.Formatter.format(self, record)
115 self._style._fmt = format_orig
116 return result
117
118
119 print_memory_usage = anndata.logging.print_memory_usage
120 get_memory_usage = anndata.logging.get_memory_usage
121
122
123 _DEPENDENCIES_NUMERICS = [
124 'anndata', # anndata actually shouldn't, but as long as it's in development
125 'umap',
126 'numpy',
127 'scipy',
128 'pandas',
129 ('sklearn', 'scikit-learn'),
130 'statsmodels',
131 ('igraph', 'python-igraph'),
132 'louvain',
133 'leidenalg',
134 'pynndescent',
135 ]
136
137
138 def _versions_dependencies(dependencies):
139 # this is not the same as the requirements!
140 for mod in dependencies:
141 mod_name, dist_name = mod if isinstance(mod, tuple) else (mod, mod)
142 try:
143 imp = __import__(mod_name)
144 yield dist_name, imp.__version__
145 except (ImportError, AttributeError):
146 pass
147
148
149 def print_header(*, file=None):
150 """\
151 Versions that might influence the numerical results.
152 Matplotlib and Seaborn are excluded from this.
153 """
154
155 modules = ['scanpy'] + _DEPENDENCIES_NUMERICS
156 print(
157 ' '.join(f'{mod}=={ver}' for mod, ver in _versions_dependencies(modules)),
158 file=file or sys.stdout,
159 )
160
161
162 def print_versions(*, file=None):
163 """Print print versions of imported packages"""
164 if file is None: # Inform people about the behavior change
165 warning('If you miss a compact list, please try `print_header`!')
166 stdout = sys.stdout
167 try:
168 buf = sys.stdout = io.StringIO()
169 sinfo(
170 dependencies=True,
171 excludes=[
172 'builtins',
173 'stdlib_list',
174 'importlib_metadata',
175 # Special module present if test coverage being calculated
176 # https://gitlab.com/joelostblom/sinfo/-/issues/10
177 "$coverage",
178 ],
179 )
180 finally:
181 sys.stdout = stdout
182 output = buf.getvalue()
183 print(output, file=file)
184
185
186 def print_version_and_date(*, file=None):
187 """\
188 Useful for starting a notebook so you see when you started working.
189 """
190 from . import __version__
191
192 if file is None:
193 file = sys.stdout
194 print(
195 f'Running Scanpy {__version__}, ' f'on {datetime.now():%Y-%m-%d %H:%M}.',
196 file=file,
197 )
198
199
200 def _copy_docs_and_signature(fn):
201 return partial(update_wrapper, wrapped=fn, assigned=['__doc__', '__annotations__'])
202
203
204 def error(
205 msg: str,
206 *,
207 time: datetime = None,
208 deep: Optional[str] = None,
209 extra: Optional[dict] = None,
210 ) -> datetime:
211 """\
212 Log message with specific level and return current time.
213
214 Parameters
215 ==========
216 msg
217 Message to display.
218 time
219 A time in the past. If this is passed, the time difference from then
220 to now is appended to `msg` as ` (HH:MM:SS)`.
221 If `msg` contains `{time_passed}`, the time difference is instead
222 inserted at that position.
223 deep
224 If the current verbosity is higher than the log function’s level,
225 this gets displayed as well
226 extra
227 Additional values you can specify in `msg` like `{time_passed}`.
228 """
229 from ._settings import settings
230
231 return settings._root_logger.error(msg, time=time, deep=deep, extra=extra)
232
233
234 @_copy_docs_and_signature(error)
235 def warning(msg, *, time=None, deep=None, extra=None) -> datetime:
236 from ._settings import settings
237
238 return settings._root_logger.warning(msg, time=time, deep=deep, extra=extra)
239
240
241 @_copy_docs_and_signature(error)
242 def info(msg, *, time=None, deep=None, extra=None) -> datetime:
243 from ._settings import settings
244
245 return settings._root_logger.info(msg, time=time, deep=deep, extra=extra)
246
247
248 @_copy_docs_and_signature(error)
249 def hint(msg, *, time=None, deep=None, extra=None) -> datetime:
250 from ._settings import settings
251
252 return settings._root_logger.hint(msg, time=time, deep=deep, extra=extra)
253
254
255 @_copy_docs_and_signature(error)
256 def debug(msg, *, time=None, deep=None, extra=None) -> datetime:
257 from ._settings import settings
258
259 return settings._root_logger.debug(msg, time=time, deep=deep, extra=extra)
260
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scanpy/logging.py b/scanpy/logging.py
--- a/scanpy/logging.py
+++ b/scanpy/logging.py
@@ -1,15 +1,14 @@
"""Logging and Profiling
"""
-import io
import logging
import sys
from functools import update_wrapper, partial
-from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET
+from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG
from datetime import datetime, timedelta, timezone
-from typing import Optional
+from typing import Optional, IO
+import warnings
import anndata.logging
-from sinfo import sinfo
HINT = (INFO + DEBUG) // 2
@@ -159,28 +158,37 @@
)
-def print_versions(*, file=None):
- """Print print versions of imported packages"""
- if file is None: # Inform people about the behavior change
- warning('If you miss a compact list, please try `print_header`!')
- stdout = sys.stdout
- try:
- buf = sys.stdout = io.StringIO()
- sinfo(
+def print_versions(*, file: Optional[IO[str]] = None):
+ """\
+ Print versions of imported packages, OS, and jupyter environment.
+
+ For more options (including rich output) use `session_info.show` directly.
+ """
+ import session_info
+
+ if file is not None:
+ from contextlib import redirect_stdout
+
+ warnings.warn(
+ "Passing argument 'file' to print_versions is deprecated, and will be "
+ "removed in a future version.",
+ FutureWarning,
+ )
+ with redirect_stdout(file):
+ print_versions()
+ else:
+ session_info.show(
dependencies=True,
+ html=False,
excludes=[
'builtins',
'stdlib_list',
'importlib_metadata',
# Special module present if test coverage being calculated
- # https://gitlab.com/joelostblom/sinfo/-/issues/10
+ # https://gitlab.com/joelostblom/session_info/-/issues/10
"$coverage",
],
)
- finally:
- sys.stdout = stdout
- output = buf.getvalue()
- print(output, file=file)
def print_version_and_date(*, file=None):
| {"golden_diff": "diff --git a/scanpy/logging.py b/scanpy/logging.py\n--- a/scanpy/logging.py\n+++ b/scanpy/logging.py\n@@ -1,15 +1,14 @@\n \"\"\"Logging and Profiling\n \"\"\"\n-import io\n import logging\n import sys\n from functools import update_wrapper, partial\n-from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET\n+from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG\n from datetime import datetime, timedelta, timezone\n-from typing import Optional\n+from typing import Optional, IO\n+import warnings\n \n import anndata.logging\n-from sinfo import sinfo\n \n \n HINT = (INFO + DEBUG) // 2\n@@ -159,28 +158,37 @@\n )\n \n \n-def print_versions(*, file=None):\n- \"\"\"Print print versions of imported packages\"\"\"\n- if file is None: # Inform people about the behavior change\n- warning('If you miss a compact list, please try `print_header`!')\n- stdout = sys.stdout\n- try:\n- buf = sys.stdout = io.StringIO()\n- sinfo(\n+def print_versions(*, file: Optional[IO[str]] = None):\n+ \"\"\"\\\n+ Print versions of imported packages, OS, and jupyter environment.\n+\n+ For more options (including rich output) use `session_info.show` directly.\n+ \"\"\"\n+ import session_info\n+\n+ if file is not None:\n+ from contextlib import redirect_stdout\n+\n+ warnings.warn(\n+ \"Passing argument 'file' to print_versions is deprecated, and will be \"\n+ \"removed in a future version.\",\n+ FutureWarning,\n+ )\n+ with redirect_stdout(file):\n+ print_versions()\n+ else:\n+ session_info.show(\n dependencies=True,\n+ html=False,\n excludes=[\n 'builtins',\n 'stdlib_list',\n 'importlib_metadata',\n # Special module present if test coverage being calculated\n- # https://gitlab.com/joelostblom/sinfo/-/issues/10\n+ # https://gitlab.com/joelostblom/session_info/-/issues/10\n \"$coverage\",\n ],\n )\n- finally:\n- sys.stdout = stdout\n- output = buf.getvalue()\n- print(output, file=file)\n \n \n def print_version_and_date(*, file=None):\n", "issue": "Use session_info instead of sinfo\n[`sinfo` has been replaced](https://pypi.org/project/sinfo/) with [`session_info`](https://gitlab.com/joelostblom/session_info), which is definitely a better name. We should switch over to using this.\r\n\r\nI think we'll be calling it like: `import session_info; session_info.show(dependencies=True, html=False, **extra_kwargs)`\n", "before_files": [{"content": "\"\"\"Logging and Profiling\n\"\"\"\nimport io\nimport logging\nimport sys\nfrom functools import update_wrapper, partial\nfrom logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET\nfrom datetime import datetime, timedelta, timezone\nfrom typing import Optional\n\nimport anndata.logging\nfrom sinfo import sinfo\n\n\nHINT = (INFO + DEBUG) // 2\nlogging.addLevelName(HINT, 'HINT')\n\n\nclass _RootLogger(logging.RootLogger):\n def __init__(self, level):\n super().__init__(level)\n self.propagate = False\n _RootLogger.manager = logging.Manager(self)\n\n def log(\n self,\n level: int,\n msg: str,\n *,\n extra: Optional[dict] = None,\n time: datetime = None,\n deep: Optional[str] = None,\n ) -> datetime:\n from . 
import settings\n\n now = datetime.now(timezone.utc)\n time_passed: timedelta = None if time is None else now - time\n extra = {\n **(extra or {}),\n 'deep': deep if settings.verbosity.level < level else None,\n 'time_passed': time_passed,\n }\n super().log(level, msg, extra=extra)\n return now\n\n def critical(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(CRITICAL, msg, time=time, deep=deep, extra=extra)\n\n def error(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(ERROR, msg, time=time, deep=deep, extra=extra)\n\n def warning(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(WARNING, msg, time=time, deep=deep, extra=extra)\n\n def info(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(INFO, msg, time=time, deep=deep, extra=extra)\n\n def hint(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(HINT, msg, time=time, deep=deep, extra=extra)\n\n def debug(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(DEBUG, msg, time=time, deep=deep, extra=extra)\n\n\ndef _set_log_file(settings):\n file = settings.logfile\n name = settings.logpath\n root = settings._root_logger\n h = logging.StreamHandler(file) if name is None else logging.FileHandler(name)\n h.setFormatter(_LogFormatter())\n h.setLevel(root.level)\n if len(root.handlers) == 1:\n root.removeHandler(root.handlers[0])\n elif len(root.handlers) > 1:\n raise RuntimeError('Scanpy\u2019s root logger somehow got more than one handler')\n root.addHandler(h)\n\n\ndef _set_log_level(settings, level: int):\n root = settings._root_logger\n root.setLevel(level)\n (h,) = root.handlers # may only be 1\n h.setLevel(level)\n\n\nclass _LogFormatter(logging.Formatter):\n def __init__(\n self, fmt='{levelname}: {message}', datefmt='%Y-%m-%d %H:%M', style='{'\n ):\n super().__init__(fmt, datefmt, style)\n\n def format(self, record: logging.LogRecord):\n format_orig = self._style._fmt\n if record.levelno == INFO:\n self._style._fmt = '{message}'\n elif record.levelno == HINT:\n self._style._fmt = '--> {message}'\n elif record.levelno == DEBUG:\n self._style._fmt = ' {message}'\n if record.time_passed:\n # strip microseconds\n if record.time_passed.microseconds:\n record.time_passed = timedelta(\n seconds=int(record.time_passed.total_seconds())\n )\n if '{time_passed}' in record.msg:\n record.msg = record.msg.replace(\n '{time_passed}', str(record.time_passed)\n )\n else:\n self._style._fmt += ' ({time_passed})'\n if record.deep:\n record.msg = f'{record.msg}: {record.deep}'\n result = logging.Formatter.format(self, record)\n self._style._fmt = format_orig\n return result\n\n\nprint_memory_usage = anndata.logging.print_memory_usage\nget_memory_usage = anndata.logging.get_memory_usage\n\n\n_DEPENDENCIES_NUMERICS = [\n 'anndata', # anndata actually shouldn't, but as long as it's in development\n 'umap',\n 'numpy',\n 'scipy',\n 'pandas',\n ('sklearn', 'scikit-learn'),\n 'statsmodels',\n ('igraph', 'python-igraph'),\n 'louvain',\n 'leidenalg',\n 'pynndescent',\n]\n\n\ndef _versions_dependencies(dependencies):\n # this is not the same as the requirements!\n for mod in dependencies:\n mod_name, dist_name = mod if isinstance(mod, tuple) else (mod, mod)\n try:\n imp = __import__(mod_name)\n yield dist_name, imp.__version__\n except (ImportError, AttributeError):\n pass\n\n\ndef print_header(*, file=None):\n \"\"\"\\\n Versions that might influence the numerical results.\n Matplotlib and 
Seaborn are excluded from this.\n \"\"\"\n\n modules = ['scanpy'] + _DEPENDENCIES_NUMERICS\n print(\n ' '.join(f'{mod}=={ver}' for mod, ver in _versions_dependencies(modules)),\n file=file or sys.stdout,\n )\n\n\ndef print_versions(*, file=None):\n \"\"\"Print print versions of imported packages\"\"\"\n if file is None: # Inform people about the behavior change\n warning('If you miss a compact list, please try `print_header`!')\n stdout = sys.stdout\n try:\n buf = sys.stdout = io.StringIO()\n sinfo(\n dependencies=True,\n excludes=[\n 'builtins',\n 'stdlib_list',\n 'importlib_metadata',\n # Special module present if test coverage being calculated\n # https://gitlab.com/joelostblom/sinfo/-/issues/10\n \"$coverage\",\n ],\n )\n finally:\n sys.stdout = stdout\n output = buf.getvalue()\n print(output, file=file)\n\n\ndef print_version_and_date(*, file=None):\n \"\"\"\\\n Useful for starting a notebook so you see when you started working.\n \"\"\"\n from . import __version__\n\n if file is None:\n file = sys.stdout\n print(\n f'Running Scanpy {__version__}, ' f'on {datetime.now():%Y-%m-%d %H:%M}.',\n file=file,\n )\n\n\ndef _copy_docs_and_signature(fn):\n return partial(update_wrapper, wrapped=fn, assigned=['__doc__', '__annotations__'])\n\n\ndef error(\n msg: str,\n *,\n time: datetime = None,\n deep: Optional[str] = None,\n extra: Optional[dict] = None,\n) -> datetime:\n \"\"\"\\\n Log message with specific level and return current time.\n\n Parameters\n ==========\n msg\n Message to display.\n time\n A time in the past. If this is passed, the time difference from then\n to now is appended to `msg` as ` (HH:MM:SS)`.\n If `msg` contains `{time_passed}`, the time difference is instead\n inserted at that position.\n deep\n If the current verbosity is higher than the log function\u2019s level,\n this gets displayed as well\n extra\n Additional values you can specify in `msg` like `{time_passed}`.\n \"\"\"\n from ._settings import settings\n\n return settings._root_logger.error(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef warning(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.warning(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef info(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.info(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef hint(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.hint(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef debug(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.debug(msg, time=time, deep=deep, extra=extra)\n", "path": "scanpy/logging.py"}], "after_files": [{"content": "\"\"\"Logging and Profiling\n\"\"\"\nimport logging\nimport sys\nfrom functools import update_wrapper, partial\nfrom logging import CRITICAL, ERROR, WARNING, INFO, DEBUG\nfrom datetime import datetime, timedelta, timezone\nfrom typing import Optional, IO\nimport warnings\n\nimport anndata.logging\n\n\nHINT = (INFO + DEBUG) // 2\nlogging.addLevelName(HINT, 'HINT')\n\n\nclass _RootLogger(logging.RootLogger):\n def __init__(self, level):\n super().__init__(level)\n self.propagate = False\n _RootLogger.manager = logging.Manager(self)\n\n def log(\n self,\n level: int,\n msg: 
str,\n *,\n extra: Optional[dict] = None,\n time: datetime = None,\n deep: Optional[str] = None,\n ) -> datetime:\n from . import settings\n\n now = datetime.now(timezone.utc)\n time_passed: timedelta = None if time is None else now - time\n extra = {\n **(extra or {}),\n 'deep': deep if settings.verbosity.level < level else None,\n 'time_passed': time_passed,\n }\n super().log(level, msg, extra=extra)\n return now\n\n def critical(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(CRITICAL, msg, time=time, deep=deep, extra=extra)\n\n def error(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(ERROR, msg, time=time, deep=deep, extra=extra)\n\n def warning(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(WARNING, msg, time=time, deep=deep, extra=extra)\n\n def info(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(INFO, msg, time=time, deep=deep, extra=extra)\n\n def hint(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(HINT, msg, time=time, deep=deep, extra=extra)\n\n def debug(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(DEBUG, msg, time=time, deep=deep, extra=extra)\n\n\ndef _set_log_file(settings):\n file = settings.logfile\n name = settings.logpath\n root = settings._root_logger\n h = logging.StreamHandler(file) if name is None else logging.FileHandler(name)\n h.setFormatter(_LogFormatter())\n h.setLevel(root.level)\n if len(root.handlers) == 1:\n root.removeHandler(root.handlers[0])\n elif len(root.handlers) > 1:\n raise RuntimeError('Scanpy\u2019s root logger somehow got more than one handler')\n root.addHandler(h)\n\n\ndef _set_log_level(settings, level: int):\n root = settings._root_logger\n root.setLevel(level)\n (h,) = root.handlers # may only be 1\n h.setLevel(level)\n\n\nclass _LogFormatter(logging.Formatter):\n def __init__(\n self, fmt='{levelname}: {message}', datefmt='%Y-%m-%d %H:%M', style='{'\n ):\n super().__init__(fmt, datefmt, style)\n\n def format(self, record: logging.LogRecord):\n format_orig = self._style._fmt\n if record.levelno == INFO:\n self._style._fmt = '{message}'\n elif record.levelno == HINT:\n self._style._fmt = '--> {message}'\n elif record.levelno == DEBUG:\n self._style._fmt = ' {message}'\n if record.time_passed:\n # strip microseconds\n if record.time_passed.microseconds:\n record.time_passed = timedelta(\n seconds=int(record.time_passed.total_seconds())\n )\n if '{time_passed}' in record.msg:\n record.msg = record.msg.replace(\n '{time_passed}', str(record.time_passed)\n )\n else:\n self._style._fmt += ' ({time_passed})'\n if record.deep:\n record.msg = f'{record.msg}: {record.deep}'\n result = logging.Formatter.format(self, record)\n self._style._fmt = format_orig\n return result\n\n\nprint_memory_usage = anndata.logging.print_memory_usage\nget_memory_usage = anndata.logging.get_memory_usage\n\n\n_DEPENDENCIES_NUMERICS = [\n 'anndata', # anndata actually shouldn't, but as long as it's in development\n 'umap',\n 'numpy',\n 'scipy',\n 'pandas',\n ('sklearn', 'scikit-learn'),\n 'statsmodels',\n ('igraph', 'python-igraph'),\n 'louvain',\n 'leidenalg',\n 'pynndescent',\n]\n\n\ndef _versions_dependencies(dependencies):\n # this is not the same as the requirements!\n for mod in dependencies:\n mod_name, dist_name = mod if isinstance(mod, tuple) else (mod, mod)\n try:\n imp = __import__(mod_name)\n yield dist_name, imp.__version__\n except (ImportError, AttributeError):\n 
pass\n\n\ndef print_header(*, file=None):\n \"\"\"\\\n Versions that might influence the numerical results.\n Matplotlib and Seaborn are excluded from this.\n \"\"\"\n\n modules = ['scanpy'] + _DEPENDENCIES_NUMERICS\n print(\n ' '.join(f'{mod}=={ver}' for mod, ver in _versions_dependencies(modules)),\n file=file or sys.stdout,\n )\n\n\ndef print_versions(*, file: Optional[IO[str]] = None):\n \"\"\"\\\n Print versions of imported packages, OS, and jupyter environment.\n\n For more options (including rich output) use `session_info.show` directly.\n \"\"\"\n import session_info\n\n if file is not None:\n from contextlib import redirect_stdout\n\n warnings.warn(\n \"Passing argument 'file' to print_versions is deprecated, and will be \"\n \"removed in a future version.\",\n FutureWarning,\n )\n with redirect_stdout(file):\n print_versions()\n else:\n session_info.show(\n dependencies=True,\n html=False,\n excludes=[\n 'builtins',\n 'stdlib_list',\n 'importlib_metadata',\n # Special module present if test coverage being calculated\n # https://gitlab.com/joelostblom/session_info/-/issues/10\n \"$coverage\",\n ],\n )\n\n\ndef print_version_and_date(*, file=None):\n \"\"\"\\\n Useful for starting a notebook so you see when you started working.\n \"\"\"\n from . import __version__\n\n if file is None:\n file = sys.stdout\n print(\n f'Running Scanpy {__version__}, ' f'on {datetime.now():%Y-%m-%d %H:%M}.',\n file=file,\n )\n\n\ndef _copy_docs_and_signature(fn):\n return partial(update_wrapper, wrapped=fn, assigned=['__doc__', '__annotations__'])\n\n\ndef error(\n msg: str,\n *,\n time: datetime = None,\n deep: Optional[str] = None,\n extra: Optional[dict] = None,\n) -> datetime:\n \"\"\"\\\n Log message with specific level and return current time.\n\n Parameters\n ==========\n msg\n Message to display.\n time\n A time in the past. If this is passed, the time difference from then\n to now is appended to `msg` as ` (HH:MM:SS)`.\n If `msg` contains `{time_passed}`, the time difference is instead\n inserted at that position.\n deep\n If the current verbosity is higher than the log function\u2019s level,\n this gets displayed as well\n extra\n Additional values you can specify in `msg` like `{time_passed}`.\n \"\"\"\n from ._settings import settings\n\n return settings._root_logger.error(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef warning(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.warning(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef info(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.info(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef hint(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.hint(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef debug(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.debug(msg, time=time, deep=deep, extra=extra)\n", "path": "scanpy/logging.py"}]} | 2,939 | 524 |
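
The patch above swaps `sinfo` for `session_info` and keeps the deprecated `file=` argument working by re-entering the function under `contextlib.redirect_stdout`. A stripped-down sketch of that shim, assuming the third-party `session_info` package is installed, looks like this:

```python
import warnings
from contextlib import redirect_stdout
from typing import IO, Optional


def print_versions(*, file: Optional[IO[str]] = None) -> None:
    """Print versions of imported packages; `file=` is kept only for backwards compatibility."""
    import session_info  # successor to sinfo

    if file is not None:
        warnings.warn("Passing 'file' is deprecated.", FutureWarning)
        with redirect_stdout(file):
            print_versions()  # re-enter without `file`; output lands in the redirect
        return
    session_info.show(dependencies=True, html=False)
```
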
gh_patches_debug_17390 | rasdani/github-patches | git_diff | pyodide__pyodide-3485 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`pyodide_build.out_of_tree.venv.create_pyodide_venv` wrongly requires an Emscripten compiler
See https://github.com/pyodide/pyodide/discussions/3462#discussioncomment-4710208
--- END ISSUE ---
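For context, the failure surfaces as soon as the venv entry point runs, before any actual build work. The snippet below is a minimal sketch of the repro, assuming pyodide-build is installed (for example via an xbuildenv) but no Emscripten toolchain is active; the destination path is arbitrary.
```python
# Hypothetical repro sketch; not part of the original report.
from pathlib import Path

from pyodide_build.out_of_tree.venv import create_pyodide_venv

# create_pyodide_venv() currently calls check_emscripten_version() up front,
# so this aborts immediately in an environment without emsdk, even though
# nothing in the venv-creation path actually invokes the Emscripten compiler.
create_pyodide_venv(Path("./.venv-pyodide"))
```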
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyodide-build/pyodide_build/out_of_tree/venv.py`
Content:
```
1 import shutil
2 import subprocess
3 import sys
4 import textwrap
5 from pathlib import Path
6 from typing import Any
7
8 from ..common import (
9 check_emscripten_version,
10 exit_with_stdio,
11 get_make_flag,
12 get_pyodide_root,
13 in_xbuildenv,
14 )
15 from ..logger import logger
16
17
18 def check_result(result: subprocess.CompletedProcess[str], msg: str) -> None:
19 """Abort if the process returns a nonzero error code"""
20 if result.returncode != 0:
21 logger.error(msg)
22 exit_with_stdio(result)
23
24
25 def dedent(s: str) -> str:
26 return textwrap.dedent(s).strip() + "\n"
27
28
29 def get_pyversion() -> str:
30 return f"{sys.version_info.major}.{sys.version_info.minor}"
31
32
33 def check_host_python_version(session: Any) -> None:
34 pyodide_version = session.interpreter.version.partition(" ")[0].split(".")[:2]
35 sys_version = [str(sys.version_info.major), str(sys.version_info.minor)]
36 if pyodide_version == sys_version:
37 return
38 pyodide_version_fmt = ".".join(pyodide_version)
39 sys_version_fmt = ".".join(sys_version)
40 logger.stderr(
41 f"Expected host Python version to be {pyodide_version_fmt} but got version {sys_version_fmt}"
42 )
43 sys.exit(1)
44
45
46 def pyodide_dist_dir() -> Path:
47 return get_pyodide_root() / "dist"
48
49
50 def create_pip_conf(venv_root: Path) -> None:
51 """Create pip.conf file in venv root
52
53 This file adds a few options that will always be used by pip install.
54 """
55 if in_xbuildenv():
56 # In the xbuildenv, we don't have the packages locally. We will include
57 # in the xbuildenv a PEP 503 index for the vendored Pyodide packages
58 # https://peps.python.org/pep-0503/
59 repo = f'extra-index-url=file:{get_pyodide_root()/"pypa_index"}'
60 else:
61 # In the Pyodide development environment, the Pyodide dist directory
62 # should contain the needed wheels. find-links
63 repo = f"find-links={pyodide_dist_dir()}"
64
65 # Prevent attempts to install binary wheels from source.
66 # Maybe some day we can convince pip to invoke `pyodide build` as the build
67 # front end for wheels...
68 (venv_root / "pip.conf").write_text(
69 dedent(
70 f"""
71 [install]
72 only-binary=:all:
73 {repo}
74 """
75 )
76 )
77
78
79 def get_pip_monkeypatch(venv_bin: Path) -> str:
80 """Monkey patch pip's environment to show info about Pyodide's environment.
81
82 The code returned is injected at the beginning of the pip script.
83 """
84 result = subprocess.run(
85 [
86 venv_bin / "python",
87 "-c",
88 dedent(
89 """
90 import os, sys, sysconfig, platform
91 print([
92 os.name,
93 sys.platform,
94 sys.implementation._multiarch,
95 sysconfig.get_platform()
96 ])
97 """
98 ),
99 ],
100 capture_output=True,
101 encoding="utf8",
102 )
103 check_result(result, "ERROR: failed to invoke Pyodide")
104 platform_data = result.stdout
105 sysconfigdata_dir = Path(get_make_flag("TARGETINSTALLDIR")) / "sysconfigdata"
106
107 return dedent(
108 f"""\
109 import os
110 import sys
111 os_name, sys_platform, multiarch, host_platform = {platform_data}
112 os.name = os_name
113 sys.platform = sys_platform
114 sys.implementation._multiarch = multiarch
115 os.environ["_PYTHON_HOST_PLATFORM"] = host_platform
116 os.environ["_PYTHON_SYSCONFIGDATA_NAME"] = f'_sysconfigdata_{{sys.abiflags}}_{{sys.platform}}_{{sys.implementation._multiarch}}'
117 sys.path.append("{sysconfigdata_dir}")
118 import sysconfig
119 sysconfig.get_config_vars()
120 del os.environ["_PYTHON_SYSCONFIGDATA_NAME"]
121 """
122 )
123
124
125 def create_pip_script(venv_bin):
126 """Create pip and write it into the virtualenv bin folder."""
127 # pip needs to run in the host Python not in Pyodide, so we'll use the host
128 # Python in the shebang. Use whichever Python was used to invoke
129 # pyodide venv.
130 host_python_path = venv_bin / f"python{get_pyversion()}-host"
131 host_python_path.symlink_to(sys.executable)
132
133 (venv_bin / "pip").write_text(
134 # Other than the shebang and the monkey patch, this is exactly what
135 # normal pip looks like.
136 f"#!{host_python_path} -s\n"
137 + get_pip_monkeypatch(venv_bin)
138 + dedent(
139 """
140 import re
141 import sys
142 from pip._internal.cli.main import main
143 if __name__ == '__main__':
144 sys.argv[0] = re.sub(r'(-script\\.pyw|\\.exe)?$', '', sys.argv[0])
145 sys.exit(main())
146 """
147 )
148 )
149 (venv_bin / "pip").chmod(0o777)
150
151 pyversion = get_pyversion()
152 other_pips = [
153 venv_bin / "pip3",
154 venv_bin / f"pip{pyversion}",
155 venv_bin / f"pip-{pyversion}",
156 ]
157
158 for pip in other_pips:
159 pip.unlink()
160 pip.symlink_to(venv_bin / "pip")
161
162
163 def create_pyodide_script(venv_bin: Path) -> None:
164 """Write pyodide cli script into the virtualenv bin folder"""
165 import os
166
167 # Temporarily restore us to the environment that 'pyodide venv' was
168 # invoked in
169 PATH = os.environ["PATH"]
170 PYODIDE_ROOT = os.environ["PYODIDE_ROOT"]
171
172 original_pyodide_cli = shutil.which("pyodide")
173 if original_pyodide_cli is None:
174 raise RuntimeError("ERROR: pyodide cli not found")
175
176 pyodide_path = venv_bin / "pyodide"
177 pyodide_path.write_text(
178 dedent(
179 f"""
180 #!/bin/sh
181 PATH='{PATH}' PYODIDE_ROOT='{PYODIDE_ROOT}' exec {original_pyodide_cli} "$@"
182 """
183 )
184 )
185 pyodide_path.chmod(0o777)
186
187
188 def install_stdlib(venv_bin: Path) -> None:
189 """Install micropip and all unvendored stdlib modules"""
190 # Micropip we could install with pip hypothetically, but because we use
191 # `--extra-index-url` it would install the pypi version which we don't want.
192
193 # Other stuff we need to load with loadPackage
194 # TODO: Also load all shared libs.
195 to_load = ["micropip"]
196 result = subprocess.run(
197 [
198 venv_bin / "python",
199 "-c",
200 dedent(
201 f"""
202 from _pyodide._importhook import UNVENDORED_STDLIBS_AND_TEST;
203 from pyodide_js import loadPackage;
204 from pyodide_js._api import repodata_packages
205 shared_libs = [pkgname for (pkgname,pkg) in repodata_packages.object_entries() if getattr(pkg, "shared_library", False)]
206
207 to_load = [*UNVENDORED_STDLIBS_AND_TEST, *shared_libs, *{to_load!r}]
208 loadPackage(to_load);
209 """
210 ),
211 ],
212 capture_output=True,
213 encoding="utf8",
214 )
215 check_result(result, "ERROR: failed to install unvendored stdlib modules")
216
217
218 def create_pyodide_venv(dest: Path) -> None:
219 """Create a Pyodide virtualenv and store it into dest"""
220 logger.info(f"Creating Pyodide virtualenv at {dest}")
221 from virtualenv import session_via_cli
222
223 if dest.exists():
224 logger.error(f"ERROR: dest directory '{dest}' already exists")
225 sys.exit(1)
226
227 check_emscripten_version()
228
229 interp_path = pyodide_dist_dir() / "python"
230 session = session_via_cli(["--no-wheel", "-p", str(interp_path), str(dest)])
231 check_host_python_version(session)
232
233 try:
234 session.run()
235 venv_root = Path(session.creator.dest).absolute()
236 venv_bin = venv_root / "bin"
237
238 logger.info("... Configuring virtualenv")
239 create_pip_conf(venv_root)
240 create_pip_script(venv_bin)
241 create_pyodide_script(venv_bin)
242 logger.info("... Installing standard library")
243 install_stdlib(venv_bin)
244 except (Exception, KeyboardInterrupt, SystemExit):
245 shutil.rmtree(session.creator.dest)
246 raise
247
248 logger.success("Successfully created Pyodide virtual environment!")
249
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyodide-build/pyodide_build/out_of_tree/venv.py b/pyodide-build/pyodide_build/out_of_tree/venv.py
--- a/pyodide-build/pyodide_build/out_of_tree/venv.py
+++ b/pyodide-build/pyodide_build/out_of_tree/venv.py
@@ -5,13 +5,7 @@
from pathlib import Path
from typing import Any
-from ..common import (
- check_emscripten_version,
- exit_with_stdio,
- get_make_flag,
- get_pyodide_root,
- in_xbuildenv,
-)
+from ..common import exit_with_stdio, get_make_flag, get_pyodide_root, in_xbuildenv
from ..logger import logger
@@ -224,8 +218,6 @@
logger.error(f"ERROR: dest directory '{dest}' already exists")
sys.exit(1)
- check_emscripten_version()
-
interp_path = pyodide_dist_dir() / "python"
session = session_via_cli(["--no-wheel", "-p", str(interp_path), str(dest)])
check_host_python_version(session)
| {"golden_diff": "diff --git a/pyodide-build/pyodide_build/out_of_tree/venv.py b/pyodide-build/pyodide_build/out_of_tree/venv.py\n--- a/pyodide-build/pyodide_build/out_of_tree/venv.py\n+++ b/pyodide-build/pyodide_build/out_of_tree/venv.py\n@@ -5,13 +5,7 @@\n from pathlib import Path\n from typing import Any\n \n-from ..common import (\n- check_emscripten_version,\n- exit_with_stdio,\n- get_make_flag,\n- get_pyodide_root,\n- in_xbuildenv,\n-)\n+from ..common import exit_with_stdio, get_make_flag, get_pyodide_root, in_xbuildenv\n from ..logger import logger\n \n \n@@ -224,8 +218,6 @@\n logger.error(f\"ERROR: dest directory '{dest}' already exists\")\n sys.exit(1)\n \n- check_emscripten_version()\n-\n interp_path = pyodide_dist_dir() / \"python\"\n session = session_via_cli([\"--no-wheel\", \"-p\", str(interp_path), str(dest)])\n check_host_python_version(session)\n", "issue": "`pyodide_build.out_of_tree.venv.create_pyodide_venv` wrongly requires an Emscripten compiler\nSee https://github.com/pyodide/pyodide/discussions/3462#discussioncomment-4710208\n", "before_files": [{"content": "import shutil\nimport subprocess\nimport sys\nimport textwrap\nfrom pathlib import Path\nfrom typing import Any\n\nfrom ..common import (\n check_emscripten_version,\n exit_with_stdio,\n get_make_flag,\n get_pyodide_root,\n in_xbuildenv,\n)\nfrom ..logger import logger\n\n\ndef check_result(result: subprocess.CompletedProcess[str], msg: str) -> None:\n \"\"\"Abort if the process returns a nonzero error code\"\"\"\n if result.returncode != 0:\n logger.error(msg)\n exit_with_stdio(result)\n\n\ndef dedent(s: str) -> str:\n return textwrap.dedent(s).strip() + \"\\n\"\n\n\ndef get_pyversion() -> str:\n return f\"{sys.version_info.major}.{sys.version_info.minor}\"\n\n\ndef check_host_python_version(session: Any) -> None:\n pyodide_version = session.interpreter.version.partition(\" \")[0].split(\".\")[:2]\n sys_version = [str(sys.version_info.major), str(sys.version_info.minor)]\n if pyodide_version == sys_version:\n return\n pyodide_version_fmt = \".\".join(pyodide_version)\n sys_version_fmt = \".\".join(sys_version)\n logger.stderr(\n f\"Expected host Python version to be {pyodide_version_fmt} but got version {sys_version_fmt}\"\n )\n sys.exit(1)\n\n\ndef pyodide_dist_dir() -> Path:\n return get_pyodide_root() / \"dist\"\n\n\ndef create_pip_conf(venv_root: Path) -> None:\n \"\"\"Create pip.conf file in venv root\n\n This file adds a few options that will always be used by pip install.\n \"\"\"\n if in_xbuildenv():\n # In the xbuildenv, we don't have the packages locally. We will include\n # in the xbuildenv a PEP 503 index for the vendored Pyodide packages\n # https://peps.python.org/pep-0503/\n repo = f'extra-index-url=file:{get_pyodide_root()/\"pypa_index\"}'\n else:\n # In the Pyodide development environment, the Pyodide dist directory\n # should contain the needed wheels. 
find-links\n repo = f\"find-links={pyodide_dist_dir()}\"\n\n # Prevent attempts to install binary wheels from source.\n # Maybe some day we can convince pip to invoke `pyodide build` as the build\n # front end for wheels...\n (venv_root / \"pip.conf\").write_text(\n dedent(\n f\"\"\"\n [install]\n only-binary=:all:\n {repo}\n \"\"\"\n )\n )\n\n\ndef get_pip_monkeypatch(venv_bin: Path) -> str:\n \"\"\"Monkey patch pip's environment to show info about Pyodide's environment.\n\n The code returned is injected at the beginning of the pip script.\n \"\"\"\n result = subprocess.run(\n [\n venv_bin / \"python\",\n \"-c\",\n dedent(\n \"\"\"\n import os, sys, sysconfig, platform\n print([\n os.name,\n sys.platform,\n sys.implementation._multiarch,\n sysconfig.get_platform()\n ])\n \"\"\"\n ),\n ],\n capture_output=True,\n encoding=\"utf8\",\n )\n check_result(result, \"ERROR: failed to invoke Pyodide\")\n platform_data = result.stdout\n sysconfigdata_dir = Path(get_make_flag(\"TARGETINSTALLDIR\")) / \"sysconfigdata\"\n\n return dedent(\n f\"\"\"\\\n import os\n import sys\n os_name, sys_platform, multiarch, host_platform = {platform_data}\n os.name = os_name\n sys.platform = sys_platform\n sys.implementation._multiarch = multiarch\n os.environ[\"_PYTHON_HOST_PLATFORM\"] = host_platform\n os.environ[\"_PYTHON_SYSCONFIGDATA_NAME\"] = f'_sysconfigdata_{{sys.abiflags}}_{{sys.platform}}_{{sys.implementation._multiarch}}'\n sys.path.append(\"{sysconfigdata_dir}\")\n import sysconfig\n sysconfig.get_config_vars()\n del os.environ[\"_PYTHON_SYSCONFIGDATA_NAME\"]\n \"\"\"\n )\n\n\ndef create_pip_script(venv_bin):\n \"\"\"Create pip and write it into the virtualenv bin folder.\"\"\"\n # pip needs to run in the host Python not in Pyodide, so we'll use the host\n # Python in the shebang. 
Use whichever Python was used to invoke\n # pyodide venv.\n host_python_path = venv_bin / f\"python{get_pyversion()}-host\"\n host_python_path.symlink_to(sys.executable)\n\n (venv_bin / \"pip\").write_text(\n # Other than the shebang and the monkey patch, this is exactly what\n # normal pip looks like.\n f\"#!{host_python_path} -s\\n\"\n + get_pip_monkeypatch(venv_bin)\n + dedent(\n \"\"\"\n import re\n import sys\n from pip._internal.cli.main import main\n if __name__ == '__main__':\n sys.argv[0] = re.sub(r'(-script\\\\.pyw|\\\\.exe)?$', '', sys.argv[0])\n sys.exit(main())\n \"\"\"\n )\n )\n (venv_bin / \"pip\").chmod(0o777)\n\n pyversion = get_pyversion()\n other_pips = [\n venv_bin / \"pip3\",\n venv_bin / f\"pip{pyversion}\",\n venv_bin / f\"pip-{pyversion}\",\n ]\n\n for pip in other_pips:\n pip.unlink()\n pip.symlink_to(venv_bin / \"pip\")\n\n\ndef create_pyodide_script(venv_bin: Path) -> None:\n \"\"\"Write pyodide cli script into the virtualenv bin folder\"\"\"\n import os\n\n # Temporarily restore us to the environment that 'pyodide venv' was\n # invoked in\n PATH = os.environ[\"PATH\"]\n PYODIDE_ROOT = os.environ[\"PYODIDE_ROOT\"]\n\n original_pyodide_cli = shutil.which(\"pyodide\")\n if original_pyodide_cli is None:\n raise RuntimeError(\"ERROR: pyodide cli not found\")\n\n pyodide_path = venv_bin / \"pyodide\"\n pyodide_path.write_text(\n dedent(\n f\"\"\"\n #!/bin/sh\n PATH='{PATH}' PYODIDE_ROOT='{PYODIDE_ROOT}' exec {original_pyodide_cli} \"$@\"\n \"\"\"\n )\n )\n pyodide_path.chmod(0o777)\n\n\ndef install_stdlib(venv_bin: Path) -> None:\n \"\"\"Install micropip and all unvendored stdlib modules\"\"\"\n # Micropip we could install with pip hypothetically, but because we use\n # `--extra-index-url` it would install the pypi version which we don't want.\n\n # Other stuff we need to load with loadPackage\n # TODO: Also load all shared libs.\n to_load = [\"micropip\"]\n result = subprocess.run(\n [\n venv_bin / \"python\",\n \"-c\",\n dedent(\n f\"\"\"\n from _pyodide._importhook import UNVENDORED_STDLIBS_AND_TEST;\n from pyodide_js import loadPackage;\n from pyodide_js._api import repodata_packages\n shared_libs = [pkgname for (pkgname,pkg) in repodata_packages.object_entries() if getattr(pkg, \"shared_library\", False)]\n\n to_load = [*UNVENDORED_STDLIBS_AND_TEST, *shared_libs, *{to_load!r}]\n loadPackage(to_load);\n \"\"\"\n ),\n ],\n capture_output=True,\n encoding=\"utf8\",\n )\n check_result(result, \"ERROR: failed to install unvendored stdlib modules\")\n\n\ndef create_pyodide_venv(dest: Path) -> None:\n \"\"\"Create a Pyodide virtualenv and store it into dest\"\"\"\n logger.info(f\"Creating Pyodide virtualenv at {dest}\")\n from virtualenv import session_via_cli\n\n if dest.exists():\n logger.error(f\"ERROR: dest directory '{dest}' already exists\")\n sys.exit(1)\n\n check_emscripten_version()\n\n interp_path = pyodide_dist_dir() / \"python\"\n session = session_via_cli([\"--no-wheel\", \"-p\", str(interp_path), str(dest)])\n check_host_python_version(session)\n\n try:\n session.run()\n venv_root = Path(session.creator.dest).absolute()\n venv_bin = venv_root / \"bin\"\n\n logger.info(\"... Configuring virtualenv\")\n create_pip_conf(venv_root)\n create_pip_script(venv_bin)\n create_pyodide_script(venv_bin)\n logger.info(\"... 
Installing standard library\")\n install_stdlib(venv_bin)\n except (Exception, KeyboardInterrupt, SystemExit):\n shutil.rmtree(session.creator.dest)\n raise\n\n logger.success(\"Successfully created Pyodide virtual environment!\")\n", "path": "pyodide-build/pyodide_build/out_of_tree/venv.py"}], "after_files": [{"content": "import shutil\nimport subprocess\nimport sys\nimport textwrap\nfrom pathlib import Path\nfrom typing import Any\n\nfrom ..common import exit_with_stdio, get_make_flag, get_pyodide_root, in_xbuildenv\nfrom ..logger import logger\n\n\ndef check_result(result: subprocess.CompletedProcess[str], msg: str) -> None:\n \"\"\"Abort if the process returns a nonzero error code\"\"\"\n if result.returncode != 0:\n logger.error(msg)\n exit_with_stdio(result)\n\n\ndef dedent(s: str) -> str:\n return textwrap.dedent(s).strip() + \"\\n\"\n\n\ndef get_pyversion() -> str:\n return f\"{sys.version_info.major}.{sys.version_info.minor}\"\n\n\ndef check_host_python_version(session: Any) -> None:\n pyodide_version = session.interpreter.version.partition(\" \")[0].split(\".\")[:2]\n sys_version = [str(sys.version_info.major), str(sys.version_info.minor)]\n if pyodide_version == sys_version:\n return\n pyodide_version_fmt = \".\".join(pyodide_version)\n sys_version_fmt = \".\".join(sys_version)\n logger.stderr(\n f\"Expected host Python version to be {pyodide_version_fmt} but got version {sys_version_fmt}\"\n )\n sys.exit(1)\n\n\ndef pyodide_dist_dir() -> Path:\n return get_pyodide_root() / \"dist\"\n\n\ndef create_pip_conf(venv_root: Path) -> None:\n \"\"\"Create pip.conf file in venv root\n\n This file adds a few options that will always be used by pip install.\n \"\"\"\n if in_xbuildenv():\n # In the xbuildenv, we don't have the packages locally. We will include\n # in the xbuildenv a PEP 503 index for the vendored Pyodide packages\n # https://peps.python.org/pep-0503/\n repo = f'extra-index-url=file:{get_pyodide_root()/\"pypa_index\"}'\n else:\n # In the Pyodide development environment, the Pyodide dist directory\n # should contain the needed wheels. 
find-links\n repo = f\"find-links={pyodide_dist_dir()}\"\n\n # Prevent attempts to install binary wheels from source.\n # Maybe some day we can convince pip to invoke `pyodide build` as the build\n # front end for wheels...\n (venv_root / \"pip.conf\").write_text(\n dedent(\n f\"\"\"\n [install]\n only-binary=:all:\n {repo}\n \"\"\"\n )\n )\n\n\ndef get_pip_monkeypatch(venv_bin: Path) -> str:\n \"\"\"Monkey patch pip's environment to show info about Pyodide's environment.\n\n The code returned is injected at the beginning of the pip script.\n \"\"\"\n result = subprocess.run(\n [\n venv_bin / \"python\",\n \"-c\",\n dedent(\n \"\"\"\n import os, sys, sysconfig, platform\n print([\n os.name,\n sys.platform,\n sys.implementation._multiarch,\n sysconfig.get_platform()\n ])\n \"\"\"\n ),\n ],\n capture_output=True,\n encoding=\"utf8\",\n )\n check_result(result, \"ERROR: failed to invoke Pyodide\")\n platform_data = result.stdout\n sysconfigdata_dir = Path(get_make_flag(\"TARGETINSTALLDIR\")) / \"sysconfigdata\"\n\n return dedent(\n f\"\"\"\\\n import os\n import sys\n os_name, sys_platform, multiarch, host_platform = {platform_data}\n os.name = os_name\n sys.platform = sys_platform\n sys.implementation._multiarch = multiarch\n os.environ[\"_PYTHON_HOST_PLATFORM\"] = host_platform\n os.environ[\"_PYTHON_SYSCONFIGDATA_NAME\"] = f'_sysconfigdata_{{sys.abiflags}}_{{sys.platform}}_{{sys.implementation._multiarch}}'\n sys.path.append(\"{sysconfigdata_dir}\")\n import sysconfig\n sysconfig.get_config_vars()\n del os.environ[\"_PYTHON_SYSCONFIGDATA_NAME\"]\n \"\"\"\n )\n\n\ndef create_pip_script(venv_bin):\n \"\"\"Create pip and write it into the virtualenv bin folder.\"\"\"\n # pip needs to run in the host Python not in Pyodide, so we'll use the host\n # Python in the shebang. 
Use whichever Python was used to invoke\n # pyodide venv.\n host_python_path = venv_bin / f\"python{get_pyversion()}-host\"\n host_python_path.symlink_to(sys.executable)\n\n (venv_bin / \"pip\").write_text(\n # Other than the shebang and the monkey patch, this is exactly what\n # normal pip looks like.\n f\"#!{host_python_path} -s\\n\"\n + get_pip_monkeypatch(venv_bin)\n + dedent(\n \"\"\"\n import re\n import sys\n from pip._internal.cli.main import main\n if __name__ == '__main__':\n sys.argv[0] = re.sub(r'(-script\\\\.pyw|\\\\.exe)?$', '', sys.argv[0])\n sys.exit(main())\n \"\"\"\n )\n )\n (venv_bin / \"pip\").chmod(0o777)\n\n pyversion = get_pyversion()\n other_pips = [\n venv_bin / \"pip3\",\n venv_bin / f\"pip{pyversion}\",\n venv_bin / f\"pip-{pyversion}\",\n ]\n\n for pip in other_pips:\n pip.unlink()\n pip.symlink_to(venv_bin / \"pip\")\n\n\ndef create_pyodide_script(venv_bin: Path) -> None:\n \"\"\"Write pyodide cli script into the virtualenv bin folder\"\"\"\n import os\n\n # Temporarily restore us to the environment that 'pyodide venv' was\n # invoked in\n PATH = os.environ[\"PATH\"]\n PYODIDE_ROOT = os.environ[\"PYODIDE_ROOT\"]\n\n original_pyodide_cli = shutil.which(\"pyodide\")\n if original_pyodide_cli is None:\n raise RuntimeError(\"ERROR: pyodide cli not found\")\n\n pyodide_path = venv_bin / \"pyodide\"\n pyodide_path.write_text(\n dedent(\n f\"\"\"\n #!/bin/sh\n PATH='{PATH}' PYODIDE_ROOT='{PYODIDE_ROOT}' exec {original_pyodide_cli} \"$@\"\n \"\"\"\n )\n )\n pyodide_path.chmod(0o777)\n\n\ndef install_stdlib(venv_bin: Path) -> None:\n \"\"\"Install micropip and all unvendored stdlib modules\"\"\"\n # Micropip we could install with pip hypothetically, but because we use\n # `--extra-index-url` it would install the pypi version which we don't want.\n\n # Other stuff we need to load with loadPackage\n # TODO: Also load all shared libs.\n to_load = [\"micropip\"]\n result = subprocess.run(\n [\n venv_bin / \"python\",\n \"-c\",\n dedent(\n f\"\"\"\n from _pyodide._importhook import UNVENDORED_STDLIBS_AND_TEST;\n from pyodide_js import loadPackage;\n from pyodide_js._api import repodata_packages\n shared_libs = [pkgname for (pkgname,pkg) in repodata_packages.object_entries() if getattr(pkg, \"shared_library\", False)]\n\n to_load = [*UNVENDORED_STDLIBS_AND_TEST, *shared_libs, *{to_load!r}]\n loadPackage(to_load);\n \"\"\"\n ),\n ],\n capture_output=True,\n encoding=\"utf8\",\n )\n check_result(result, \"ERROR: failed to install unvendored stdlib modules\")\n\n\ndef create_pyodide_venv(dest: Path) -> None:\n \"\"\"Create a Pyodide virtualenv and store it into dest\"\"\"\n logger.info(f\"Creating Pyodide virtualenv at {dest}\")\n from virtualenv import session_via_cli\n\n if dest.exists():\n logger.error(f\"ERROR: dest directory '{dest}' already exists\")\n sys.exit(1)\n\n interp_path = pyodide_dist_dir() / \"python\"\n session = session_via_cli([\"--no-wheel\", \"-p\", str(interp_path), str(dest)])\n check_host_python_version(session)\n\n try:\n session.run()\n venv_root = Path(session.creator.dest).absolute()\n venv_bin = venv_root / \"bin\"\n\n logger.info(\"... Configuring virtualenv\")\n create_pip_conf(venv_root)\n create_pip_script(venv_bin)\n create_pyodide_script(venv_bin)\n logger.info(\"... 
Installing standard library\")\n install_stdlib(venv_bin)\n except (Exception, KeyboardInterrupt, SystemExit):\n shutil.rmtree(session.creator.dest)\n raise\n\n logger.success(\"Successfully created Pyodide virtual environment!\")\n", "path": "pyodide-build/pyodide_build/out_of_tree/venv.py"}]} | 2,955 | 253 |
gh_patches_debug_18256 | rasdani/github-patches | git_diff | qtile__qtile-2710 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CurrentLayout widget shows wrong layout
Hello everyone,
When the system boots up, the **"CurrentLayout"** widget shows the **max** layout as the current layout, which is wrong. I have set **monadtall** as my default layout, but when I restart/reload qtile with the shortcut keys the widget shows the correct current layout.
My layout configuration -->
```
layout_theme = {"border_width": 4,
"margin": 8,
"border_focus": "#d79921",
"border_normal": "#1D2330"
}
layouts = [
layout.Max(),
# layout.Matrix(**layout_theme),
layout.MonadTall(**layout_theme),
layout.MonadWide(**layout_theme),
layout.Floating(
border_focus='#d79921',
border_width=2
),
layout.TreeTab(
font="Ubuntu",
fontsize=12,
sections=["FIRST", "SECOND", "THIRD", "FOURTH"],
section_fontsize=10,
border_width=2,
bg_color="#1c1f24",
active_bg="#d75F5F",
active_fg="#000000",
inactive_bg="#a89984",
inactive_fg="#1c1f24",
padding_left=0,
padding_x=0,
padding_y=5,
section_top=10,
section_bottom=20,
level_shift=8,
vspace=3,
panel_width=200
),
# layout.VerticalTile(**layout_theme),
]
```

Am I doing something wrong?
qtile version -- 0.18.1.dev0+g8e7ecc0a.d20210719
os - Arch
--- END ISSUE ---
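For illustration only (not part of the report): the symptom appears whenever the first entry in `layouts` is not the layout the group starts in, because the widget initializes its text from `layouts[0]`. How the reporter made monadtall the default is not shown, so the `Group(layout=...)` approach below is an assumption.
```python
# Hypothetical minimal config fragment reproducing the symptom.
from libqtile import bar, layout, widget
from libqtile.config import Group, Screen

layouts = [
    layout.Max(),        # index 0 -- what CurrentLayout shows at startup
    layout.MonadTall(),  # the layout the group actually starts in
]

# The group starts in monadtall, so layouts[0] is not the active layout;
# the widget only corrects itself once a layout_change hook fires
# (e.g. after a restart/reload or a manual layout switch).
groups = [Group("1", layout="monadtall")]

screens = [Screen(top=bar.Bar([widget.CurrentLayout()], 24))]
```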
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/widget/currentlayout.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2011 Florian Mounier
3 # Copyright (c) 2011 Kenji_Takahashi
4 # Copyright (c) 2012 roger
5 # Copyright (c) 2012, 2014 Tycho Andersen
6 # Copyright (c) 2012 Maximilian Köhl
7 # Copyright (c) 2013 Craig Barnes
8 # Copyright (c) 2014 Sean Vig
9 # Copyright (c) 2014 Adi Sieker
10 #
11 # Permission is hereby granted, free of charge, to any person obtaining a copy
12 # of this software and associated documentation files (the "Software"), to deal
13 # in the Software without restriction, including without limitation the rights
14 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 # copies of the Software, and to permit persons to whom the Software is
16 # furnished to do so, subject to the following conditions:
17 #
18 # The above copyright notice and this permission notice shall be included in
19 # all copies or substantial portions of the Software.
20 #
21 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
24 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27 # SOFTWARE.
28 import os
29
30 import cairocffi
31
32 from libqtile import bar, hook
33 from libqtile.log_utils import logger
34 from libqtile.widget import base
35
36
37 class CurrentLayout(base._TextBox):
38 """
39 Display the name of the current layout of the current group of the screen,
40 the bar containing the widget, is on.
41 """
42 orientations = base.ORIENTATION_HORIZONTAL
43
44 def __init__(self, width=bar.CALCULATED, **config):
45 base._TextBox.__init__(self, "", width, **config)
46
47 def _configure(self, qtile, bar):
48 base._TextBox._configure(self, qtile, bar)
49 self.text = self.bar.screen.group.layouts[0].name
50 self.setup_hooks()
51
52 self.add_callbacks({
53 'Button1': qtile.cmd_next_layout,
54 'Button2': qtile.cmd_prev_layout,
55 })
56
57 def setup_hooks(self):
58 def hook_response(layout, group):
59 if group.screen is not None and group.screen == self.bar.screen:
60 self.text = layout.name
61 self.bar.draw()
62 hook.subscribe.layout_change(hook_response)
63
64
65 class CurrentLayoutIcon(base._TextBox):
66 """
67 Display the icon representing the current layout of the
68 current group of the screen on which the bar containing the widget is.
69
70 If you are using custom layouts, a default icon with question mark
71 will be displayed for them. If you want to use custom icon for your own
72 layout, for example, `FooGrid`, then create a file named
73 "layout-foogrid.png" and place it in `~/.icons` directory. You can as well
74 use other directories, but then you need to specify those directories
75 in `custom_icon_paths` argument for this plugin.
76
77 The order of icon search is:
78
79 - dirs in `custom_icon_paths` config argument
80 - `~/.icons`
81 - built-in qtile icons
82 """
83 orientations = base.ORIENTATION_HORIZONTAL
84
85 defaults = [
86 (
87 'scale',
88 1,
89 'Scale factor relative to the bar height. '
90 'Defaults to 1'
91 ),
92 (
93 'custom_icon_paths',
94 [],
95 'List of folders where to search icons before'
96 'using built-in icons or icons in ~/.icons dir. '
97 'This can also be used to provide'
98 'missing icons for custom layouts. '
99 'Defaults to empty list.'
100 )
101 ]
102
103 def __init__(self, **config):
104 base._TextBox.__init__(self, "", **config)
105 self.add_defaults(CurrentLayoutIcon.defaults)
106 self.scale = 1.0 / self.scale
107
108 self.length_type = bar.STATIC
109 self.length = 0
110
111 def _configure(self, qtile, bar):
112 base._TextBox._configure(self, qtile, bar)
113 self.text = self.bar.screen.group.layouts[0].name
114 self.current_layout = self.text
115 self.icons_loaded = False
116 self.icon_paths = []
117 self.surfaces = {}
118 self._update_icon_paths()
119 self._setup_images()
120 self._setup_hooks()
121
122 self.add_callbacks({
123 'Button1': qtile.cmd_next_layout,
124 'Button2': qtile.cmd_prev_layout,
125 })
126
127 def _setup_hooks(self):
128 """
129 Listens for layout change and performs a redraw when it occurs.
130 """
131 def hook_response(layout, group):
132 if group.screen is not None and group.screen == self.bar.screen:
133 self.current_layout = layout.name
134 self.bar.draw()
135 hook.subscribe.layout_change(hook_response)
136
137 def draw(self):
138 if self.icons_loaded:
139 try:
140 surface = self.surfaces[self.current_layout]
141 except KeyError:
142 logger.error('No icon for layout {}'.format(
143 self.current_layout
144 ))
145 else:
146 self.drawer.clear(self.background or self.bar.background)
147 self.drawer.ctx.set_source(surface)
148 self.drawer.ctx.paint()
149 self.drawer.draw(offsetx=self.offset, width=self.length)
150 else:
151 # Fallback to text
152 self.text = self.current_layout[0].upper()
153 base._TextBox.draw(self)
154
155 def _get_layout_names(self):
156 """
157 Returns the list of lowercased strings for each available layout name.
158 """
159 return [layout.__class__.__name__.lower() for layout in self.qtile.config.layouts]
160
161 def _update_icon_paths(self):
162 self.icon_paths = []
163
164 # We allow user to override icon search path
165 self.icon_paths.extend(self.custom_icon_paths)
166
167 # We also look in ~/.icons/
168 self.icon_paths.append(os.path.expanduser('~/.icons'))
169
170 # Default icons are in libqtile/resources/layout-icons.
171 # If using default config without any custom icons,
172 # this path will be used.
173 root = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-2])
174 self.icon_paths.append(os.path.join(root, 'resources', 'layout-icons'))
175
176 def find_icon_file_path(self, layout_name):
177 icon_filename = 'layout-{}.png'.format(layout_name)
178 for icon_path in self.icon_paths:
179 icon_file_path = os.path.join(icon_path, icon_filename)
180 if os.path.isfile(icon_file_path):
181 return icon_file_path
182
183 def _setup_images(self):
184 """
185 Loads layout icons.
186 """
187 for layout_name in self._get_layout_names():
188 icon_file_path = self.find_icon_file_path(layout_name)
189 if icon_file_path is None:
190 logger.warning('No icon found for layout "{}"'.format(layout_name))
191 icon_file_path = self.find_icon_file_path('unknown')
192
193 try:
194 img = cairocffi.ImageSurface.create_from_png(icon_file_path)
195 except (cairocffi.Error, IOError) as e:
196 # Icon file is guaranteed to exist at this point.
197 # If this exception happens, it means the icon file contains
198 # an invalid image or is not readable.
199 self.icons_loaded = False
200 logger.exception(
201 'Failed to load icon from file "{}", '
202 'error was: {}'.format(icon_file_path, e.message)
203 )
204 return
205
206 input_width = img.get_width()
207 input_height = img.get_height()
208
209 sp = input_height / (self.bar.height - 1)
210
211 width = input_width / sp
212 if width > self.length:
213 self.length = int(width) + self.actual_padding * 2
214
215 imgpat = cairocffi.SurfacePattern(img)
216
217 scaler = cairocffi.Matrix()
218
219 scaler.scale(sp, sp)
220 scaler.scale(self.scale, self.scale)
221 factor = (1 - 1 / self.scale) / 2
222 scaler.translate(-width * factor, -width * factor)
223 scaler.translate(self.actual_padding * -1, 0)
224 imgpat.set_matrix(scaler)
225
226 imgpat.set_filter(cairocffi.FILTER_BEST)
227 self.surfaces[layout_name] = imgpat
228
229 self.icons_loaded = True
230
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libqtile/widget/currentlayout.py b/libqtile/widget/currentlayout.py
--- a/libqtile/widget/currentlayout.py
+++ b/libqtile/widget/currentlayout.py
@@ -46,7 +46,8 @@
def _configure(self, qtile, bar):
base._TextBox._configure(self, qtile, bar)
- self.text = self.bar.screen.group.layouts[0].name
+ layout_id = self.bar.screen.group.current_layout
+ self.text = self.bar.screen.group.layouts[layout_id].name
self.setup_hooks()
self.add_callbacks({
@@ -110,7 +111,8 @@
def _configure(self, qtile, bar):
base._TextBox._configure(self, qtile, bar)
- self.text = self.bar.screen.group.layouts[0].name
+ layout_id = self.bar.screen.group.current_layout
+ self.text = self.bar.screen.group.layouts[layout_id].name
self.current_layout = self.text
self.icons_loaded = False
self.icon_paths = []
| {"golden_diff": "diff --git a/libqtile/widget/currentlayout.py b/libqtile/widget/currentlayout.py\n--- a/libqtile/widget/currentlayout.py\n+++ b/libqtile/widget/currentlayout.py\n@@ -46,7 +46,8 @@\n \n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n- self.text = self.bar.screen.group.layouts[0].name\n+ layout_id = self.bar.screen.group.current_layout\n+ self.text = self.bar.screen.group.layouts[layout_id].name\n self.setup_hooks()\n \n self.add_callbacks({\n@@ -110,7 +111,8 @@\n \n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n- self.text = self.bar.screen.group.layouts[0].name\n+ layout_id = self.bar.screen.group.current_layout\n+ self.text = self.bar.screen.group.layouts[layout_id].name\n self.current_layout = self.text\n self.icons_loaded = False\n self.icon_paths = []\n", "issue": "CurrentLayout widget show wrong layout\nhello everyone , \r\n\r\nwhen the system boots up **\"CurrentLayout\"** widget shows the **max** layout as the current layout which is wrong. I have set the **monadtall** as my default layout but when I restart/reload the qtile with shortcut keys it shows the correct current layout. \r\n\r\nMy layout configuration --> \r\n\r\n```\r\nlayout_theme = {\"border_width\": 4,\r\n \"margin\": 8,\r\n \"border_focus\": \"#d79921\",\r\n \"border_normal\": \"#1D2330\"\r\n }\r\n\r\nlayouts = [\r\n layout.Max(),\r\n # layout.Matrix(**layout_theme),\r\n layout.MonadTall(**layout_theme),\r\n layout.MonadWide(**layout_theme),\r\n layout.Floating(\r\n border_focus='#d79921',\r\n border_width=2\r\n ),\r\n layout.TreeTab(\r\n font=\"Ubuntu\",\r\n fontsize=12,\r\n sections=[\"FIRST\", \"SECOND\", \"THIRD\", \"FOURTH\"],\r\n section_fontsize=10,\r\n border_width=2,\r\n bg_color=\"#1c1f24\",\r\n active_bg=\"#d75F5F\",\r\n active_fg=\"#000000\",\r\n inactive_bg=\"#a89984\",\r\n inactive_fg=\"#1c1f24\",\r\n padding_left=0,\r\n padding_x=0,\r\n padding_y=5,\r\n section_top=10,\r\n section_bottom=20,\r\n level_shift=8,\r\n vspace=3,\r\n panel_width=200\r\n ),\r\n # layout.VerticalTile(**layout_theme),\r\n]\r\n\r\n```\r\n\r\n\r\n\r\nAm doing something wrong ?\r\n\r\nqtile version -- 0.18.1.dev0+g8e7ecc0a.d20210719\r\nos - Arch \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2011 Florian Mounier\n# Copyright (c) 2011 Kenji_Takahashi\n# Copyright (c) 2012 roger\n# Copyright (c) 2012, 2014 Tycho Andersen\n# Copyright (c) 2012 Maximilian K\u00f6hl\n# Copyright (c) 2013 Craig Barnes\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 Adi Sieker\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport os\n\nimport cairocffi\n\nfrom libqtile import bar, hook\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass CurrentLayout(base._TextBox):\n \"\"\"\n Display the name of the current layout of the current group of the screen,\n the bar containing the widget, is on.\n \"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n\n def __init__(self, width=bar.CALCULATED, **config):\n base._TextBox.__init__(self, \"\", width, **config)\n\n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n self.text = self.bar.screen.group.layouts[0].name\n self.setup_hooks()\n\n self.add_callbacks({\n 'Button1': qtile.cmd_next_layout,\n 'Button2': qtile.cmd_prev_layout,\n })\n\n def setup_hooks(self):\n def hook_response(layout, group):\n if group.screen is not None and group.screen == self.bar.screen:\n self.text = layout.name\n self.bar.draw()\n hook.subscribe.layout_change(hook_response)\n\n\nclass CurrentLayoutIcon(base._TextBox):\n \"\"\"\n Display the icon representing the current layout of the\n current group of the screen on which the bar containing the widget is.\n\n If you are using custom layouts, a default icon with question mark\n will be displayed for them. If you want to use custom icon for your own\n layout, for example, `FooGrid`, then create a file named\n \"layout-foogrid.png\" and place it in `~/.icons` directory. You can as well\n use other directories, but then you need to specify those directories\n in `custom_icon_paths` argument for this plugin.\n\n The order of icon search is:\n\n - dirs in `custom_icon_paths` config argument\n - `~/.icons`\n - built-in qtile icons\n \"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n\n defaults = [\n (\n 'scale',\n 1,\n 'Scale factor relative to the bar height. '\n 'Defaults to 1'\n ),\n (\n 'custom_icon_paths',\n [],\n 'List of folders where to search icons before'\n 'using built-in icons or icons in ~/.icons dir. '\n 'This can also be used to provide'\n 'missing icons for custom layouts. 
'\n 'Defaults to empty list.'\n )\n ]\n\n def __init__(self, **config):\n base._TextBox.__init__(self, \"\", **config)\n self.add_defaults(CurrentLayoutIcon.defaults)\n self.scale = 1.0 / self.scale\n\n self.length_type = bar.STATIC\n self.length = 0\n\n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n self.text = self.bar.screen.group.layouts[0].name\n self.current_layout = self.text\n self.icons_loaded = False\n self.icon_paths = []\n self.surfaces = {}\n self._update_icon_paths()\n self._setup_images()\n self._setup_hooks()\n\n self.add_callbacks({\n 'Button1': qtile.cmd_next_layout,\n 'Button2': qtile.cmd_prev_layout,\n })\n\n def _setup_hooks(self):\n \"\"\"\n Listens for layout change and performs a redraw when it occurs.\n \"\"\"\n def hook_response(layout, group):\n if group.screen is not None and group.screen == self.bar.screen:\n self.current_layout = layout.name\n self.bar.draw()\n hook.subscribe.layout_change(hook_response)\n\n def draw(self):\n if self.icons_loaded:\n try:\n surface = self.surfaces[self.current_layout]\n except KeyError:\n logger.error('No icon for layout {}'.format(\n self.current_layout\n ))\n else:\n self.drawer.clear(self.background or self.bar.background)\n self.drawer.ctx.set_source(surface)\n self.drawer.ctx.paint()\n self.drawer.draw(offsetx=self.offset, width=self.length)\n else:\n # Fallback to text\n self.text = self.current_layout[0].upper()\n base._TextBox.draw(self)\n\n def _get_layout_names(self):\n \"\"\"\n Returns the list of lowercased strings for each available layout name.\n \"\"\"\n return [layout.__class__.__name__.lower() for layout in self.qtile.config.layouts]\n\n def _update_icon_paths(self):\n self.icon_paths = []\n\n # We allow user to override icon search path\n self.icon_paths.extend(self.custom_icon_paths)\n\n # We also look in ~/.icons/\n self.icon_paths.append(os.path.expanduser('~/.icons'))\n\n # Default icons are in libqtile/resources/layout-icons.\n # If using default config without any custom icons,\n # this path will be used.\n root = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-2])\n self.icon_paths.append(os.path.join(root, 'resources', 'layout-icons'))\n\n def find_icon_file_path(self, layout_name):\n icon_filename = 'layout-{}.png'.format(layout_name)\n for icon_path in self.icon_paths:\n icon_file_path = os.path.join(icon_path, icon_filename)\n if os.path.isfile(icon_file_path):\n return icon_file_path\n\n def _setup_images(self):\n \"\"\"\n Loads layout icons.\n \"\"\"\n for layout_name in self._get_layout_names():\n icon_file_path = self.find_icon_file_path(layout_name)\n if icon_file_path is None:\n logger.warning('No icon found for layout \"{}\"'.format(layout_name))\n icon_file_path = self.find_icon_file_path('unknown')\n\n try:\n img = cairocffi.ImageSurface.create_from_png(icon_file_path)\n except (cairocffi.Error, IOError) as e:\n # Icon file is guaranteed to exist at this point.\n # If this exception happens, it means the icon file contains\n # an invalid image or is not readable.\n self.icons_loaded = False\n logger.exception(\n 'Failed to load icon from file \"{}\", '\n 'error was: {}'.format(icon_file_path, e.message)\n )\n return\n\n input_width = img.get_width()\n input_height = img.get_height()\n\n sp = input_height / (self.bar.height - 1)\n\n width = input_width / sp\n if width > self.length:\n self.length = int(width) + self.actual_padding * 2\n\n imgpat = cairocffi.SurfacePattern(img)\n\n scaler = cairocffi.Matrix()\n\n scaler.scale(sp, sp)\n 
scaler.scale(self.scale, self.scale)\n factor = (1 - 1 / self.scale) / 2\n scaler.translate(-width * factor, -width * factor)\n scaler.translate(self.actual_padding * -1, 0)\n imgpat.set_matrix(scaler)\n\n imgpat.set_filter(cairocffi.FILTER_BEST)\n self.surfaces[layout_name] = imgpat\n\n self.icons_loaded = True\n", "path": "libqtile/widget/currentlayout.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2011 Florian Mounier\n# Copyright (c) 2011 Kenji_Takahashi\n# Copyright (c) 2012 roger\n# Copyright (c) 2012, 2014 Tycho Andersen\n# Copyright (c) 2012 Maximilian K\u00f6hl\n# Copyright (c) 2013 Craig Barnes\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 Adi Sieker\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport os\n\nimport cairocffi\n\nfrom libqtile import bar, hook\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\n\nclass CurrentLayout(base._TextBox):\n \"\"\"\n Display the name of the current layout of the current group of the screen,\n the bar containing the widget, is on.\n \"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n\n def __init__(self, width=bar.CALCULATED, **config):\n base._TextBox.__init__(self, \"\", width, **config)\n\n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n layout_id = self.bar.screen.group.current_layout\n self.text = self.bar.screen.group.layouts[layout_id].name\n self.setup_hooks()\n\n self.add_callbacks({\n 'Button1': qtile.cmd_next_layout,\n 'Button2': qtile.cmd_prev_layout,\n })\n\n def setup_hooks(self):\n def hook_response(layout, group):\n if group.screen is not None and group.screen == self.bar.screen:\n self.text = layout.name\n self.bar.draw()\n hook.subscribe.layout_change(hook_response)\n\n\nclass CurrentLayoutIcon(base._TextBox):\n \"\"\"\n Display the icon representing the current layout of the\n current group of the screen on which the bar containing the widget is.\n\n If you are using custom layouts, a default icon with question mark\n will be displayed for them. If you want to use custom icon for your own\n layout, for example, `FooGrid`, then create a file named\n \"layout-foogrid.png\" and place it in `~/.icons` directory. 
You can as well\n use other directories, but then you need to specify those directories\n in `custom_icon_paths` argument for this plugin.\n\n The order of icon search is:\n\n - dirs in `custom_icon_paths` config argument\n - `~/.icons`\n - built-in qtile icons\n \"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n\n defaults = [\n (\n 'scale',\n 1,\n 'Scale factor relative to the bar height. '\n 'Defaults to 1'\n ),\n (\n 'custom_icon_paths',\n [],\n 'List of folders where to search icons before'\n 'using built-in icons or icons in ~/.icons dir. '\n 'This can also be used to provide'\n 'missing icons for custom layouts. '\n 'Defaults to empty list.'\n )\n ]\n\n def __init__(self, **config):\n base._TextBox.__init__(self, \"\", **config)\n self.add_defaults(CurrentLayoutIcon.defaults)\n self.scale = 1.0 / self.scale\n\n self.length_type = bar.STATIC\n self.length = 0\n\n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n layout_id = self.bar.screen.group.current_layout\n self.text = self.bar.screen.group.layouts[layout_id].name\n self.current_layout = self.text\n self.icons_loaded = False\n self.icon_paths = []\n self.surfaces = {}\n self._update_icon_paths()\n self._setup_images()\n self._setup_hooks()\n\n self.add_callbacks({\n 'Button1': qtile.cmd_next_layout,\n 'Button2': qtile.cmd_prev_layout,\n })\n\n def _setup_hooks(self):\n \"\"\"\n Listens for layout change and performs a redraw when it occurs.\n \"\"\"\n def hook_response(layout, group):\n if group.screen is not None and group.screen == self.bar.screen:\n self.current_layout = layout.name\n self.bar.draw()\n hook.subscribe.layout_change(hook_response)\n\n def draw(self):\n if self.icons_loaded:\n try:\n surface = self.surfaces[self.current_layout]\n except KeyError:\n logger.error('No icon for layout {}'.format(\n self.current_layout\n ))\n else:\n self.drawer.clear(self.background or self.bar.background)\n self.drawer.ctx.set_source(surface)\n self.drawer.ctx.paint()\n self.drawer.draw(offsetx=self.offset, width=self.length)\n else:\n # Fallback to text\n self.text = self.current_layout[0].upper()\n base._TextBox.draw(self)\n\n def _get_layout_names(self):\n \"\"\"\n Returns the list of lowercased strings for each available layout name.\n \"\"\"\n return [layout.__class__.__name__.lower() for layout in self.qtile.config.layouts]\n\n def _update_icon_paths(self):\n self.icon_paths = []\n\n # We allow user to override icon search path\n self.icon_paths.extend(self.custom_icon_paths)\n\n # We also look in ~/.icons/\n self.icon_paths.append(os.path.expanduser('~/.icons'))\n\n # Default icons are in libqtile/resources/layout-icons.\n # If using default config without any custom icons,\n # this path will be used.\n root = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-2])\n self.icon_paths.append(os.path.join(root, 'resources', 'layout-icons'))\n\n def find_icon_file_path(self, layout_name):\n icon_filename = 'layout-{}.png'.format(layout_name)\n for icon_path in self.icon_paths:\n icon_file_path = os.path.join(icon_path, icon_filename)\n if os.path.isfile(icon_file_path):\n return icon_file_path\n\n def _setup_images(self):\n \"\"\"\n Loads layout icons.\n \"\"\"\n for layout_name in self._get_layout_names():\n icon_file_path = self.find_icon_file_path(layout_name)\n if icon_file_path is None:\n logger.warning('No icon found for layout \"{}\"'.format(layout_name))\n icon_file_path = self.find_icon_file_path('unknown')\n\n try:\n img = cairocffi.ImageSurface.create_from_png(icon_file_path)\n 
except (cairocffi.Error, IOError) as e:\n # Icon file is guaranteed to exist at this point.\n # If this exception happens, it means the icon file contains\n # an invalid image or is not readable.\n self.icons_loaded = False\n logger.exception(\n 'Failed to load icon from file \"{}\", '\n 'error was: {}'.format(icon_file_path, e.message)\n )\n return\n\n input_width = img.get_width()\n input_height = img.get_height()\n\n sp = input_height / (self.bar.height - 1)\n\n width = input_width / sp\n if width > self.length:\n self.length = int(width) + self.actual_padding * 2\n\n imgpat = cairocffi.SurfacePattern(img)\n\n scaler = cairocffi.Matrix()\n\n scaler.scale(sp, sp)\n scaler.scale(self.scale, self.scale)\n factor = (1 - 1 / self.scale) / 2\n scaler.translate(-width * factor, -width * factor)\n scaler.translate(self.actual_padding * -1, 0)\n imgpat.set_matrix(scaler)\n\n imgpat.set_filter(cairocffi.FILTER_BEST)\n self.surfaces[layout_name] = imgpat\n\n self.icons_loaded = True\n", "path": "libqtile/widget/currentlayout.py"}]} | 3,169 | 234 |
gh_patches_debug_31530 | rasdani/github-patches | git_diff | voxel51__fiftyone-3317 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[INSTALL] Some `setup.py` dependencies are not used.
### System information
- **OS Platform and Distribution** (e.g., Linux Ubuntu 16.04): Ubuntu 22.04
- **Python version** (`python --version`): 3.9.17
- **FiftyOne version** (`fiftyone --version`): 0.21
- **FiftyOne installed from** (pip or source): pip/poetry
### Describe the problem
My main issue was related to the `eventlet` library, so I started looking into where it's used. When I didn't find any place where it's used, I decided to go through `setup.py` to look for other unused dependencies. Here's the list, with comments:
- `eventlet` - not used in the project, but it causes issues when installed; for example, `FastAPI` crashes during file downloading with a traceback pointing to a file descriptor used by the `socket` library (which is basically `asyncio` internals), which makes sense because `eventlet` is just a hack to make sync code async. I also saw the same issues with the `s3fs` library.
- `future` - this library is for Python 2/3 compatibility; I'm not sure it is needed, since the minimum Python version this project supports is 3.7.
- `kaleido` - after installation, `kaleido` exports the same `plotly` symbol, which I believe gets overwritten by the `plotly` library itself.
**Question**: Am I missing something, or are these dependencies really not used in the project? If so, is it possible to remove them? (I can make a PR.)
--- END ISSUE ---
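As a quick sanity check (an assumed workflow, not something from the report), a direct-import scan over the package source helps narrow which of the suspected requirements are never imported. Note that such a scan only catches direct imports; it would still miss packages used indirectly, for example `plotly` discovering `kaleido` at runtime for static image export.
```python
# Hypothetical helper: list suspected requirements that are never imported
# directly anywhere under the package directory ("fiftyone" is assumed).
import ast
import pathlib

SUSPECTS = {"eventlet", "future", "kaleido"}


def imported_top_level(root: str) -> set:
    names = set()
    for path in pathlib.Path(root).rglob("*.py"):
        try:
            tree = ast.parse(path.read_text(encoding="utf-8"))
        except SyntaxError:
            continue  # skip files that are not plain Python 3 source
        for node in ast.walk(tree):
            if isinstance(node, ast.Import):
                names.update(a.name.split(".")[0] for a in node.names)
            elif isinstance(node, ast.ImportFrom) and node.module:
                names.add(node.module.split(".")[0])
    return names


print(SUSPECTS - imported_top_level("fiftyone"))  # removal candidates
```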
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 """
3 Installs FiftyOne.
4
5 | Copyright 2017-2023, Voxel51, Inc.
6 | `voxel51.com <https://voxel51.com/>`_
7 |
8 """
9 try:
10 from importlib import metadata
11 except ImportError:
12 import importlib_metadata as metadata
13
14 import os
15 import re
16 from setuptools import setup, find_packages
17
18
19 VERSION = "0.21.5"
20
21
22 def get_version():
23 if "RELEASE_VERSION" in os.environ:
24 version = os.environ["RELEASE_VERSION"]
25 if not version.startswith(VERSION):
26 raise ValueError(
27 "Release version does not match version: %s and %s"
28 % (version, VERSION)
29 )
30 return version
31
32 return VERSION
33
34
35 INSTALL_REQUIRES = [
36 # third-party packages
37 "aiofiles",
38 "argcomplete",
39 "boto3",
40 "cachetools",
41 "dacite>=1.6.0,<1.8.0",
42 "Deprecated",
43 "eventlet",
44 "ftfy",
45 "future",
46 "hypercorn>=0.13.2",
47 "importlib-metadata; python_version<'3.8'",
48 "Jinja2>=3",
49 "kaleido",
50 "matplotlib",
51 "mongoengine==0.24.2",
52 "motor>=2.5",
53 "numpy",
54 "packaging",
55 "pandas",
56 "Pillow>=6.2",
57 "plotly>=4.14",
58 "pprintpp",
59 "psutil",
60 "pymongo>=3.12",
61 "pytz",
62 "PyYAML",
63 "regex",
64 "retrying",
65 "scikit-learn",
66 "scikit-image",
67 "setuptools",
68 "sseclient-py>=1.7.2,<2",
69 "sse-starlette>=0.10.3,<1",
70 "starlette>=0.24.0",
71 "strawberry-graphql==0.138.1",
72 "tabulate",
73 "xmltodict",
74 "universal-analytics-python3>=1.0.1,<2",
75 # internal packages
76 "fiftyone-brain>=0.13,<0.14",
77 "fiftyone-db>=0.4,<0.5",
78 "voxel51-eta>=0.10,<0.11",
79 ]
80
81
82 CHOOSE_INSTALL_REQUIRES = [
83 (
84 (
85 "opencv-python",
86 "opencv-contrib-python",
87 "opencv-contrib-python-headless",
88 ),
89 "opencv-python-headless",
90 )
91 ]
92
93
94 def choose_requirement(mains, secondary):
95 chosen = secondary
96 for main in mains:
97 try:
98 name = re.split(r"[!<>=]", main)[0]
99 metadata.version(name)
100 chosen = main
101 break
102 except metadata.PackageNotFoundError:
103 pass
104
105 return str(chosen)
106
107
108 def get_install_requirements(install_requires, choose_install_requires):
109 for mains, secondary in choose_install_requires:
110 install_requires.append(choose_requirement(mains, secondary))
111
112 return install_requires
113
114
115 EXTRAS_REQUIREMENTS = {"desktop": ["fiftyone-desktop>=0.28.2,<0.29"]}
116
117
118 with open("README.md", "r") as fh:
119 long_description = fh.read()
120
121
122 setup(
123 name="fiftyone",
124 version=get_version(),
125 description=(
126 "FiftyOne: the open-source tool for building high-quality datasets "
127 "and computer vision models"
128 ),
129 author="Voxel51, Inc.",
130 author_email="[email protected]",
131 url="https://github.com/voxel51/fiftyone",
132 extras_require=EXTRAS_REQUIREMENTS,
133 license="Apache",
134 long_description=long_description,
135 long_description_content_type="text/markdown",
136 packages=find_packages(
137 exclude=["app", "eta", "package", "requirements", "tests", "tools"]
138 )
139 + ["fiftyone.recipes", "fiftyone.tutorials"],
140 package_dir={
141 "fiftyone.recipes": "docs/source/recipes",
142 "fiftyone.tutorials": "docs/source/tutorials",
143 },
144 install_requires=get_install_requirements(
145 INSTALL_REQUIRES, CHOOSE_INSTALL_REQUIRES
146 ),
147 include_package_data=True,
148 classifiers=[
149 "Development Status :: 4 - Beta",
150 "Intended Audience :: Developers",
151 "Intended Audience :: Science/Research",
152 "License :: OSI Approved :: Apache Software License",
153 "Topic :: Scientific/Engineering :: Artificial Intelligence",
154 "Topic :: Scientific/Engineering :: Image Processing",
155 "Topic :: Scientific/Engineering :: Image Recognition",
156 "Topic :: Scientific/Engineering :: Information Analysis",
157 "Topic :: Scientific/Engineering :: Visualization",
158 "Operating System :: MacOS :: MacOS X",
159 "Operating System :: POSIX :: Linux",
160 "Operating System :: Microsoft :: Windows",
161 "Programming Language :: Python :: 3",
162 "Programming Language :: Python :: 3.7",
163 "Programming Language :: Python :: 3.8",
164 "Programming Language :: Python :: 3.9",
165 "Programming Language :: Python :: 3.10",
166 ],
167 entry_points={"console_scripts": ["fiftyone=fiftyone.core.cli:main"]},
168 python_requires=">=3.7",
169 )
170
```
Path: `app/packages/looker-3d/load_kiti.py`
Content:
```
1 import fiftyone as fo
2 import fiftyone.zoo as foz
3 import os
4
5 # dataset = foz.load_zoo_dataset("kitti", split="train")
6
7 dataset = fo.load_dataset("kitti-copy")
8
9 dataset.add_sample_field("pcd_filepath", fo.StringField)
10
11 # firstSample = dataset.first()
12
13 # dataDir = os.path.dirname(firstSample.filepath)
14 # dataDirParent = os.path.dirname(dataDir)
15 # pointsDir = os.path.join(dataDirParent, 'pcds')
16 # print(pointsDir)
17
18 # for sample in dataset.iter_samples(progress=True):
19 # base = os.path.basename(sample.filepath)
20 # filename = os.path.splitext(base)[0]
21 # pcd_filename = filename + '.bin.pcd'
22 # pcd_filepath = os.path.join(pointsDir, pcd_filename)
23 # sample['pcd_filepath'] = pcd_filepath
24 # sample.save()
25
```
Path: `app/packages/looker-3d/load_kitti_labels.py`
Content:
```
1 import fiftyone as fo
2 import fiftyone.zoo as foz
3 import os
4
5 # dataset = foz.load_zoo_dataset("kitti", split="train")
6
7 dataset = fo.load_dataset("kitti-pcd")
8
9 # dataset.add_sample_field('pcd_filepath', fo.StringField)
10
11 firstSample = dataset.first()
12
13 print(firstSample)
14
15 # dataDir = os.path.dirname(firstSample.filepath)
16 # dataDirParent = os.path.dirname(dataDir)
17 # pointsDir = os.path.join(dataDirParent, 'pcds')
18 # print(pointsDir)
19
20 # for sample in dataset.iter_samples(progress=True):
21 # base = os.path.basename(sample.filepath)
22 # filename = os.path.splitext(base)[0]
23 # pcd_filename = filename + '.bin.pcd'
24 # pcd_filepath = os.path.join(pointsDir, pcd_filename)
25 # sample['pcd_filepath'] = pcd_filepath
26 # sample.save()
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/packages/looker-3d/load_kiti.py b/app/packages/looker-3d/load_kiti.py
deleted file mode 100644
--- a/app/packages/looker-3d/load_kiti.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import fiftyone as fo
-import fiftyone.zoo as foz
-import os
-
-# dataset = foz.load_zoo_dataset("kitti", split="train")
-
-dataset = fo.load_dataset("kitti-copy")
-
-dataset.add_sample_field("pcd_filepath", fo.StringField)
-
-# firstSample = dataset.first()
-
-# dataDir = os.path.dirname(firstSample.filepath)
-# dataDirParent = os.path.dirname(dataDir)
-# pointsDir = os.path.join(dataDirParent, 'pcds')
-# print(pointsDir)
-
-# for sample in dataset.iter_samples(progress=True):
-# base = os.path.basename(sample.filepath)
-# filename = os.path.splitext(base)[0]
-# pcd_filename = filename + '.bin.pcd'
-# pcd_filepath = os.path.join(pointsDir, pcd_filename)
-# sample['pcd_filepath'] = pcd_filepath
-# sample.save()
diff --git a/app/packages/looker-3d/load_kitti_labels.py b/app/packages/looker-3d/load_kitti_labels.py
deleted file mode 100644
--- a/app/packages/looker-3d/load_kitti_labels.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import fiftyone as fo
-import fiftyone.zoo as foz
-import os
-
-# dataset = foz.load_zoo_dataset("kitti", split="train")
-
-dataset = fo.load_dataset("kitti-pcd")
-
-# dataset.add_sample_field('pcd_filepath', fo.StringField)
-
-firstSample = dataset.first()
-
-print(firstSample)
-
-# dataDir = os.path.dirname(firstSample.filepath)
-# dataDirParent = os.path.dirname(dataDir)
-# pointsDir = os.path.join(dataDirParent, 'pcds')
-# print(pointsDir)
-
-# for sample in dataset.iter_samples(progress=True):
-# base = os.path.basename(sample.filepath)
-# filename = os.path.splitext(base)[0]
-# pcd_filename = filename + '.bin.pcd'
-# pcd_filepath = os.path.join(pointsDir, pcd_filename)
-# sample['pcd_filepath'] = pcd_filepath
-# sample.save()
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -40,12 +40,12 @@
"cachetools",
"dacite>=1.6.0,<1.8.0",
"Deprecated",
- "eventlet",
"ftfy",
- "future",
"hypercorn>=0.13.2",
"importlib-metadata; python_version<'3.8'",
"Jinja2>=3",
+ # kaleido indirectly required by plotly for image export
+ # https://plotly.com/python/static-image-export/
"kaleido",
"matplotlib",
"mongoengine==0.24.2",
| {"golden_diff": "diff --git a/app/packages/looker-3d/load_kiti.py b/app/packages/looker-3d/load_kiti.py\ndeleted file mode 100644\n--- a/app/packages/looker-3d/load_kiti.py\n+++ /dev/null\n@@ -1,24 +0,0 @@\n-import fiftyone as fo\n-import fiftyone.zoo as foz\n-import os\n-\n-# dataset = foz.load_zoo_dataset(\"kitti\", split=\"train\")\n-\n-dataset = fo.load_dataset(\"kitti-copy\")\n-\n-dataset.add_sample_field(\"pcd_filepath\", fo.StringField)\n-\n-# firstSample = dataset.first()\n-\n-# dataDir = os.path.dirname(firstSample.filepath)\n-# dataDirParent = os.path.dirname(dataDir)\n-# pointsDir = os.path.join(dataDirParent, 'pcds')\n-# print(pointsDir)\n-\n-# for sample in dataset.iter_samples(progress=True):\n-# base = os.path.basename(sample.filepath)\n-# filename = os.path.splitext(base)[0]\n-# pcd_filename = filename + '.bin.pcd'\n-# pcd_filepath = os.path.join(pointsDir, pcd_filename)\n-# sample['pcd_filepath'] = pcd_filepath\n-# sample.save()\ndiff --git a/app/packages/looker-3d/load_kitti_labels.py b/app/packages/looker-3d/load_kitti_labels.py\ndeleted file mode 100644\n--- a/app/packages/looker-3d/load_kitti_labels.py\n+++ /dev/null\n@@ -1,26 +0,0 @@\n-import fiftyone as fo\n-import fiftyone.zoo as foz\n-import os\n-\n-# dataset = foz.load_zoo_dataset(\"kitti\", split=\"train\")\n-\n-dataset = fo.load_dataset(\"kitti-pcd\")\n-\n-# dataset.add_sample_field('pcd_filepath', fo.StringField)\n-\n-firstSample = dataset.first()\n-\n-print(firstSample)\n-\n-# dataDir = os.path.dirname(firstSample.filepath)\n-# dataDirParent = os.path.dirname(dataDir)\n-# pointsDir = os.path.join(dataDirParent, 'pcds')\n-# print(pointsDir)\n-\n-# for sample in dataset.iter_samples(progress=True):\n-# base = os.path.basename(sample.filepath)\n-# filename = os.path.splitext(base)[0]\n-# pcd_filename = filename + '.bin.pcd'\n-# pcd_filepath = os.path.join(pointsDir, pcd_filename)\n-# sample['pcd_filepath'] = pcd_filepath\n-# sample.save()\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -40,12 +40,12 @@\n \"cachetools\",\n \"dacite>=1.6.0,<1.8.0\",\n \"Deprecated\",\n- \"eventlet\",\n \"ftfy\",\n- \"future\",\n \"hypercorn>=0.13.2\",\n \"importlib-metadata; python_version<'3.8'\",\n \"Jinja2>=3\",\n+ # kaleido indirectly required by plotly for image export\n+ # https://plotly.com/python/static-image-export/\n \"kaleido\",\n \"matplotlib\",\n \"mongoengine==0.24.2\",\n", "issue": "[INSTALL] Some `setup.py` dependencies are not used.\n### System information\r\n\r\n- **OS Platform and Distribution** (e.g., Linux Ubuntu 16.04): Ubuntu 22.04\r\n- **Python version** (`python --version`): 3.9.17\r\n- **FiftyOne version** (`fiftyone --version`): 0.21\r\n- **FiftyOne installed from** (pip or source): pip/poetry\r\n\r\n### Describe the problem\r\n\r\nMy main issue was related to `eventlet` library, so I started looking where it's used. When I didn't find a place where it's used I decided to go through `setup.py` to find other unused ones. Here's the list with comments:\r\n\r\n- `eventlet` - not used in the project but causes issues when it's installed; for example, `FastAPI` crashes during file downloading with a traceback pointing to a file descriptor used by a `socket` library (which is basically `asyncio` internals), and that makes sense because `eventlet` is just a hack to make sync code async. 
Also I saw the same issues with `s3fs` library.\r\n- `future` - this library is for Python2/3 compatibility; not sure if this is needed since the minimum version of Python this project supports is 3.7.\r\n- `kaleido` - `kaleido` after installation exports the same symbol `plotly` which I believe is being overwritten with the `plotly` library.\r\n\r\n**Question**: Am I missing something or these dependencies are really not used in the project? If so is it possible to remove them? (I can make a PR)\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nInstalls FiftyOne.\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\ntry:\n from importlib import metadata\nexcept ImportError:\n import importlib_metadata as metadata\n\nimport os\nimport re\nfrom setuptools import setup, find_packages\n\n\nVERSION = \"0.21.5\"\n\n\ndef get_version():\n if \"RELEASE_VERSION\" in os.environ:\n version = os.environ[\"RELEASE_VERSION\"]\n if not version.startswith(VERSION):\n raise ValueError(\n \"Release version does not match version: %s and %s\"\n % (version, VERSION)\n )\n return version\n\n return VERSION\n\n\nINSTALL_REQUIRES = [\n # third-party packages\n \"aiofiles\",\n \"argcomplete\",\n \"boto3\",\n \"cachetools\",\n \"dacite>=1.6.0,<1.8.0\",\n \"Deprecated\",\n \"eventlet\",\n \"ftfy\",\n \"future\",\n \"hypercorn>=0.13.2\",\n \"importlib-metadata; python_version<'3.8'\",\n \"Jinja2>=3\",\n \"kaleido\",\n \"matplotlib\",\n \"mongoengine==0.24.2\",\n \"motor>=2.5\",\n \"numpy\",\n \"packaging\",\n \"pandas\",\n \"Pillow>=6.2\",\n \"plotly>=4.14\",\n \"pprintpp\",\n \"psutil\",\n \"pymongo>=3.12\",\n \"pytz\",\n \"PyYAML\",\n \"regex\",\n \"retrying\",\n \"scikit-learn\",\n \"scikit-image\",\n \"setuptools\",\n \"sseclient-py>=1.7.2,<2\",\n \"sse-starlette>=0.10.3,<1\",\n \"starlette>=0.24.0\",\n \"strawberry-graphql==0.138.1\",\n \"tabulate\",\n \"xmltodict\",\n \"universal-analytics-python3>=1.0.1,<2\",\n # internal packages\n \"fiftyone-brain>=0.13,<0.14\",\n \"fiftyone-db>=0.4,<0.5\",\n \"voxel51-eta>=0.10,<0.11\",\n]\n\n\nCHOOSE_INSTALL_REQUIRES = [\n (\n (\n \"opencv-python\",\n \"opencv-contrib-python\",\n \"opencv-contrib-python-headless\",\n ),\n \"opencv-python-headless\",\n )\n]\n\n\ndef choose_requirement(mains, secondary):\n chosen = secondary\n for main in mains:\n try:\n name = re.split(r\"[!<>=]\", main)[0]\n metadata.version(name)\n chosen = main\n break\n except metadata.PackageNotFoundError:\n pass\n\n return str(chosen)\n\n\ndef get_install_requirements(install_requires, choose_install_requires):\n for mains, secondary in choose_install_requires:\n install_requires.append(choose_requirement(mains, secondary))\n\n return install_requires\n\n\nEXTRAS_REQUIREMENTS = {\"desktop\": [\"fiftyone-desktop>=0.28.2,<0.29\"]}\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetup(\n name=\"fiftyone\",\n version=get_version(),\n description=(\n \"FiftyOne: the open-source tool for building high-quality datasets \"\n \"and computer vision models\"\n ),\n author=\"Voxel51, Inc.\",\n author_email=\"[email protected]\",\n url=\"https://github.com/voxel51/fiftyone\",\n extras_require=EXTRAS_REQUIREMENTS,\n license=\"Apache\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(\n exclude=[\"app\", \"eta\", \"package\", \"requirements\", \"tests\", \"tools\"]\n )\n + [\"fiftyone.recipes\", \"fiftyone.tutorials\"],\n package_dir={\n \"fiftyone.recipes\": 
\"docs/source/recipes\",\n \"fiftyone.tutorials\": \"docs/source/tutorials\",\n },\n install_requires=get_install_requirements(\n INSTALL_REQUIRES, CHOOSE_INSTALL_REQUIRES\n ),\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Image Processing\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n entry_points={\"console_scripts\": [\"fiftyone=fiftyone.core.cli:main\"]},\n python_requires=\">=3.7\",\n)\n", "path": "setup.py"}, {"content": "import fiftyone as fo\nimport fiftyone.zoo as foz\nimport os\n\n# dataset = foz.load_zoo_dataset(\"kitti\", split=\"train\")\n\ndataset = fo.load_dataset(\"kitti-copy\")\n\ndataset.add_sample_field(\"pcd_filepath\", fo.StringField)\n\n# firstSample = dataset.first()\n\n# dataDir = os.path.dirname(firstSample.filepath)\n# dataDirParent = os.path.dirname(dataDir)\n# pointsDir = os.path.join(dataDirParent, 'pcds')\n# print(pointsDir)\n\n# for sample in dataset.iter_samples(progress=True):\n# base = os.path.basename(sample.filepath)\n# filename = os.path.splitext(base)[0]\n# pcd_filename = filename + '.bin.pcd'\n# pcd_filepath = os.path.join(pointsDir, pcd_filename)\n# sample['pcd_filepath'] = pcd_filepath\n# sample.save()\n", "path": "app/packages/looker-3d/load_kiti.py"}, {"content": "import fiftyone as fo\nimport fiftyone.zoo as foz\nimport os\n\n# dataset = foz.load_zoo_dataset(\"kitti\", split=\"train\")\n\ndataset = fo.load_dataset(\"kitti-pcd\")\n\n# dataset.add_sample_field('pcd_filepath', fo.StringField)\n\nfirstSample = dataset.first()\n\nprint(firstSample)\n\n# dataDir = os.path.dirname(firstSample.filepath)\n# dataDirParent = os.path.dirname(dataDir)\n# pointsDir = os.path.join(dataDirParent, 'pcds')\n# print(pointsDir)\n\n# for sample in dataset.iter_samples(progress=True):\n# base = os.path.basename(sample.filepath)\n# filename = os.path.splitext(base)[0]\n# pcd_filename = filename + '.bin.pcd'\n# pcd_filepath = os.path.join(pointsDir, pcd_filename)\n# sample['pcd_filepath'] = pcd_filepath\n# sample.save()\n", "path": "app/packages/looker-3d/load_kitti_labels.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nInstalls FiftyOne.\n\n| Copyright 2017-2023, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\ntry:\n from importlib import metadata\nexcept ImportError:\n import importlib_metadata as metadata\n\nimport os\nimport re\nfrom setuptools import setup, find_packages\n\n\nVERSION = \"0.21.4\"\n\n\ndef get_version():\n if \"RELEASE_VERSION\" in os.environ:\n version = os.environ[\"RELEASE_VERSION\"]\n if not version.startswith(VERSION):\n raise ValueError(\n \"Release version does not match version: %s and %s\"\n % (version, VERSION)\n )\n return version\n\n return VERSION\n\n\nINSTALL_REQUIRES = [\n # third-party packages\n \"aiofiles\",\n 
\"argcomplete\",\n \"boto3\",\n \"cachetools\",\n \"dacite>=1.6.0,<1.8.0\",\n \"Deprecated\",\n \"ftfy\",\n \"hypercorn>=0.13.2\",\n \"importlib-metadata; python_version<'3.8'\",\n \"Jinja2>=3\",\n # kaleido indirectly required by plotly for image export\n # https://plotly.com/python/static-image-export/\n \"kaleido\",\n \"matplotlib\",\n \"mongoengine==0.24.2\",\n \"motor>=2.5\",\n \"numpy\",\n \"packaging\",\n \"pandas\",\n \"Pillow>=6.2\",\n \"plotly>=4.14\",\n \"pprintpp\",\n \"psutil\",\n \"pymongo>=3.12\",\n \"pytz\",\n \"PyYAML\",\n \"regex\",\n \"retrying\",\n \"scikit-learn\",\n \"scikit-image\",\n \"setuptools\",\n \"sseclient-py>=1.7.2,<2\",\n \"sse-starlette>=0.10.3,<1\",\n \"starlette>=0.24.0\",\n \"strawberry-graphql==0.138.1\",\n \"tabulate\",\n \"xmltodict\",\n \"universal-analytics-python3>=1.0.1,<2\",\n # internal packages\n \"fiftyone-brain>=0.13,<0.14\",\n \"fiftyone-db>=0.4,<0.5\",\n \"voxel51-eta>=0.10,<0.11\",\n]\n\n\nCHOOSE_INSTALL_REQUIRES = [\n (\n (\n \"opencv-python\",\n \"opencv-contrib-python\",\n \"opencv-contrib-python-headless\",\n ),\n \"opencv-python-headless\",\n )\n]\n\n\ndef choose_requirement(mains, secondary):\n chosen = secondary\n for main in mains:\n try:\n name = re.split(r\"[!<>=]\", main)[0]\n metadata.version(name)\n chosen = main\n break\n except metadata.PackageNotFoundError:\n pass\n\n return str(chosen)\n\n\ndef get_install_requirements(install_requires, choose_install_requires):\n for mains, secondary in choose_install_requires:\n install_requires.append(choose_requirement(mains, secondary))\n\n return install_requires\n\n\nEXTRAS_REQUIREMENTS = {\"desktop\": [\"fiftyone-desktop>=0.28.2,<0.29\"]}\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nsetup(\n name=\"fiftyone\",\n version=get_version(),\n description=(\n \"FiftyOne: the open-source tool for building high-quality datasets \"\n \"and computer vision models\"\n ),\n author=\"Voxel51, Inc.\",\n author_email=\"[email protected]\",\n url=\"https://github.com/voxel51/fiftyone\",\n extras_require=EXTRAS_REQUIREMENTS,\n license=\"Apache\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(\n exclude=[\"app\", \"eta\", \"package\", \"requirements\", \"tests\", \"tools\"]\n )\n + [\"fiftyone.recipes\", \"fiftyone.tutorials\"],\n package_dir={\n \"fiftyone.recipes\": \"docs/source/recipes\",\n \"fiftyone.tutorials\": \"docs/source/tutorials\",\n },\n install_requires=get_install_requirements(\n INSTALL_REQUIRES, CHOOSE_INSTALL_REQUIRES\n ),\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Image Processing\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n entry_points={\"console_scripts\": [\"fiftyone=fiftyone.core.cli:main\"]},\n 
python_requires=\">=3.7\",\n)\n", "path": "setup.py"}, {"content": null, "path": "app/packages/looker-3d/load_kiti.py"}, {"content": null, "path": "app/packages/looker-3d/load_kitti_labels.py"}]} | 2,721 | 728 |
gh_patches_debug_24836 | rasdani/github-patches | git_diff | sktime__sktime-4036 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] LSTM deep learning estimators failing CI on windows
Recently, failures of two deep learning estimators have been appearing on the Windows CI:
```
FAILED sktime/tests/test_all_estimators.py::TestAllEstimators::test_methods_have_no_side_effects[MLPClassifier-1-ClassifierFitPredictMultivariate-predict]
FAILED sktime/tests/test_all_estimators.py::TestAllEstimators::test_methods_have_no_side_effects[LSTMFCNClassifier-0-ClassifierFitPredictMultivariate-predict]
```
Note that the failure appears only on Python 3.9, but this is due to the matrix design, which spreads estimators across version/OS combinations.
In theory, it could be a 3.9-specific failure, but I think that is less likely than Windows specificity (although it may be worth testing that by turning the `matrixdesign` flag off in the CI).
--- END ISSUE ---
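Editorial note (not part of the issue): assuming a local checkout of the sktime repository with the test dependencies and `tensorflow` installed, the two failing cases quoted above can be re-run in isolation by passing their node IDs to pytest; given the issue, the failure itself may only reproduce on Windows.
```python
# Hedged sketch: re-run only the failing test cases from a sktime checkout.
# The node IDs are copied verbatim from the CI log quoted in the issue.
import pytest

failing = [
    "sktime/tests/test_all_estimators.py::TestAllEstimators::"
    "test_methods_have_no_side_effects[MLPClassifier-1-ClassifierFitPredictMultivariate-predict]",
    "sktime/tests/test_all_estimators.py::TestAllEstimators::"
    "test_methods_have_no_side_effects[LSTMFCNClassifier-0-ClassifierFitPredictMultivariate-predict]",
]

raise SystemExit(pytest.main(failing))
```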
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/classification/deep_learning/lstmfcn.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """LongShort Term Memory Fully Convolutional Network (LSTM-FCN)."""
3 __author__ = ["jnrusson1", "solen0id"]
4
5 __all__ = ["LSTMFCNClassifier"]
6
7 from sklearn.utils import check_random_state
8
9 from sktime.classification.deep_learning.base import BaseDeepClassifier
10 from sktime.networks.lstmfcn import LSTMFCNNetwork
11 from sktime.utils.validation._dependencies import _check_dl_dependencies
12
13 _check_dl_dependencies(severity="warning")
14
15
16 class LSTMFCNClassifier(BaseDeepClassifier):
17 """
18
19 Implementation of LSTMFCNClassifier from Karim et al (2019) [1].
20
21 Overview
22 --------
23 Combines an LSTM arm with a CNN arm. Optionally uses an attention mechanism in the
24 LSTM which the author indicates provides improved performance.
25
26
27 Parameters
28 ----------
29 n_epochs: int, default=2000
30 the number of epochs to train the model
31 batch_size: int, default=128
32 the number of samples per gradient update.
33 dropout: float, default=0.8
34 controls dropout rate of LSTM layer
35 kernel_sizes: list of ints, default=[8, 5, 3]
36 specifying the length of the 1D convolution windows
37 filter_sizes: int, list of ints, default=[128, 256, 128]
38 size of filter for each conv layer
39 lstm_size: int, default=8
40 output dimension for LSTM layer
41 attention: boolean, default=False
42 If True, uses custom attention LSTM layer
43 callbacks: keras callbacks, default=ReduceLRonPlateau
44 Keras callbacks to use such as learning rate reduction or saving best model
45 based on validation error
46 verbose: 'auto', 0, 1, or 2. Verbosity mode.
47 0 = silent, 1 = progress bar, 2 = one line per epoch.
48 'auto' defaults to 1 for most cases, but 2 when used with
49 `ParameterServerStrategy`. Note that the progress bar is not
50 particularly useful when logged to a file, so verbose=2 is
51 recommended when not running interactively (eg, in a production
52 environment).
53 random_state : int or None, default=None
54 Seed for random, integer.
55
56
57 Notes
58 -----
59 Ported from sktime-dl source code
60 https://github.com/sktime/sktime-dl/blob/master/sktime_dl/classification/_lstmfcn.py
61
62 References
63 ----------
64 .. [1] Karim et al. Multivariate LSTM-FCNs for Time Series Classification, 2019
65 https://arxiv.org/pdf/1801.04503.pdf
66
67 """
68
69 _tags = {"python_dependencies": "tensorflow"}
70
71 def __init__(
72 self,
73 n_epochs=100,
74 batch_size=128,
75 dropout=0.8,
76 kernel_sizes=(8, 5, 3),
77 filter_sizes=(128, 256, 128),
78 lstm_size=8,
79 attention=False,
80 callbacks=None,
81 random_state=None,
82 verbose=0,
83 ):
84
85 super(LSTMFCNClassifier, self).__init__()
86
87 # calced in fit
88 self.classes_ = None
89 self.input_shape = None
90 self.model_ = None
91 self.history = None
92
93 # predefined
94 self.n_epochs = n_epochs
95 self.batch_size = batch_size
96 self.kernel_sizes = kernel_sizes
97 self.filter_sizes = filter_sizes
98 self.lstm_size = lstm_size
99 self.dropout = dropout
100 self.attention = attention
101
102 self.callbacks = callbacks
103 self.random_state = random_state
104 self.verbose = verbose
105
106 self._network = LSTMFCNNetwork(
107 kernel_sizes=self.kernel_sizes,
108 filter_sizes=self.filter_sizes,
109 random_state=self.random_state,
110 lstm_size=self.lstm_size,
111 dropout=self.dropout,
112 attention=self.attention,
113 )
114 self._is_fitted = False
115
116 def build_model(self, input_shape, n_classes, **kwargs):
117 """
118 Construct a compiled, un-trained, keras model that is ready for training.
119
120 ----------
121 input_shape : tuple
122 The shape of the data fed into the input layer
123 n_classes: int
124 The number of classes, which shall become the size of the output
125 layer
126 Returns
127 -------
128 output : a compiled Keras Model
129 """
130 import tensorflow as tf
131 from tensorflow import keras
132
133 tf.random.set_seed(self.random_state)
134
135 input_layers, output_layer = self._network.build_network(input_shape, **kwargs)
136
137 output_layer = keras.layers.Dense(units=n_classes, activation="softmax")(
138 output_layer
139 )
140
141 model = keras.models.Model(inputs=input_layers, outputs=output_layer)
142
143 model.compile(
144 loss="categorical_crossentropy",
145 optimizer="adam",
146 metrics=["accuracy"],
147 )
148
149 if self.callbacks is None:
150 self._callbacks = []
151
152 return model
153
154 def _fit(self, X, y):
155 """
156 Fit the classifier on the training set (X, y).
157
158 ----------
159 X : a nested pd.Dataframe, or (if input_checks=False) array-like of
160 shape = (n_instances, series_length, n_dimensions)
161 The training input samples. If a 2D array-like is passed,
162 n_dimensions is assumed to be 1.
163 y : array-like, shape = [n_instances]
164 The training data class labels.
165 input_checks : boolean
166 whether to check the X and y parameters
167 validation_X : a nested pd.Dataframe, or array-like of shape =
168 (n_instances, series_length, n_dimensions)
169 The validation samples. If a 2D array-like is passed,
170 n_dimensions is assumed to be 1.
171 Unless strictly defined by the user via callbacks (such as
172 EarlyStopping), the presence or state of the validation
173 data does not alter training in any way. Predictions at each epoch
174 are stored in the model's fit history.
175 validation_y : array-like, shape = [n_instances]
176 The validation class labels.
177
178 Returns
179 -------
180 self : object
181 """
182 check_random_state(self.random_state)
183
184 y_onehot = self.convert_y_to_keras(y)
185
186 # Remove?
187 # Transpose to conform to Keras input style.
188 X = X.transpose(0, 2, 1)
189
190 # ignore the number of instances, X.shape[0],
191 # just want the shape of each instance
192 self.input_shape = X.shape[1:]
193
194 self.model_ = self.build_model(self.input_shape, self.n_classes_)
195
196 if self.verbose:
197 self.model_.summary()
198
199 self.history = self.model_.fit(
200 X,
201 y_onehot,
202 batch_size=self.batch_size,
203 epochs=self.n_epochs,
204 verbose=self.verbose,
205 callbacks=self._callbacks,
206 )
207
208 self._is_fitted = True
209
210 return self
211
212 @classmethod
213 def get_test_params(cls, parameter_set="default"):
214 """Return testing parameter settings for the estimator.
215
216 Parameters
217 ----------
218 parameter_set : str, default="default"
219 Name of the set of test parameters to return, for use in tests. If no
220 special parameters are defined for a value, will return `"default"` set.
221 For classifiers, a "default" set of parameters should be provided for
222 general testing, and a "results_comparison" set for comparing against
223 previously recorded results if the general set does not produce suitable
224 probabilities to compare against.
225
226 Returns
227 -------
228 params : dict or list of dict, default={}
229 Parameters to create testing instances of the class.
230 Each dict are parameters to construct an "interesting" test instance, i.e.,
231 `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
232 `create_test_instance` uses the first (or only) dictionary in `params`.
233 """
234 param1 = {
235 "n_epochs": 50,
236 }
237
238 param2 = {
239 "n_epochs": 100,
240 }
241
242 return [param1, param2]
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sktime/classification/deep_learning/lstmfcn.py b/sktime/classification/deep_learning/lstmfcn.py
--- a/sktime/classification/deep_learning/lstmfcn.py
+++ b/sktime/classification/deep_learning/lstmfcn.py
@@ -70,7 +70,7 @@
def __init__(
self,
- n_epochs=100,
+ n_epochs=2000,
batch_size=128,
dropout=0.8,
kernel_sizes=(8, 5, 3),
@@ -84,7 +84,6 @@
super(LSTMFCNClassifier, self).__init__()
- # calced in fit
self.classes_ = None
self.input_shape = None
self.model_ = None
@@ -232,11 +231,19 @@
`create_test_instance` uses the first (or only) dictionary in `params`.
"""
param1 = {
- "n_epochs": 50,
+ "n_epochs": 25,
+ "batch_size": 4,
+ "kernel_sizes": (3, 2, 1),
+ "filter_sizes": (2, 4, 2),
}
param2 = {
- "n_epochs": 100,
+ "n_epochs": 5,
+ "batch_size": 2,
+ "kernel_sizes": (3, 2, 1),
+ "filter_sizes": (2, 4, 2),
+ "lstm_size": 2,
+ "attention": True,
}
return [param1, param2]
| {"golden_diff": "diff --git a/sktime/classification/deep_learning/lstmfcn.py b/sktime/classification/deep_learning/lstmfcn.py\n--- a/sktime/classification/deep_learning/lstmfcn.py\n+++ b/sktime/classification/deep_learning/lstmfcn.py\n@@ -70,7 +70,7 @@\n \n def __init__(\n self,\n- n_epochs=100,\n+ n_epochs=2000,\n batch_size=128,\n dropout=0.8,\n kernel_sizes=(8, 5, 3),\n@@ -84,7 +84,6 @@\n \n super(LSTMFCNClassifier, self).__init__()\n \n- # calced in fit\n self.classes_ = None\n self.input_shape = None\n self.model_ = None\n@@ -232,11 +231,19 @@\n `create_test_instance` uses the first (or only) dictionary in `params`.\n \"\"\"\n param1 = {\n- \"n_epochs\": 50,\n+ \"n_epochs\": 25,\n+ \"batch_size\": 4,\n+ \"kernel_sizes\": (3, 2, 1),\n+ \"filter_sizes\": (2, 4, 2),\n }\n \n param2 = {\n- \"n_epochs\": 100,\n+ \"n_epochs\": 5,\n+ \"batch_size\": 2,\n+ \"kernel_sizes\": (3, 2, 1),\n+ \"filter_sizes\": (2, 4, 2),\n+ \"lstm_size\": 2,\n+ \"attention\": True,\n }\n \n return [param1, param2]\n", "issue": "[BUG] LSTM deep learning estimators failing CI on windows\nSince recently, a failure of two deep learning estimators has been appearing on windows CI:\r\n```\r\nFAILED sktime/tests/test_all_estimators.py::TestAllEstimators::test_methods_have_no_side_effects[MLPClassifier-1-ClassifierFitPredictMultivariate-predict]\r\nFAILED sktime/tests/test_all_estimators.py::TestAllEstimators::test_methods_have_no_side_effects[LSTMFCNClassifier-0-ClassifierFitPredictMultivariate-predict]\r\n```\r\n\r\nNote that the failure appears to be only on python 3.9, but this is due to the matrix design which spreads estimators across version/OS combinations.\r\n\r\nIn theory, it could be a 3.9 specific failure, but I think that is less likely than windows specifity (although it may be worth to test that by turning the `matrixdesign` flag off in the CI).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"LongShort Term Memory Fully Convolutional Network (LSTM-FCN).\"\"\"\n__author__ = [\"jnrusson1\", \"solen0id\"]\n\n__all__ = [\"LSTMFCNClassifier\"]\n\nfrom sklearn.utils import check_random_state\n\nfrom sktime.classification.deep_learning.base import BaseDeepClassifier\nfrom sktime.networks.lstmfcn import LSTMFCNNetwork\nfrom sktime.utils.validation._dependencies import _check_dl_dependencies\n\n_check_dl_dependencies(severity=\"warning\")\n\n\nclass LSTMFCNClassifier(BaseDeepClassifier):\n \"\"\"\n\n Implementation of LSTMFCNClassifier from Karim et al (2019) [1].\n\n Overview\n --------\n Combines an LSTM arm with a CNN arm. Optionally uses an attention mechanism in the\n LSTM which the author indicates provides improved performance.\n\n\n Parameters\n ----------\n n_epochs: int, default=2000\n the number of epochs to train the model\n batch_size: int, default=128\n the number of samples per gradient update.\n dropout: float, default=0.8\n controls dropout rate of LSTM layer\n kernel_sizes: list of ints, default=[8, 5, 3]\n specifying the length of the 1D convolution windows\n filter_sizes: int, list of ints, default=[128, 256, 128]\n size of filter for each conv layer\n lstm_size: int, default=8\n output dimension for LSTM layer\n attention: boolean, default=False\n If True, uses custom attention LSTM layer\n callbacks: keras callbacks, default=ReduceLRonPlateau\n Keras callbacks to use such as learning rate reduction or saving best model\n based on validation error\n verbose: 'auto', 0, 1, or 2. 
Verbosity mode.\n 0 = silent, 1 = progress bar, 2 = one line per epoch.\n 'auto' defaults to 1 for most cases, but 2 when used with\n `ParameterServerStrategy`. Note that the progress bar is not\n particularly useful when logged to a file, so verbose=2 is\n recommended when not running interactively (eg, in a production\n environment).\n random_state : int or None, default=None\n Seed for random, integer.\n\n\n Notes\n -----\n Ported from sktime-dl source code\n https://github.com/sktime/sktime-dl/blob/master/sktime_dl/classification/_lstmfcn.py\n\n References\n ----------\n .. [1] Karim et al. Multivariate LSTM-FCNs for Time Series Classification, 2019\n https://arxiv.org/pdf/1801.04503.pdf\n\n \"\"\"\n\n _tags = {\"python_dependencies\": \"tensorflow\"}\n\n def __init__(\n self,\n n_epochs=100,\n batch_size=128,\n dropout=0.8,\n kernel_sizes=(8, 5, 3),\n filter_sizes=(128, 256, 128),\n lstm_size=8,\n attention=False,\n callbacks=None,\n random_state=None,\n verbose=0,\n ):\n\n super(LSTMFCNClassifier, self).__init__()\n\n # calced in fit\n self.classes_ = None\n self.input_shape = None\n self.model_ = None\n self.history = None\n\n # predefined\n self.n_epochs = n_epochs\n self.batch_size = batch_size\n self.kernel_sizes = kernel_sizes\n self.filter_sizes = filter_sizes\n self.lstm_size = lstm_size\n self.dropout = dropout\n self.attention = attention\n\n self.callbacks = callbacks\n self.random_state = random_state\n self.verbose = verbose\n\n self._network = LSTMFCNNetwork(\n kernel_sizes=self.kernel_sizes,\n filter_sizes=self.filter_sizes,\n random_state=self.random_state,\n lstm_size=self.lstm_size,\n dropout=self.dropout,\n attention=self.attention,\n )\n self._is_fitted = False\n\n def build_model(self, input_shape, n_classes, **kwargs):\n \"\"\"\n Construct a compiled, un-trained, keras model that is ready for training.\n\n ----------\n input_shape : tuple\n The shape of the data fed into the input layer\n n_classes: int\n The number of classes, which shall become the size of the output\n layer\n Returns\n -------\n output : a compiled Keras Model\n \"\"\"\n import tensorflow as tf\n from tensorflow import keras\n\n tf.random.set_seed(self.random_state)\n\n input_layers, output_layer = self._network.build_network(input_shape, **kwargs)\n\n output_layer = keras.layers.Dense(units=n_classes, activation=\"softmax\")(\n output_layer\n )\n\n model = keras.models.Model(inputs=input_layers, outputs=output_layer)\n\n model.compile(\n loss=\"categorical_crossentropy\",\n optimizer=\"adam\",\n metrics=[\"accuracy\"],\n )\n\n if self.callbacks is None:\n self._callbacks = []\n\n return model\n\n def _fit(self, X, y):\n \"\"\"\n Fit the classifier on the training set (X, y).\n\n ----------\n X : a nested pd.Dataframe, or (if input_checks=False) array-like of\n shape = (n_instances, series_length, n_dimensions)\n The training input samples. If a 2D array-like is passed,\n n_dimensions is assumed to be 1.\n y : array-like, shape = [n_instances]\n The training data class labels.\n input_checks : boolean\n whether to check the X and y parameters\n validation_X : a nested pd.Dataframe, or array-like of shape =\n (n_instances, series_length, n_dimensions)\n The validation samples. If a 2D array-like is passed,\n n_dimensions is assumed to be 1.\n Unless strictly defined by the user via callbacks (such as\n EarlyStopping), the presence or state of the validation\n data does not alter training in any way. 
Predictions at each epoch\n are stored in the model's fit history.\n validation_y : array-like, shape = [n_instances]\n The validation class labels.\n\n Returns\n -------\n self : object\n \"\"\"\n check_random_state(self.random_state)\n\n y_onehot = self.convert_y_to_keras(y)\n\n # Remove?\n # Transpose to conform to Keras input style.\n X = X.transpose(0, 2, 1)\n\n # ignore the number of instances, X.shape[0],\n # just want the shape of each instance\n self.input_shape = X.shape[1:]\n\n self.model_ = self.build_model(self.input_shape, self.n_classes_)\n\n if self.verbose:\n self.model_.summary()\n\n self.history = self.model_.fit(\n X,\n y_onehot,\n batch_size=self.batch_size,\n epochs=self.n_epochs,\n verbose=self.verbose,\n callbacks=self._callbacks,\n )\n\n self._is_fitted = True\n\n return self\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\n\n Parameters\n ----------\n parameter_set : str, default=\"default\"\n Name of the set of test parameters to return, for use in tests. If no\n special parameters are defined for a value, will return `\"default\"` set.\n For classifiers, a \"default\" set of parameters should be provided for\n general testing, and a \"results_comparison\" set for comparing against\n previously recorded results if the general set does not produce suitable\n probabilities to compare against.\n\n Returns\n -------\n params : dict or list of dict, default={}\n Parameters to create testing instances of the class.\n Each dict are parameters to construct an \"interesting\" test instance, i.e.,\n `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.\n `create_test_instance` uses the first (or only) dictionary in `params`.\n \"\"\"\n param1 = {\n \"n_epochs\": 50,\n }\n\n param2 = {\n \"n_epochs\": 100,\n }\n\n return [param1, param2]\n", "path": "sktime/classification/deep_learning/lstmfcn.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"LongShort Term Memory Fully Convolutional Network (LSTM-FCN).\"\"\"\n__author__ = [\"jnrusson1\", \"solen0id\"]\n\n__all__ = [\"LSTMFCNClassifier\"]\n\nfrom sklearn.utils import check_random_state\n\nfrom sktime.classification.deep_learning.base import BaseDeepClassifier\nfrom sktime.networks.lstmfcn import LSTMFCNNetwork\nfrom sktime.utils.validation._dependencies import _check_dl_dependencies\n\n_check_dl_dependencies(severity=\"warning\")\n\n\nclass LSTMFCNClassifier(BaseDeepClassifier):\n \"\"\"\n\n Implementation of LSTMFCNClassifier from Karim et al (2019) [1].\n\n Overview\n --------\n Combines an LSTM arm with a CNN arm. Optionally uses an attention mechanism in the\n LSTM which the author indicates provides improved performance.\n\n\n Parameters\n ----------\n n_epochs: int, default=2000\n the number of epochs to train the model\n batch_size: int, default=128\n the number of samples per gradient update.\n dropout: float, default=0.8\n controls dropout rate of LSTM layer\n kernel_sizes: list of ints, default=[8, 5, 3]\n specifying the length of the 1D convolution windows\n filter_sizes: int, list of ints, default=[128, 256, 128]\n size of filter for each conv layer\n lstm_size: int, default=8\n output dimension for LSTM layer\n attention: boolean, default=False\n If True, uses custom attention LSTM layer\n callbacks: keras callbacks, default=ReduceLRonPlateau\n Keras callbacks to use such as learning rate reduction or saving best model\n based on validation error\n verbose: 'auto', 0, 1, or 2. 
Verbosity mode.\n 0 = silent, 1 = progress bar, 2 = one line per epoch.\n 'auto' defaults to 1 for most cases, but 2 when used with\n `ParameterServerStrategy`. Note that the progress bar is not\n particularly useful when logged to a file, so verbose=2 is\n recommended when not running interactively (eg, in a production\n environment).\n random_state : int or None, default=None\n Seed for random, integer.\n\n\n Notes\n -----\n Ported from sktime-dl source code\n https://github.com/sktime/sktime-dl/blob/master/sktime_dl/classification/_lstmfcn.py\n\n References\n ----------\n .. [1] Karim et al. Multivariate LSTM-FCNs for Time Series Classification, 2019\n https://arxiv.org/pdf/1801.04503.pdf\n\n \"\"\"\n\n _tags = {\"python_dependencies\": \"tensorflow\"}\n\n def __init__(\n self,\n n_epochs=2000,\n batch_size=128,\n dropout=0.8,\n kernel_sizes=(8, 5, 3),\n filter_sizes=(128, 256, 128),\n lstm_size=8,\n attention=False,\n callbacks=None,\n random_state=None,\n verbose=0,\n ):\n\n super(LSTMFCNClassifier, self).__init__()\n\n self.classes_ = None\n self.input_shape = None\n self.model_ = None\n self.history = None\n\n # predefined\n self.n_epochs = n_epochs\n self.batch_size = batch_size\n self.kernel_sizes = kernel_sizes\n self.filter_sizes = filter_sizes\n self.lstm_size = lstm_size\n self.dropout = dropout\n self.attention = attention\n\n self.callbacks = callbacks\n self.random_state = random_state\n self.verbose = verbose\n\n self._network = LSTMFCNNetwork(\n kernel_sizes=self.kernel_sizes,\n filter_sizes=self.filter_sizes,\n random_state=self.random_state,\n lstm_size=self.lstm_size,\n dropout=self.dropout,\n attention=self.attention,\n )\n self._is_fitted = False\n\n def build_model(self, input_shape, n_classes, **kwargs):\n \"\"\"\n Construct a compiled, un-trained, keras model that is ready for training.\n\n ----------\n input_shape : tuple\n The shape of the data fed into the input layer\n n_classes: int\n The number of classes, which shall become the size of the output\n layer\n Returns\n -------\n output : a compiled Keras Model\n \"\"\"\n import tensorflow as tf\n from tensorflow import keras\n\n tf.random.set_seed(self.random_state)\n\n input_layers, output_layer = self._network.build_network(input_shape, **kwargs)\n\n output_layer = keras.layers.Dense(units=n_classes, activation=\"softmax\")(\n output_layer\n )\n\n model = keras.models.Model(inputs=input_layers, outputs=output_layer)\n\n model.compile(\n loss=\"categorical_crossentropy\",\n optimizer=\"adam\",\n metrics=[\"accuracy\"],\n )\n\n if self.callbacks is None:\n self._callbacks = []\n\n return model\n\n def _fit(self, X, y):\n \"\"\"\n Fit the classifier on the training set (X, y).\n\n ----------\n X : a nested pd.Dataframe, or (if input_checks=False) array-like of\n shape = (n_instances, series_length, n_dimensions)\n The training input samples. If a 2D array-like is passed,\n n_dimensions is assumed to be 1.\n y : array-like, shape = [n_instances]\n The training data class labels.\n input_checks : boolean\n whether to check the X and y parameters\n validation_X : a nested pd.Dataframe, or array-like of shape =\n (n_instances, series_length, n_dimensions)\n The validation samples. If a 2D array-like is passed,\n n_dimensions is assumed to be 1.\n Unless strictly defined by the user via callbacks (such as\n EarlyStopping), the presence or state of the validation\n data does not alter training in any way. 
Predictions at each epoch\n are stored in the model's fit history.\n validation_y : array-like, shape = [n_instances]\n The validation class labels.\n\n Returns\n -------\n self : object\n \"\"\"\n check_random_state(self.random_state)\n\n y_onehot = self.convert_y_to_keras(y)\n\n # Remove?\n # Transpose to conform to Keras input style.\n X = X.transpose(0, 2, 1)\n\n # ignore the number of instances, X.shape[0],\n # just want the shape of each instance\n self.input_shape = X.shape[1:]\n\n self.model_ = self.build_model(self.input_shape, self.n_classes_)\n\n if self.verbose:\n self.model_.summary()\n\n self.history = self.model_.fit(\n X,\n y_onehot,\n batch_size=self.batch_size,\n epochs=self.n_epochs,\n verbose=self.verbose,\n callbacks=self._callbacks,\n )\n\n self._is_fitted = True\n\n return self\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\n\n Parameters\n ----------\n parameter_set : str, default=\"default\"\n Name of the set of test parameters to return, for use in tests. If no\n special parameters are defined for a value, will return `\"default\"` set.\n For classifiers, a \"default\" set of parameters should be provided for\n general testing, and a \"results_comparison\" set for comparing against\n previously recorded results if the general set does not produce suitable\n probabilities to compare against.\n\n Returns\n -------\n params : dict or list of dict, default={}\n Parameters to create testing instances of the class.\n Each dict are parameters to construct an \"interesting\" test instance, i.e.,\n `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.\n `create_test_instance` uses the first (or only) dictionary in `params`.\n \"\"\"\n param1 = {\n \"n_epochs\": 25,\n \"batch_size\": 4,\n \"kernel_sizes\": (3, 2, 1),\n \"filter_sizes\": (2, 4, 2),\n }\n\n param2 = {\n \"n_epochs\": 5,\n \"batch_size\": 2,\n \"kernel_sizes\": (3, 2, 1),\n \"filter_sizes\": (2, 4, 2),\n \"lstm_size\": 2,\n \"attention\": True,\n }\n\n return [param1, param2]\n", "path": "sktime/classification/deep_learning/lstmfcn.py"}]} | 2,886 | 378 |
gh_patches_debug_26026 | rasdani/github-patches | git_diff | python-discord__site-513 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to parse the GitHub repository metadata from response!
Sentry Issue: [SITE-P](https://sentry.io/organizations/python-discord/issues/2093966668/?referrer=github_integration)
```
Unable to parse the GitHub repository metadata from response!
```
This is caused by us hitting GitHub rate limits, as we're not authenticating with the API.
--- END ISSUE ---
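Editorial note (not part of the issue): unauthenticated GitHub API calls share a limit of 60 requests per hour per IP, while token-authenticated calls get 5,000 per hour. A minimal sketch of the authenticated variant of the repos query used by the home view (the `GITHUB_TOKEN` environment variable holding a personal access token is an assumption):
```python
# Sketch: the same repos query, but with token authentication so the
# anonymous rate limit no longer applies. GITHUB_TOKEN is an assumption.
import os

import requests

GITHUB_API = "https://api.github.com/users/python-discord/repos?per_page=100"

token = os.environ.get("GITHUB_TOKEN")
headers = {"Authorization": f"token {token}"} if token else {}

response = requests.get(GITHUB_API, headers=headers)
response.raise_for_status()
print(len(response.json()), "repositories fetched")
```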
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydis_site/constants.py`
Content:
```
1 import os
2
3 GIT_SHA = os.environ.get("GIT_SHA", "development")
4
```
Path: `pydis_site/apps/home/views/home.py`
Content:
```
1 import logging
2 from typing import Dict, List
3
4 import requests
5 from django.core.handlers.wsgi import WSGIRequest
6 from django.http import HttpResponse
7 from django.shortcuts import render
8 from django.utils import timezone
9 from django.views import View
10
11 from pydis_site.apps.home.models import RepositoryMetadata
12
13 log = logging.getLogger(__name__)
14
15
16 class HomeView(View):
17 """The main landing page for the website."""
18
19 github_api = "https://api.github.com/users/python-discord/repos?per_page=100"
20 repository_cache_ttl = 3600
21
22 # Which of our GitHub repos should be displayed on the front page, and in which order?
23 repos = [
24 "python-discord/site",
25 "python-discord/bot",
26 "python-discord/snekbox",
27 "python-discord/sir-lancebot",
28 "python-discord/metricity",
29 "python-discord/django-simple-bulma",
30 ]
31
32 def __init__(self):
33 """Clean up stale RepositoryMetadata."""
34 RepositoryMetadata.objects.exclude(repo_name__in=self.repos).delete()
35
36 def _get_api_data(self) -> Dict[str, Dict[str, str]]:
37 """
38 Call the GitHub API and get information about our repos.
39
40 If we're unable to get that info for any reason, return an empty dict.
41 """
42 repo_dict = {}
43
44 # Fetch the data from the GitHub API
45 api_data: List[dict] = requests.get(self.github_api).json()
46
47 # Process the API data into our dict
48 for repo in api_data:
49 try:
50 full_name = repo["full_name"]
51
52 if full_name in self.repos:
53 repo_dict[full_name] = {
54 "full_name": repo["full_name"],
55 "description": repo["description"],
56 "language": repo["language"],
57 "forks_count": repo["forks_count"],
58 "stargazers_count": repo["stargazers_count"],
59 }
60 # Something is not right about the API data we got back from GitHub.
61 except (TypeError, ConnectionError, KeyError) as e:
62 log.error(
63 "Unable to parse the GitHub repository metadata from response!",
64 extra={
65 'api_data': api_data,
66 'error': e
67 }
68 )
69 continue
70
71 return repo_dict
72
73 def _get_repo_data(self) -> List[RepositoryMetadata]:
74 """Build a list of RepositoryMetadata objects that we can use to populate the front page."""
75 database_repositories = []
76
77 # First, let's see if we have any metadata cached.
78 cached_data = RepositoryMetadata.objects.all()
79
80 # If we don't, we have to create some!
81 if not cached_data:
82
83 # Try to get new data from the API. If it fails, we'll return an empty list.
84 # In this case, we simply don't display our projects on the site.
85 api_repositories = self._get_api_data()
86
87 # Create all the repodata records in the database.
88 for api_data in api_repositories.values():
89 repo_data = RepositoryMetadata(
90 repo_name=api_data["full_name"],
91 description=api_data["description"],
92 forks=api_data["forks_count"],
93 stargazers=api_data["stargazers_count"],
94 language=api_data["language"],
95 )
96
97 repo_data.save()
98 database_repositories.append(repo_data)
99
100 return database_repositories
101
102 # If the data is stale, we should refresh it.
103 if (timezone.now() - cached_data[0].last_updated).seconds > self.repository_cache_ttl:
104 # Try to get new data from the API. If it fails, return the cached data.
105 api_repositories = self._get_api_data()
106
107 if not api_repositories:
108 return RepositoryMetadata.objects.all()
109
110 # Update or create all RepoData objects in self.repos
111 for repo_name, api_data in api_repositories.items():
112 try:
113 repo_data = RepositoryMetadata.objects.get(repo_name=repo_name)
114 repo_data.description = api_data["description"]
115 repo_data.language = api_data["language"]
116 repo_data.forks = api_data["forks_count"]
117 repo_data.stargazers = api_data["stargazers_count"]
118 except RepositoryMetadata.DoesNotExist:
119 repo_data = RepositoryMetadata(
120 repo_name=api_data["full_name"],
121 description=api_data["description"],
122 forks=api_data["forks_count"],
123 stargazers=api_data["stargazers_count"],
124 language=api_data["language"],
125 )
126 repo_data.save()
127 database_repositories.append(repo_data)
128 return database_repositories
129
130 # Otherwise, if the data is fresher than 2 minutes old, we should just return it.
131 else:
132 return RepositoryMetadata.objects.all()
133
134 def get(self, request: WSGIRequest) -> HttpResponse:
135 """Collect repo data and render the homepage view."""
136 repo_data = self._get_repo_data()
137 return render(request, "home/index.html", {"repo_data": repo_data})
138
139
140 def timeline(request: WSGIRequest) -> HttpResponse:
141 """Render timeline view."""
142 return render(request, 'home/timeline.html')
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pydis_site/apps/home/views/home.py b/pydis_site/apps/home/views/home.py
--- a/pydis_site/apps/home/views/home.py
+++ b/pydis_site/apps/home/views/home.py
@@ -9,6 +9,7 @@
from django.views import View
from pydis_site.apps.home.models import RepositoryMetadata
+from pydis_site.constants import GITHUB_TOKEN
log = logging.getLogger(__name__)
@@ -18,6 +19,7 @@
github_api = "https://api.github.com/users/python-discord/repos?per_page=100"
repository_cache_ttl = 3600
+ headers = {"Authorization": f"token {GITHUB_TOKEN}"}
# Which of our GitHub repos should be displayed on the front page, and in which order?
repos = [
@@ -42,7 +44,7 @@
repo_dict = {}
# Fetch the data from the GitHub API
- api_data: List[dict] = requests.get(self.github_api).json()
+ api_data: List[dict] = requests.get(self.github_api, headers=self.headers).json()
# Process the API data into our dict
for repo in api_data:
diff --git a/pydis_site/constants.py b/pydis_site/constants.py
--- a/pydis_site/constants.py
+++ b/pydis_site/constants.py
@@ -1,3 +1,4 @@
import os
GIT_SHA = os.environ.get("GIT_SHA", "development")
+GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
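A possible follow-up check (editorial, not part of the patch): once `GITHUB_TOKEN` is set in the environment, GitHub's `/rate_limit` endpoint can confirm that requests are being counted against the authenticated quota rather than the anonymous one.
```python
# Hedged sketch: verify the token is actually picked up by inspecting the
# rate-limit budget GitHub reports for this client.
import os

import requests

headers = {"Authorization": f"token {os.environ['GITHUB_TOKEN']}"}
limits = requests.get("https://api.github.com/rate_limit", headers=headers).json()
core = limits["resources"]["core"]
print(core["limit"], "requests/hour,", core["remaining"], "remaining")
```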
| {"golden_diff": "diff --git a/pydis_site/apps/home/views/home.py b/pydis_site/apps/home/views/home.py\n--- a/pydis_site/apps/home/views/home.py\n+++ b/pydis_site/apps/home/views/home.py\n@@ -9,6 +9,7 @@\n from django.views import View\n \n from pydis_site.apps.home.models import RepositoryMetadata\n+from pydis_site.constants import GITHUB_TOKEN\n \n log = logging.getLogger(__name__)\n \n@@ -18,6 +19,7 @@\n \n github_api = \"https://api.github.com/users/python-discord/repos?per_page=100\"\n repository_cache_ttl = 3600\n+ headers = {\"Authorization\": f\"token {GITHUB_TOKEN}\"}\n \n # Which of our GitHub repos should be displayed on the front page, and in which order?\n repos = [\n@@ -42,7 +44,7 @@\n repo_dict = {}\n \n # Fetch the data from the GitHub API\n- api_data: List[dict] = requests.get(self.github_api).json()\n+ api_data: List[dict] = requests.get(self.github_api, headers=self.headers).json()\n \n # Process the API data into our dict\n for repo in api_data:\ndiff --git a/pydis_site/constants.py b/pydis_site/constants.py\n--- a/pydis_site/constants.py\n+++ b/pydis_site/constants.py\n@@ -1,3 +1,4 @@\n import os\n \n GIT_SHA = os.environ.get(\"GIT_SHA\", \"development\")\n+GITHUB_TOKEN = os.environ.get(\"GITHUB_TOKEN\")\n", "issue": "Unable to parse the GitHub repository metadata from response!\nSentry Issue: [SITE-P](https://sentry.io/organizations/python-discord/issues/2093966668/?referrer=github_integration)\n\n```\nUnable to parse the GitHub repository metadata from response!\n```\n\nThis is caused by us hitting github ratelimits, as we're not authenticating with the API.\n", "before_files": [{"content": "import os\n\nGIT_SHA = os.environ.get(\"GIT_SHA\", \"development\")\n", "path": "pydis_site/constants.py"}, {"content": "import logging\nfrom typing import Dict, List\n\nimport requests\nfrom django.core.handlers.wsgi import WSGIRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.views import View\n\nfrom pydis_site.apps.home.models import RepositoryMetadata\n\nlog = logging.getLogger(__name__)\n\n\nclass HomeView(View):\n \"\"\"The main landing page for the website.\"\"\"\n\n github_api = \"https://api.github.com/users/python-discord/repos?per_page=100\"\n repository_cache_ttl = 3600\n\n # Which of our GitHub repos should be displayed on the front page, and in which order?\n repos = [\n \"python-discord/site\",\n \"python-discord/bot\",\n \"python-discord/snekbox\",\n \"python-discord/sir-lancebot\",\n \"python-discord/metricity\",\n \"python-discord/django-simple-bulma\",\n ]\n\n def __init__(self):\n \"\"\"Clean up stale RepositoryMetadata.\"\"\"\n RepositoryMetadata.objects.exclude(repo_name__in=self.repos).delete()\n\n def _get_api_data(self) -> Dict[str, Dict[str, str]]:\n \"\"\"\n Call the GitHub API and get information about our repos.\n\n If we're unable to get that info for any reason, return an empty dict.\n \"\"\"\n repo_dict = {}\n\n # Fetch the data from the GitHub API\n api_data: List[dict] = requests.get(self.github_api).json()\n\n # Process the API data into our dict\n for repo in api_data:\n try:\n full_name = repo[\"full_name\"]\n\n if full_name in self.repos:\n repo_dict[full_name] = {\n \"full_name\": repo[\"full_name\"],\n \"description\": repo[\"description\"],\n \"language\": repo[\"language\"],\n \"forks_count\": repo[\"forks_count\"],\n \"stargazers_count\": repo[\"stargazers_count\"],\n }\n # Something is not right about the API data we got back from GitHub.\n except 
(TypeError, ConnectionError, KeyError) as e:\n log.error(\n \"Unable to parse the GitHub repository metadata from response!\",\n extra={\n 'api_data': api_data,\n 'error': e\n }\n )\n continue\n\n return repo_dict\n\n def _get_repo_data(self) -> List[RepositoryMetadata]:\n \"\"\"Build a list of RepositoryMetadata objects that we can use to populate the front page.\"\"\"\n database_repositories = []\n\n # First, let's see if we have any metadata cached.\n cached_data = RepositoryMetadata.objects.all()\n\n # If we don't, we have to create some!\n if not cached_data:\n\n # Try to get new data from the API. If it fails, we'll return an empty list.\n # In this case, we simply don't display our projects on the site.\n api_repositories = self._get_api_data()\n\n # Create all the repodata records in the database.\n for api_data in api_repositories.values():\n repo_data = RepositoryMetadata(\n repo_name=api_data[\"full_name\"],\n description=api_data[\"description\"],\n forks=api_data[\"forks_count\"],\n stargazers=api_data[\"stargazers_count\"],\n language=api_data[\"language\"],\n )\n\n repo_data.save()\n database_repositories.append(repo_data)\n\n return database_repositories\n\n # If the data is stale, we should refresh it.\n if (timezone.now() - cached_data[0].last_updated).seconds > self.repository_cache_ttl:\n # Try to get new data from the API. If it fails, return the cached data.\n api_repositories = self._get_api_data()\n\n if not api_repositories:\n return RepositoryMetadata.objects.all()\n\n # Update or create all RepoData objects in self.repos\n for repo_name, api_data in api_repositories.items():\n try:\n repo_data = RepositoryMetadata.objects.get(repo_name=repo_name)\n repo_data.description = api_data[\"description\"]\n repo_data.language = api_data[\"language\"]\n repo_data.forks = api_data[\"forks_count\"]\n repo_data.stargazers = api_data[\"stargazers_count\"]\n except RepositoryMetadata.DoesNotExist:\n repo_data = RepositoryMetadata(\n repo_name=api_data[\"full_name\"],\n description=api_data[\"description\"],\n forks=api_data[\"forks_count\"],\n stargazers=api_data[\"stargazers_count\"],\n language=api_data[\"language\"],\n )\n repo_data.save()\n database_repositories.append(repo_data)\n return database_repositories\n\n # Otherwise, if the data is fresher than 2 minutes old, we should just return it.\n else:\n return RepositoryMetadata.objects.all()\n\n def get(self, request: WSGIRequest) -> HttpResponse:\n \"\"\"Collect repo data and render the homepage view.\"\"\"\n repo_data = self._get_repo_data()\n return render(request, \"home/index.html\", {\"repo_data\": repo_data})\n\n\ndef timeline(request: WSGIRequest) -> HttpResponse:\n \"\"\"Render timeline view.\"\"\"\n return render(request, 'home/timeline.html')\n", "path": "pydis_site/apps/home/views/home.py"}], "after_files": [{"content": "import os\n\nGIT_SHA = os.environ.get(\"GIT_SHA\", \"development\")\nGITHUB_TOKEN = os.environ.get(\"GITHUB_TOKEN\")\n", "path": "pydis_site/constants.py"}, {"content": "import logging\nfrom typing import Dict, List\n\nimport requests\nfrom django.core.handlers.wsgi import WSGIRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.views import View\n\nfrom pydis_site.apps.home.models import RepositoryMetadata\nfrom pydis_site.constants import GITHUB_TOKEN\n\nlog = logging.getLogger(__name__)\n\n\nclass HomeView(View):\n \"\"\"The main landing page for the website.\"\"\"\n\n github_api = 
\"https://api.github.com/users/python-discord/repos?per_page=100\"\n repository_cache_ttl = 3600\n headers = {\"Authorization\": f\"token {GITHUB_TOKEN}\"}\n\n # Which of our GitHub repos should be displayed on the front page, and in which order?\n repos = [\n \"python-discord/site\",\n \"python-discord/bot\",\n \"python-discord/snekbox\",\n \"python-discord/sir-lancebot\",\n \"python-discord/metricity\",\n \"python-discord/django-simple-bulma\",\n ]\n\n def __init__(self):\n \"\"\"Clean up stale RepositoryMetadata.\"\"\"\n RepositoryMetadata.objects.exclude(repo_name__in=self.repos).delete()\n\n def _get_api_data(self) -> Dict[str, Dict[str, str]]:\n \"\"\"\n Call the GitHub API and get information about our repos.\n\n If we're unable to get that info for any reason, return an empty dict.\n \"\"\"\n repo_dict = {}\n\n # Fetch the data from the GitHub API\n api_data: List[dict] = requests.get(self.github_api, headers=self.headers).json()\n\n # Process the API data into our dict\n for repo in api_data:\n try:\n full_name = repo[\"full_name\"]\n\n if full_name in self.repos:\n repo_dict[full_name] = {\n \"full_name\": repo[\"full_name\"],\n \"description\": repo[\"description\"],\n \"language\": repo[\"language\"],\n \"forks_count\": repo[\"forks_count\"],\n \"stargazers_count\": repo[\"stargazers_count\"],\n }\n # Something is not right about the API data we got back from GitHub.\n except (TypeError, ConnectionError, KeyError) as e:\n log.error(\n \"Unable to parse the GitHub repository metadata from response!\",\n extra={\n 'api_data': api_data,\n 'error': e\n }\n )\n continue\n\n return repo_dict\n\n def _get_repo_data(self) -> List[RepositoryMetadata]:\n \"\"\"Build a list of RepositoryMetadata objects that we can use to populate the front page.\"\"\"\n database_repositories = []\n\n # First, let's see if we have any metadata cached.\n cached_data = RepositoryMetadata.objects.all()\n\n # If we don't, we have to create some!\n if not cached_data:\n\n # Try to get new data from the API. If it fails, we'll return an empty list.\n # In this case, we simply don't display our projects on the site.\n api_repositories = self._get_api_data()\n\n # Create all the repodata records in the database.\n for api_data in api_repositories.values():\n repo_data = RepositoryMetadata(\n repo_name=api_data[\"full_name\"],\n description=api_data[\"description\"],\n forks=api_data[\"forks_count\"],\n stargazers=api_data[\"stargazers_count\"],\n language=api_data[\"language\"],\n )\n\n repo_data.save()\n database_repositories.append(repo_data)\n\n return database_repositories\n\n # If the data is stale, we should refresh it.\n if (timezone.now() - cached_data[0].last_updated).seconds > self.repository_cache_ttl:\n # Try to get new data from the API. 
If it fails, return the cached data.\n api_repositories = self._get_api_data()\n\n if not api_repositories:\n return RepositoryMetadata.objects.all()\n\n # Update or create all RepoData objects in self.repos\n for repo_name, api_data in api_repositories.items():\n try:\n repo_data = RepositoryMetadata.objects.get(repo_name=repo_name)\n repo_data.description = api_data[\"description\"]\n repo_data.language = api_data[\"language\"]\n repo_data.forks = api_data[\"forks_count\"]\n repo_data.stargazers = api_data[\"stargazers_count\"]\n except RepositoryMetadata.DoesNotExist:\n repo_data = RepositoryMetadata(\n repo_name=api_data[\"full_name\"],\n description=api_data[\"description\"],\n forks=api_data[\"forks_count\"],\n stargazers=api_data[\"stargazers_count\"],\n language=api_data[\"language\"],\n )\n repo_data.save()\n database_repositories.append(repo_data)\n return database_repositories\n\n # Otherwise, if the data is fresher than 2 minutes old, we should just return it.\n else:\n return RepositoryMetadata.objects.all()\n\n def get(self, request: WSGIRequest) -> HttpResponse:\n \"\"\"Collect repo data and render the homepage view.\"\"\"\n repo_data = self._get_repo_data()\n return render(request, \"home/index.html\", {\"repo_data\": repo_data})\n\n\ndef timeline(request: WSGIRequest) -> HttpResponse:\n \"\"\"Render timeline view.\"\"\"\n return render(request, 'home/timeline.html')\n", "path": "pydis_site/apps/home/views/home.py"}]} | 1,814 | 335 |
gh_patches_debug_3554 | rasdani/github-patches | git_diff | microsoft__torchgeo-2000 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Auto download fails for FireRisk
### Description
Auto download fails for the FireRisk dataset hosted on Google Drive.
Warning and error:
```bash
/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py:260: UserWarning: We detected some HTML elements in the downloaded file. This most likely means that the dow
nload triggered an unhandled API response by GDrive. Please report this to torchvision at https://github.com/pytorch/vision/issues including the response:
<!DOCTYPE html><html><head><title>Google Drive - Virus scan warning</title><meta http-equiv="content-type" content="text/html; charset=utf-8"/><style nonce="Udd3l48zF0spb_ikIDzQdw">.goog-link-button{position:rel
ative;color:#15c;text-decoration:underline;cursor:pointer}.goog-link-button-disabled{color:#ccc;text-decoration:none;cursor:default}body{color:#222;font:normal 13px/1.4 arial,sans-serif;margin:0}.grecaptcha-badg
e{visibility:hidden}.uc-main{padding-top:50px;text-align:center}#uc-dl-icon{display:inline-block;margin-top:16px;padding-right:1em;vertical-align:top}#uc-text{display:inline-block;max-width:68ex;text-align:left}.uc-error-caption,.uc-warning-caption{color:#222;font-size:16px}#uc-download-link{text-decoration:none}.uc-name-size a{color:#15c;text-decoration:none}.uc-name-size a:visited{color:#61c;text-decoration:none}.uc-name-size a:active{color:#d14836;text-decoration:none}.uc-footer{color:#777;font-size:11px;padding-bottom:5ex;padding-top:5ex;text-align:center}.uc-footer a{color:#15c}.uc-footer a:visited{color:#61c}.uc-footer a:active{color:#d14836}.uc-footer-divider{color:#ccc;width:100%}.goog-inline-block{position:relative;display:-moz-inline-box;display:inline-block}* html .goog-inline-block{display:inline}*:first-child+html .goog-inline-block{display:inline}sentinel{}</style><link rel="icon" href="//ssl.gstatic.com/docs/doclist/images/drive_2022q3_32dp.png"/></head><body><div class="uc-main"><div id="uc-dl-icon" class="image-container"><div class="drive-sprite-aux-download-file"></div></div><div id="uc-text"><p class="uc-warning-caption">Google Drive can't scan this file for viruses.</p><p class="uc-warning-subcaption"><span class="uc-name-size"><a href="/open?id=1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP">FireRisk.zip</a> (14G)</span> is too large for Google to scan for viruses. Would you still like to download this file?</p><form id="download-form" action="https://drive.usercontent.google.com/download" method="get"><input type="submit" id="uc-download-link" class="goog-inline-block jfk-button jfk-button-action" value="Download anyway"/><input type="hidden" name="id" value="1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP"><input type="hidden" name="export" value="download"><input type="hidden" name="confirm" value="t"><input type="hidden" name="uuid" value="c4203717-b28d-4640-8d59-e9f5d88a2120"></form></div></div><div class="uc-footer"><hr class="uc-footer-divider"></div></body></html>
warnings.warn(
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/jb/code/torchgeo/slip/datasets/firerisk.py", line 25, in __init__
super().__init__(root=root, split=split, download=download, checksum=checksum)
File "/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py", line 94, in __init__
self._verify()
File "/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py", line 126, in _verify
self._download()
File "/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py", line 131, in _download
download_url(
File "/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py", line 139, in download_url
return download_file_from_google_drive(file_id, root, filename, md5)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py", line 268, in download_file_from_google_drive
raise RuntimeError(
RuntimeError: The MD5 checksum of the download file /data/labeleff/datasets/firerisk/FireRisk.zip does not match the one on record.Please delete the file and try again. If the issue persists, please report this to torchvision at https://github.com/pytorch/vision/issues.
```
### Steps to reproduce
```python
from torchgeo.datasets import FireRisk
dataset = FireRisk(download=True, checksum=True)
```
### Version
0.5.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchgeo/datasets/fire_risk.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 """FireRisk dataset."""
5
6 import os
7 from collections.abc import Callable
8 from typing import cast
9
10 import matplotlib.pyplot as plt
11 from matplotlib.figure import Figure
12 from torch import Tensor
13
14 from .geo import NonGeoClassificationDataset
15 from .utils import DatasetNotFoundError, download_url, extract_archive
16
17
18 class FireRisk(NonGeoClassificationDataset):
19 """FireRisk dataset.
20
21 The `FireRisk <https://github.com/CharmonyShen/FireRisk>`__
22 dataset is a dataset for remote sensing fire risk classification.
23
24 Dataset features:
25
26 * 91,872 images with 1 m per pixel resolution (320x320 px)
27 * 70,331 and 21,541 train and val images, respectively
28 * three spectral bands - RGB
29 * 7 fire risk classes
30 * images extracted from NAIP tiles
31
32 Dataset format:
33
34 * images are three-channel pngs
35
36 Dataset classes:
37
38 0. high
39 1. low
40 2. moderate
41 3. non-burnable
42 4. very_high
43 5. very_low
44 6. water
45
46 If you use this dataset in your research, please cite the following paper:
47
48 * https://arxiv.org/abs/2303.07035
49
50 .. versionadded:: 0.5
51 """
52
53 url = "https://drive.google.com/file/d/1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP"
54 md5 = "a77b9a100d51167992ae8c51d26198a6"
55 filename = "FireRisk.zip"
56 directory = "FireRisk"
57 splits = ["train", "val"]
58 classes = [
59 "High",
60 "Low",
61 "Moderate",
62 "Non-burnable",
63 "Very_High",
64 "Very_Low",
65 "Water",
66 ]
67
68 def __init__(
69 self,
70 root: str = "data",
71 split: str = "train",
72 transforms: Callable[[dict[str, Tensor]], dict[str, Tensor]] | None = None,
73 download: bool = False,
74 checksum: bool = False,
75 ) -> None:
76 """Initialize a new FireRisk dataset instance.
77
78 Args:
79 root: root directory where dataset can be found
80 split: one of "train" or "val"
81 transforms: a function/transform that takes input sample and its target as
82 entry and returns a transformed version
83 download: if True, download dataset and store it in the root directory
84 checksum: if True, check the MD5 of the downloaded files (may be slow)
85
86 Raises:
87 AssertionError: if ``split`` argument is invalid
88 DatasetNotFoundError: If dataset is not found and *download* is False.
89 """
90 assert split in self.splits
91 self.root = root
92 self.split = split
93 self.download = download
94 self.checksum = checksum
95 self._verify()
96
97 super().__init__(
98 root=os.path.join(root, self.directory, self.split), transforms=transforms
99 )
100
101 def _verify(self) -> None:
102 """Verify the integrity of the dataset."""
103 # Check if the files already exist
104 path = os.path.join(self.root, self.directory)
105 if os.path.exists(path):
106 return
107
108 # Check if zip file already exists (if so then extract)
109 filepath = os.path.join(self.root, self.filename)
110 if os.path.exists(filepath):
111 self._extract()
112 return
113
114 # Check if the user requested to download the dataset
115 if not self.download:
116 raise DatasetNotFoundError(self)
117
118 # Download and extract the dataset
119 self._download()
120 self._extract()
121
122 def _download(self) -> None:
123 """Download the dataset."""
124 download_url(
125 self.url,
126 self.root,
127 filename=self.filename,
128 md5=self.md5 if self.checksum else None,
129 )
130
131 def _extract(self) -> None:
132 """Extract the dataset."""
133 filepath = os.path.join(self.root, self.filename)
134 extract_archive(filepath)
135
136 def plot(
137 self,
138 sample: dict[str, Tensor],
139 show_titles: bool = True,
140 suptitle: str | None = None,
141 ) -> Figure:
142 """Plot a sample from the dataset.
143
144 Args:
145 sample: a sample returned by :meth:`NonGeoClassificationDataset.__getitem__`
146 show_titles: flag indicating whether to show titles above each panel
147 suptitle: optional string to use as a suptitle
148
149 Returns:
150 a matplotlib Figure with the rendered sample
151 """
152 image = sample["image"].permute((1, 2, 0)).numpy()
153 label = cast(int, sample["label"].item())
154 label_class = self.classes[label]
155
156 showing_predictions = "prediction" in sample
157 if showing_predictions:
158 prediction = cast(int, sample["prediction"].item())
159 prediction_class = self.classes[prediction]
160
161 fig, ax = plt.subplots(figsize=(4, 4))
162 ax.imshow(image)
163 ax.axis("off")
164 if show_titles:
165 title = f"Label: {label_class}"
166 if showing_predictions:
167 title += f"\nPrediction: {prediction_class}"
168 ax.set_title(title)
169
170 if suptitle is not None:
171 plt.suptitle(suptitle)
172 return fig
173
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchgeo/datasets/fire_risk.py b/torchgeo/datasets/fire_risk.py
--- a/torchgeo/datasets/fire_risk.py
+++ b/torchgeo/datasets/fire_risk.py
@@ -50,7 +50,7 @@
.. versionadded:: 0.5
"""
- url = "https://drive.google.com/file/d/1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP"
+ url = "https://hf.co/datasets/torchgeo/fire_risk/resolve/e6046a04350c6f1ab4ad791fb3a40bf8940be269/FireRisk.zip"
md5 = "a77b9a100d51167992ae8c51d26198a6"
filename = "FireRisk.zip"
directory = "FireRisk"
| {"golden_diff": "diff --git a/torchgeo/datasets/fire_risk.py b/torchgeo/datasets/fire_risk.py\n--- a/torchgeo/datasets/fire_risk.py\n+++ b/torchgeo/datasets/fire_risk.py\n@@ -50,7 +50,7 @@\n .. versionadded:: 0.5\n \"\"\"\n \n- url = \"https://drive.google.com/file/d/1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP\"\n+ url = \"https://hf.co/datasets/torchgeo/fire_risk/resolve/e6046a04350c6f1ab4ad791fb3a40bf8940be269/FireRisk.zip\"\n md5 = \"a77b9a100d51167992ae8c51d26198a6\"\n filename = \"FireRisk.zip\"\n directory = \"FireRisk\"\n", "issue": "Auto download fails for FireRisk\n### Description\n\nAuto download fails for the FireRisk dataset hosted on Google Drive.\r\n\r\nWarning and error:\r\n```bash\r\n/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py:260: UserWarning: We detected some HTML elements in the downloaded file. This most likely means that the dow\r\nnload triggered an unhandled API response by GDrive. Please report this to torchvision at https://github.com/pytorch/vision/issues including the response:\r\n\r\n<!DOCTYPE html><html><head><title>Google Drive - Virus scan warning</title><meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\"/><style nonce=\"Udd3l48zF0spb_ikIDzQdw\">.goog-link-button{position:rel\r\native;color:#15c;text-decoration:underline;cursor:pointer}.goog-link-button-disabled{color:#ccc;text-decoration:none;cursor:default}body{color:#222;font:normal 13px/1.4 arial,sans-serif;margin:0}.grecaptcha-badg\r\ne{visibility:hidden}.uc-main{padding-top:50px;text-align:center}#uc-dl-icon{display:inline-block;margin-top:16px;padding-right:1em;vertical-align:top}#uc-text{display:inline-block;max-width:68ex;text-align:left}.uc-error-caption,.uc-warning-caption{color:#222;font-size:16px}#uc-download-link{text-decoration:none}.uc-name-size a{color:#15c;text-decoration:none}.uc-name-size a:visited{color:#61c;text-decoration:none}.uc-name-size a:active{color:#d14836;text-decoration:none}.uc-footer{color:#777;font-size:11px;padding-bottom:5ex;padding-top:5ex;text-align:center}.uc-footer a{color:#15c}.uc-footer a:visited{color:#61c}.uc-footer a:active{color:#d14836}.uc-footer-divider{color:#ccc;width:100%}.goog-inline-block{position:relative;display:-moz-inline-box;display:inline-block}* html .goog-inline-block{display:inline}*:first-child+html .goog-inline-block{display:inline}sentinel{}</style><link rel=\"icon\" href=\"//ssl.gstatic.com/docs/doclist/images/drive_2022q3_32dp.png\"/></head><body><div class=\"uc-main\"><div id=\"uc-dl-icon\" class=\"image-container\"><div class=\"drive-sprite-aux-download-file\"></div></div><div id=\"uc-text\"><p class=\"uc-warning-caption\">Google Drive can't scan this file for viruses.</p><p class=\"uc-warning-subcaption\"><span class=\"uc-name-size\"><a href=\"/open?id=1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP\">FireRisk.zip</a> (14G)</span> is too large for Google to scan for viruses. 
Would you still like to download this file?</p><form id=\"download-form\" action=\"https://drive.usercontent.google.com/download\" method=\"get\"><input type=\"submit\" id=\"uc-download-link\" class=\"goog-inline-block jfk-button jfk-button-action\" value=\"Download anyway\"/><input type=\"hidden\" name=\"id\" value=\"1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP\"><input type=\"hidden\" name=\"export\" value=\"download\"><input type=\"hidden\" name=\"confirm\" value=\"t\"><input type=\"hidden\" name=\"uuid\" value=\"c4203717-b28d-4640-8d59-e9f5d88a2120\"></form></div></div><div class=\"uc-footer\"><hr class=\"uc-footer-divider\"></div></body></html>\r\n warnings.warn(\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/jb/code/torchgeo/slip/datasets/firerisk.py\", line 25, in __init__\r\n super().__init__(root=root, split=split, download=download, checksum=checksum)\r\n File \"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py\", line 94, in __init__\r\n self._verify()\r\n File \"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py\", line 126, in _verify\r\n self._download()\r\n File \"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchgeo/datasets/fire_risk.py\", line 131, in _download\r\n download_url(\r\n File \"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py\", line 139, in download_url\r\n return download_file_from_google_drive(file_id, root, filename, md5)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/jb/miniconda3/envs/torchgeo/lib/python3.11/site-packages/torchvision/datasets/utils.py\", line 268, in download_file_from_google_drive\r\n raise RuntimeError(\r\nRuntimeError: The MD5 checksum of the download file /data/labeleff/datasets/firerisk/FireRisk.zip does not match the one on record.Please delete the file and try again. If the issue persists, please report this to torchvision at https://github.com/pytorch/vision/issues.\r\n```\n\n### Steps to reproduce\n\n```python\r\nfrom torchgeo.datasets import FireRisk\r\ndataset = FireRisk(download=True, checksum=True)\r\n```\n\n### Version\n\n0.5.1\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"FireRisk dataset.\"\"\"\n\nimport os\nfrom collections.abc import Callable\nfrom typing import cast\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nfrom torch import Tensor\n\nfrom .geo import NonGeoClassificationDataset\nfrom .utils import DatasetNotFoundError, download_url, extract_archive\n\n\nclass FireRisk(NonGeoClassificationDataset):\n \"\"\"FireRisk dataset.\n\n The `FireRisk <https://github.com/CharmonyShen/FireRisk>`__\n dataset is a dataset for remote sensing fire risk classification.\n\n Dataset features:\n\n * 91,872 images with 1 m per pixel resolution (320x320 px)\n * 70,331 and 21,541 train and val images, respectively\n * three spectral bands - RGB\n * 7 fire risk classes\n * images extracted from NAIP tiles\n\n Dataset format:\n\n * images are three-channel pngs\n\n Dataset classes:\n\n 0. high\n 1. low\n 2. moderate\n 3. non-burnable\n 4. very_high\n 5. very_low\n 6. water\n\n If you use this dataset in your research, please cite the following paper:\n\n * https://arxiv.org/abs/2303.07035\n\n .. 
versionadded:: 0.5\n \"\"\"\n\n url = \"https://drive.google.com/file/d/1J5GrJJPLWkpuptfY_kgqkiDtcSNP88OP\"\n md5 = \"a77b9a100d51167992ae8c51d26198a6\"\n filename = \"FireRisk.zip\"\n directory = \"FireRisk\"\n splits = [\"train\", \"val\"]\n classes = [\n \"High\",\n \"Low\",\n \"Moderate\",\n \"Non-burnable\",\n \"Very_High\",\n \"Very_Low\",\n \"Water\",\n ]\n\n def __init__(\n self,\n root: str = \"data\",\n split: str = \"train\",\n transforms: Callable[[dict[str, Tensor]], dict[str, Tensor]] | None = None,\n download: bool = False,\n checksum: bool = False,\n ) -> None:\n \"\"\"Initialize a new FireRisk dataset instance.\n\n Args:\n root: root directory where dataset can be found\n split: one of \"train\" or \"val\"\n transforms: a function/transform that takes input sample and its target as\n entry and returns a transformed version\n download: if True, download dataset and store it in the root directory\n checksum: if True, check the MD5 of the downloaded files (may be slow)\n\n Raises:\n AssertionError: if ``split`` argument is invalid\n DatasetNotFoundError: If dataset is not found and *download* is False.\n \"\"\"\n assert split in self.splits\n self.root = root\n self.split = split\n self.download = download\n self.checksum = checksum\n self._verify()\n\n super().__init__(\n root=os.path.join(root, self.directory, self.split), transforms=transforms\n )\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\"\"\"\n # Check if the files already exist\n path = os.path.join(self.root, self.directory)\n if os.path.exists(path):\n return\n\n # Check if zip file already exists (if so then extract)\n filepath = os.path.join(self.root, self.filename)\n if os.path.exists(filepath):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise DatasetNotFoundError(self)\n\n # Download and extract the dataset\n self._download()\n self._extract()\n\n def _download(self) -> None:\n \"\"\"Download the dataset.\"\"\"\n download_url(\n self.url,\n self.root,\n filename=self.filename,\n md5=self.md5 if self.checksum else None,\n )\n\n def _extract(self) -> None:\n \"\"\"Extract the dataset.\"\"\"\n filepath = os.path.join(self.root, self.filename)\n extract_archive(filepath)\n\n def plot(\n self,\n sample: dict[str, Tensor],\n show_titles: bool = True,\n suptitle: str | None = None,\n ) -> Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample returned by :meth:`NonGeoClassificationDataset.__getitem__`\n show_titles: flag indicating whether to show titles above each panel\n suptitle: optional string to use as a suptitle\n\n Returns:\n a matplotlib Figure with the rendered sample\n \"\"\"\n image = sample[\"image\"].permute((1, 2, 0)).numpy()\n label = cast(int, sample[\"label\"].item())\n label_class = self.classes[label]\n\n showing_predictions = \"prediction\" in sample\n if showing_predictions:\n prediction = cast(int, sample[\"prediction\"].item())\n prediction_class = self.classes[prediction]\n\n fig, ax = plt.subplots(figsize=(4, 4))\n ax.imshow(image)\n ax.axis(\"off\")\n if show_titles:\n title = f\"Label: {label_class}\"\n if showing_predictions:\n title += f\"\\nPrediction: {prediction_class}\"\n ax.set_title(title)\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n return fig\n", "path": "torchgeo/datasets/fire_risk.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"FireRisk dataset.\"\"\"\n\nimport os\nfrom collections.abc import Callable\nfrom typing import cast\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nfrom torch import Tensor\n\nfrom .geo import NonGeoClassificationDataset\nfrom .utils import DatasetNotFoundError, download_url, extract_archive\n\n\nclass FireRisk(NonGeoClassificationDataset):\n \"\"\"FireRisk dataset.\n\n The `FireRisk <https://github.com/CharmonyShen/FireRisk>`__\n dataset is a dataset for remote sensing fire risk classification.\n\n Dataset features:\n\n * 91,872 images with 1 m per pixel resolution (320x320 px)\n * 70,331 and 21,541 train and val images, respectively\n * three spectral bands - RGB\n * 7 fire risk classes\n * images extracted from NAIP tiles\n\n Dataset format:\n\n * images are three-channel pngs\n\n Dataset classes:\n\n 0. high\n 1. low\n 2. moderate\n 3. non-burnable\n 4. very_high\n 5. very_low\n 6. water\n\n If you use this dataset in your research, please cite the following paper:\n\n * https://arxiv.org/abs/2303.07035\n\n .. versionadded:: 0.5\n \"\"\"\n\n url = \"https://hf.co/datasets/torchgeo/fire_risk/resolve/e6046a04350c6f1ab4ad791fb3a40bf8940be269/FireRisk.zip\"\n md5 = \"a77b9a100d51167992ae8c51d26198a6\"\n filename = \"FireRisk.zip\"\n directory = \"FireRisk\"\n splits = [\"train\", \"val\"]\n classes = [\n \"High\",\n \"Low\",\n \"Moderate\",\n \"Non-burnable\",\n \"Very_High\",\n \"Very_Low\",\n \"Water\",\n ]\n\n def __init__(\n self,\n root: str = \"data\",\n split: str = \"train\",\n transforms: Callable[[dict[str, Tensor]], dict[str, Tensor]] | None = None,\n download: bool = False,\n checksum: bool = False,\n ) -> None:\n \"\"\"Initialize a new FireRisk dataset instance.\n\n Args:\n root: root directory where dataset can be found\n split: one of \"train\" or \"val\"\n transforms: a function/transform that takes input sample and its target as\n entry and returns a transformed version\n download: if True, download dataset and store it in the root directory\n checksum: if True, check the MD5 of the downloaded files (may be slow)\n\n Raises:\n AssertionError: if ``split`` argument is invalid\n DatasetNotFoundError: If dataset is not found and *download* is False.\n \"\"\"\n assert split in self.splits\n self.root = root\n self.split = split\n self.download = download\n self.checksum = checksum\n self._verify()\n\n super().__init__(\n root=os.path.join(root, self.directory, self.split), transforms=transforms\n )\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\"\"\"\n # Check if the files already exist\n path = os.path.join(self.root, self.directory)\n if os.path.exists(path):\n return\n\n # Check if zip file already exists (if so then extract)\n filepath = os.path.join(self.root, self.filename)\n if os.path.exists(filepath):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise DatasetNotFoundError(self)\n\n # Download and extract the dataset\n self._download()\n self._extract()\n\n def _download(self) -> None:\n \"\"\"Download the dataset.\"\"\"\n download_url(\n self.url,\n self.root,\n filename=self.filename,\n md5=self.md5 if self.checksum else None,\n )\n\n def _extract(self) -> None:\n \"\"\"Extract the dataset.\"\"\"\n filepath = os.path.join(self.root, self.filename)\n extract_archive(filepath)\n\n def plot(\n self,\n sample: dict[str, Tensor],\n show_titles: bool = True,\n suptitle: str | None = None,\n 
) -> Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample returned by :meth:`NonGeoClassificationDataset.__getitem__`\n show_titles: flag indicating whether to show titles above each panel\n suptitle: optional string to use as a suptitle\n\n Returns:\n a matplotlib Figure with the rendered sample\n \"\"\"\n image = sample[\"image\"].permute((1, 2, 0)).numpy()\n label = cast(int, sample[\"label\"].item())\n label_class = self.classes[label]\n\n showing_predictions = \"prediction\" in sample\n if showing_predictions:\n prediction = cast(int, sample[\"prediction\"].item())\n prediction_class = self.classes[prediction]\n\n fig, ax = plt.subplots(figsize=(4, 4))\n ax.imshow(image)\n ax.axis(\"off\")\n if show_titles:\n title = f\"Label: {label_class}\"\n if showing_predictions:\n title += f\"\\nPrediction: {prediction_class}\"\n ax.set_title(title)\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n return fig\n", "path": "torchgeo/datasets/fire_risk.py"}]} | 3,199 | 221 |
gh_patches_debug_37959 | rasdani/github-patches | git_diff | openvinotoolkit__datumaro-371 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Import ImageNet dataset
### Steps to reproduce problem
1. Download and extract ImageNet dataset for image classification: [link](https://www.kaggle.com/c/imagenet-object-localization-challenge/data);
2. Add the loaded dataset into a Datumaro project;
3. Run `datum info`.
### Current behaviour
ImageNet dataset has ~1.2m images, but in the `info` output we can see that imported dataset has only 69647, and also these images have wrong labels.
### Expected behaviour
Correct import.
### Environment
`git log -1`: 7e35c8
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datumaro/plugins/imagenet_format.py`
Content:
```
1 # Copyright (C) 2020 Intel Corporation
2 #
3 # SPDX-License-Identifier: MIT
4
5 import logging as log
6 import os
7 import os.path as osp
8
9 from datumaro.components.converter import Converter
10 from datumaro.components.extractor import (
11 AnnotationType, DatasetItem, Importer, Label, LabelCategories,
12 SourceExtractor,
13 )
14 from datumaro.util.image import find_images
15
16
17 class ImagenetPath:
18 IMAGE_DIR_NO_LABEL = 'no_label'
19
20
21 class ImagenetExtractor(SourceExtractor):
22 def __init__(self, path, subset=None):
23 assert osp.isdir(path), path
24 super().__init__(subset=subset)
25
26 self._categories = self._load_categories(path)
27 self._items = list(self._load_items(path).values())
28
29 def _load_categories(self, path):
30 label_cat = LabelCategories()
31 for dirname in sorted(os.listdir(path)):
32 if dirname != ImagenetPath.IMAGE_DIR_NO_LABEL:
33 label_cat.add(dirname)
34 return { AnnotationType.label: label_cat }
35
36 def _load_items(self, path):
37 items = {}
38
39 for image_path in find_images(path, recursive=True, max_depth=1):
40 label = osp.basename(osp.dirname(image_path))
41 image_name = osp.splitext(osp.basename(image_path))[0]
42 if image_name.startswith(label + '_'):
43 image_name = image_name[len(label) + 1:]
44
45 item = items.get(image_name)
46 if item is None:
47 item = DatasetItem(id=image_name, subset=self._subset,
48 image=image_path)
49 items[image_name] = item
50 annotations = item.annotations
51
52 if label != ImagenetPath.IMAGE_DIR_NO_LABEL:
53 label = self._categories[AnnotationType.label].find(label)[0]
54 annotations.append(Label(label=label))
55
56 return items
57
58
59 class ImagenetImporter(Importer):
60 @classmethod
61 def find_sources(cls, path):
62 if not osp.isdir(path):
63 return []
64 return [{ 'url': path, 'format': 'imagenet' }]
65
66
67 class ImagenetConverter(Converter):
68 DEFAULT_IMAGE_EXT = '.jpg'
69
70 def apply(self):
71 if 1 < len(self._extractor.subsets()):
72 log.warning("ImageNet format only supports exporting a single "
73 "subset, subset information will not be used.")
74
75 subset_dir = self._save_dir
76 extractor = self._extractor
77 labels = {}
78 for item in self._extractor:
79 labels = set(p.label for p in item.annotations
80 if p.type == AnnotationType.label)
81
82 for label in labels:
83 label_name = extractor.categories()[AnnotationType.label][label].name
84 self._save_image(item, osp.join(subset_dir, label_name,
85 '%s_%s' % (label_name, self._make_image_filename(item))))
86
87 if not labels:
88 self._save_image(item, osp.join(subset_dir,
89 ImagenetPath.IMAGE_DIR_NO_LABEL,
90 ImagenetPath.IMAGE_DIR_NO_LABEL + '_' + \
91 self._make_image_filename(item)))
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/datumaro/plugins/imagenet_format.py b/datumaro/plugins/imagenet_format.py
--- a/datumaro/plugins/imagenet_format.py
+++ b/datumaro/plugins/imagenet_format.py
@@ -39,14 +39,13 @@
for image_path in find_images(path, recursive=True, max_depth=1):
label = osp.basename(osp.dirname(image_path))
image_name = osp.splitext(osp.basename(image_path))[0]
- if image_name.startswith(label + '_'):
- image_name = image_name[len(label) + 1:]
- item = items.get(image_name)
+ item_id = osp.join(label, image_name)
+ item = items.get(item_id)
if item is None:
- item = DatasetItem(id=image_name, subset=self._subset,
+ item = DatasetItem(id=item_id, subset=self._subset,
image=image_path)
- items[image_name] = item
+ items[item_id] = item
annotations = item.annotations
if label != ImagenetPath.IMAGE_DIR_NO_LABEL:
@@ -68,6 +67,13 @@
DEFAULT_IMAGE_EXT = '.jpg'
def apply(self):
+
+ def _get_dir_name(id_parts, label_name):
+ if 1 < len(id_parts) and id_parts[0] == label_name:
+ return ''
+ else:
+ return label_name
+
if 1 < len(self._extractor.subsets()):
log.warning("ImageNet format only supports exporting a single "
"subset, subset information will not be used.")
@@ -76,16 +82,15 @@
extractor = self._extractor
labels = {}
for item in self._extractor:
+ id_parts = item.id.split('/')
labels = set(p.label for p in item.annotations
if p.type == AnnotationType.label)
for label in labels:
label_name = extractor.categories()[AnnotationType.label][label].name
- self._save_image(item, osp.join(subset_dir, label_name,
- '%s_%s' % (label_name, self._make_image_filename(item))))
+ self._save_image(item, subdir=osp.join(subset_dir,
+ _get_dir_name(id_parts, label_name)))
if not labels:
- self._save_image(item, osp.join(subset_dir,
- ImagenetPath.IMAGE_DIR_NO_LABEL,
- ImagenetPath.IMAGE_DIR_NO_LABEL + '_' + \
- self._make_image_filename(item)))
+ self._save_image(item, subdir=osp.join(subset_dir,
+ _get_dir_name(id_parts, ImagenetPath.IMAGE_DIR_NO_LABEL)))
| {"golden_diff": "diff --git a/datumaro/plugins/imagenet_format.py b/datumaro/plugins/imagenet_format.py\n--- a/datumaro/plugins/imagenet_format.py\n+++ b/datumaro/plugins/imagenet_format.py\n@@ -39,14 +39,13 @@\n for image_path in find_images(path, recursive=True, max_depth=1):\n label = osp.basename(osp.dirname(image_path))\n image_name = osp.splitext(osp.basename(image_path))[0]\n- if image_name.startswith(label + '_'):\n- image_name = image_name[len(label) + 1:]\n \n- item = items.get(image_name)\n+ item_id = osp.join(label, image_name)\n+ item = items.get(item_id)\n if item is None:\n- item = DatasetItem(id=image_name, subset=self._subset,\n+ item = DatasetItem(id=item_id, subset=self._subset,\n image=image_path)\n- items[image_name] = item\n+ items[item_id] = item\n annotations = item.annotations\n \n if label != ImagenetPath.IMAGE_DIR_NO_LABEL:\n@@ -68,6 +67,13 @@\n DEFAULT_IMAGE_EXT = '.jpg'\n \n def apply(self):\n+\n+ def _get_dir_name(id_parts, label_name):\n+ if 1 < len(id_parts) and id_parts[0] == label_name:\n+ return ''\n+ else:\n+ return label_name\n+\n if 1 < len(self._extractor.subsets()):\n log.warning(\"ImageNet format only supports exporting a single \"\n \"subset, subset information will not be used.\")\n@@ -76,16 +82,15 @@\n extractor = self._extractor\n labels = {}\n for item in self._extractor:\n+ id_parts = item.id.split('/')\n labels = set(p.label for p in item.annotations\n if p.type == AnnotationType.label)\n \n for label in labels:\n label_name = extractor.categories()[AnnotationType.label][label].name\n- self._save_image(item, osp.join(subset_dir, label_name,\n- '%s_%s' % (label_name, self._make_image_filename(item))))\n+ self._save_image(item, subdir=osp.join(subset_dir,\n+ _get_dir_name(id_parts, label_name)))\n \n if not labels:\n- self._save_image(item, osp.join(subset_dir,\n- ImagenetPath.IMAGE_DIR_NO_LABEL,\n- ImagenetPath.IMAGE_DIR_NO_LABEL + '_' + \\\n- self._make_image_filename(item)))\n+ self._save_image(item, subdir=osp.join(subset_dir,\n+ _get_dir_name(id_parts, ImagenetPath.IMAGE_DIR_NO_LABEL)))\n", "issue": "Import ImageNet dataset\n### Steps to reproduce problem\r\n1. Download and extract ImageNet dataset for image classification: [link](https://www.kaggle.com/c/imagenet-object-localization-challenge/data);\r\n2. Add the loaded dataset into a Datumaro project;\r\n3. 
Run `datum info`.\r\n\r\n### Current behaviour\r\nImageNet dataset has ~1.2m images, but in the `info` output we can see that imported dataset has only 69647, and also these images have wrong labels.\r\n\r\n### Expected behaviour\r\nCorrect import.\r\n\r\n### Environment\r\n`git log -1`: 7e35c8\n", "before_files": [{"content": "# Copyright (C) 2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport logging as log\nimport os\nimport os.path as osp\n\nfrom datumaro.components.converter import Converter\nfrom datumaro.components.extractor import (\n AnnotationType, DatasetItem, Importer, Label, LabelCategories,\n SourceExtractor,\n)\nfrom datumaro.util.image import find_images\n\n\nclass ImagenetPath:\n IMAGE_DIR_NO_LABEL = 'no_label'\n\n\nclass ImagenetExtractor(SourceExtractor):\n def __init__(self, path, subset=None):\n assert osp.isdir(path), path\n super().__init__(subset=subset)\n\n self._categories = self._load_categories(path)\n self._items = list(self._load_items(path).values())\n\n def _load_categories(self, path):\n label_cat = LabelCategories()\n for dirname in sorted(os.listdir(path)):\n if dirname != ImagenetPath.IMAGE_DIR_NO_LABEL:\n label_cat.add(dirname)\n return { AnnotationType.label: label_cat }\n\n def _load_items(self, path):\n items = {}\n\n for image_path in find_images(path, recursive=True, max_depth=1):\n label = osp.basename(osp.dirname(image_path))\n image_name = osp.splitext(osp.basename(image_path))[0]\n if image_name.startswith(label + '_'):\n image_name = image_name[len(label) + 1:]\n\n item = items.get(image_name)\n if item is None:\n item = DatasetItem(id=image_name, subset=self._subset,\n image=image_path)\n items[image_name] = item\n annotations = item.annotations\n\n if label != ImagenetPath.IMAGE_DIR_NO_LABEL:\n label = self._categories[AnnotationType.label].find(label)[0]\n annotations.append(Label(label=label))\n\n return items\n\n\nclass ImagenetImporter(Importer):\n @classmethod\n def find_sources(cls, path):\n if not osp.isdir(path):\n return []\n return [{ 'url': path, 'format': 'imagenet' }]\n\n\nclass ImagenetConverter(Converter):\n DEFAULT_IMAGE_EXT = '.jpg'\n\n def apply(self):\n if 1 < len(self._extractor.subsets()):\n log.warning(\"ImageNet format only supports exporting a single \"\n \"subset, subset information will not be used.\")\n\n subset_dir = self._save_dir\n extractor = self._extractor\n labels = {}\n for item in self._extractor:\n labels = set(p.label for p in item.annotations\n if p.type == AnnotationType.label)\n\n for label in labels:\n label_name = extractor.categories()[AnnotationType.label][label].name\n self._save_image(item, osp.join(subset_dir, label_name,\n '%s_%s' % (label_name, self._make_image_filename(item))))\n\n if not labels:\n self._save_image(item, osp.join(subset_dir,\n ImagenetPath.IMAGE_DIR_NO_LABEL,\n ImagenetPath.IMAGE_DIR_NO_LABEL + '_' + \\\n self._make_image_filename(item)))\n", "path": "datumaro/plugins/imagenet_format.py"}], "after_files": [{"content": "# Copyright (C) 2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport logging as log\nimport os\nimport os.path as osp\n\nfrom datumaro.components.converter import Converter\nfrom datumaro.components.extractor import (\n AnnotationType, DatasetItem, Importer, Label, LabelCategories,\n SourceExtractor,\n)\nfrom datumaro.util.image import find_images\n\n\nclass ImagenetPath:\n IMAGE_DIR_NO_LABEL = 'no_label'\n\n\nclass ImagenetExtractor(SourceExtractor):\n def __init__(self, path, subset=None):\n assert osp.isdir(path), path\n 
super().__init__(subset=subset)\n\n self._categories = self._load_categories(path)\n self._items = list(self._load_items(path).values())\n\n def _load_categories(self, path):\n label_cat = LabelCategories()\n for dirname in sorted(os.listdir(path)):\n if dirname != ImagenetPath.IMAGE_DIR_NO_LABEL:\n label_cat.add(dirname)\n return { AnnotationType.label: label_cat }\n\n def _load_items(self, path):\n items = {}\n\n for image_path in find_images(path, recursive=True, max_depth=1):\n label = osp.basename(osp.dirname(image_path))\n image_name = osp.splitext(osp.basename(image_path))[0]\n\n item_id = osp.join(label, image_name)\n item = items.get(item_id)\n if item is None:\n item = DatasetItem(id=item_id, subset=self._subset,\n image=image_path)\n items[item_id] = item\n annotations = item.annotations\n\n if label != ImagenetPath.IMAGE_DIR_NO_LABEL:\n label = self._categories[AnnotationType.label].find(label)[0]\n annotations.append(Label(label=label))\n\n return items\n\n\nclass ImagenetImporter(Importer):\n @classmethod\n def find_sources(cls, path):\n if not osp.isdir(path):\n return []\n return [{ 'url': path, 'format': 'imagenet' }]\n\n\nclass ImagenetConverter(Converter):\n DEFAULT_IMAGE_EXT = '.jpg'\n\n def apply(self):\n\n def _get_dir_name(id_parts, label_name):\n if 1 < len(id_parts) and id_parts[0] == label_name:\n return ''\n else:\n return label_name\n\n if 1 < len(self._extractor.subsets()):\n log.warning(\"ImageNet format only supports exporting a single \"\n \"subset, subset information will not be used.\")\n\n subset_dir = self._save_dir\n extractor = self._extractor\n labels = {}\n for item in self._extractor:\n id_parts = item.id.split('/')\n labels = set(p.label for p in item.annotations\n if p.type == AnnotationType.label)\n\n for label in labels:\n label_name = extractor.categories()[AnnotationType.label][label].name\n self._save_image(item, subdir=osp.join(subset_dir,\n _get_dir_name(id_parts, label_name)))\n\n if not labels:\n self._save_image(item, subdir=osp.join(subset_dir,\n _get_dir_name(id_parts, ImagenetPath.IMAGE_DIR_NO_LABEL)))\n", "path": "datumaro/plugins/imagenet_format.py"}]} | 1,227 | 590 |
gh_patches_debug_4923 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-2852 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash with web-history-max-items and no items in the history
When running `qutebrowser --debug --temp-basedir --backend webengine -s completion web-history-max-items 1000` and immediately pressing `o`, this happens:
```
12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: "SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)"
12:21:36 DEBUG sql sql:run:99 Running SQL query: "SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)"
12:21:36 DEBUG sql sql:run:102 query bindings: {':limit': 1000}
12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: "SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\' or title LIKE :pat escape '\') AND last_atime >= ORDER BY last_atime DESC"
12:21:36 DEBUG completion debug:__exit__:264 Starting url completion took 0.003652 seconds.
12:21:36 ERROR misc crashsignal:exception_hook:205 Uncaught exception
Traceback (most recent call last):
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/completer.py", line 236, in _update_completion
model = func(*args)
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/urlmodel.py", line 70, in url
hist_cat = histcategory.HistoryCategory(delete_func=_delete_history)
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/histcategory.py", line 54, in __init__
]), forward_only=False)
File "/home/florian/proj/qutebrowser/git/qutebrowser/misc/sql.py", line 83, in __init__
querystr, self.lastError().text()))
qutebrowser.misc.sql.SqlException: Failed to prepare query "SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\' or title LIKE :pat escape '\') AND last_atime >= ORDER BY last_atime DESC": "near "ORDER": syntax error Unable to execute statement"
```
cc @rcorre
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutebrowser/completion/models/histcategory.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2017 Ryan Roden-Corrent (rcorre) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """A completion category that queries the SQL History store."""
21
22 import re
23
24 from PyQt5.QtSql import QSqlQueryModel
25
26 from qutebrowser.misc import sql
27 from qutebrowser.utils import debug
28 from qutebrowser.commands import cmdexc
29 from qutebrowser.config import config
30
31
32 class HistoryCategory(QSqlQueryModel):
33
34 """A completion category that queries the SQL History store."""
35
36 def __init__(self, *, delete_func=None, parent=None):
37 """Create a new History completion category."""
38 super().__init__(parent=parent)
39 self.name = "History"
40
41 # replace ' in timestamp-format to avoid breaking the query
42 timefmt = ("strftime('{}', last_atime, 'unixepoch', 'localtime')"
43 .format(config.get('completion', 'timestamp-format')
44 .replace("'", "`")))
45
46 self._query = sql.Query(' '.join([
47 "SELECT url, title, {}".format(timefmt),
48 "FROM CompletionHistory",
49 # the incoming pattern will have literal % and _ escaped with '\'
50 # we need to tell sql to treat '\' as an escape character
51 "WHERE (url LIKE :pat escape '\\' or title LIKE :pat escape '\\')",
52 self._atime_expr(),
53 "ORDER BY last_atime DESC",
54 ]), forward_only=False)
55
56 # advertise that this model filters by URL and title
57 self.columns_to_filter = [0, 1]
58 self.delete_func = delete_func
59
60 def _atime_expr(self):
61 """If max_items is set, return an expression to limit the query."""
62 max_items = config.get('completion', 'web-history-max-items')
63 # HistoryCategory should not be added to the completion in that case.
64 assert max_items != 0
65
66 if max_items < 0:
67 return ''
68
69 min_atime = sql.Query(' '.join([
70 'SELECT min(last_atime) FROM',
71 '(SELECT last_atime FROM CompletionHistory',
72 'ORDER BY last_atime DESC LIMIT :limit)',
73 ])).run(limit=max_items).value()
74
75 return "AND last_atime >= {}".format(min_atime)
76
77 def set_pattern(self, pattern):
78 """Set the pattern used to filter results.
79
80 Args:
81 pattern: string pattern to filter by.
82 """
83 # escape to treat a user input % or _ as a literal, not a wildcard
84 pattern = pattern.replace('%', '\\%')
85 pattern = pattern.replace('_', '\\_')
86 # treat spaces as wildcards to match any of the typed words
87 pattern = re.sub(r' +', '%', pattern)
88 pattern = '%{}%'.format(pattern)
89 with debug.log_time('sql', 'Running completion query'):
90 self._query.run(pat=pattern)
91 self.setQuery(self._query)
92
93 def delete_cur_item(self, index):
94 """Delete the row at the given index."""
95 if not self.delete_func:
96 raise cmdexc.CommandError("Cannot delete this item.")
97 data = [self.data(index.sibling(index.row(), i))
98 for i in range(self.columnCount())]
99 self.delete_func(data)
100 # re-run query to reload updated table
101 with debug.log_time('sql', 'Re-running completion query post-delete'):
102 self._query.run()
103 self.setQuery(self._query)
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qutebrowser/completion/models/histcategory.py b/qutebrowser/completion/models/histcategory.py
--- a/qutebrowser/completion/models/histcategory.py
+++ b/qutebrowser/completion/models/histcategory.py
@@ -72,6 +72,10 @@
'ORDER BY last_atime DESC LIMIT :limit)',
])).run(limit=max_items).value()
+ if not min_atime:
+ # if there are no history items, min_atime may be '' (issue #2849)
+ return ''
+
return "AND last_atime >= {}".format(min_atime)
def set_pattern(self, pattern):
| {"golden_diff": "diff --git a/qutebrowser/completion/models/histcategory.py b/qutebrowser/completion/models/histcategory.py\n--- a/qutebrowser/completion/models/histcategory.py\n+++ b/qutebrowser/completion/models/histcategory.py\n@@ -72,6 +72,10 @@\n 'ORDER BY last_atime DESC LIMIT :limit)',\n ])).run(limit=max_items).value()\n \n+ if not min_atime:\n+ # if there are no history items, min_atime may be '' (issue #2849)\n+ return ''\n+\n return \"AND last_atime >= {}\".format(min_atime)\n \n def set_pattern(self, pattern):\n", "issue": "Crash with web-history-max-items and no items in the history\nWhen running `qutebrowser --debug --temp-basedir --backend webengine -s completion web-history-max-items 1000` and immediately pressing `o`, this happens:\r\n\r\n```\r\n12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: \"SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)\"\r\n12:21:36 DEBUG sql sql:run:99 Running SQL query: \"SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)\"\r\n12:21:36 DEBUG sql sql:run:102 query bindings: {':limit': 1000}\r\n12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: \"SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\\' or title LIKE :pat escape '\\') AND last_atime >= ORDER BY last_atime DESC\"\r\n12:21:36 DEBUG completion debug:__exit__:264 Starting url completion took 0.003652 seconds.\r\n12:21:36 ERROR misc crashsignal:exception_hook:205 Uncaught exception\r\nTraceback (most recent call last):\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/completer.py\", line 236, in _update_completion\r\n model = func(*args)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/urlmodel.py\", line 70, in url\r\n hist_cat = histcategory.HistoryCategory(delete_func=_delete_history)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/histcategory.py\", line 54, in __init__\r\n ]), forward_only=False)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/misc/sql.py\", line 83, in __init__\r\n querystr, self.lastError().text()))\r\nqutebrowser.misc.sql.SqlException: Failed to prepare query \"SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\\' or title LIKE :pat escape '\\') AND last_atime >= ORDER BY last_atime DESC\": \"near \"ORDER\": syntax error Unable to execute statement\"\r\n```\r\n\r\ncc @rcorre \n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2017 Ryan Roden-Corrent (rcorre) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"A completion category that queries the SQL History store.\"\"\"\n\nimport re\n\nfrom PyQt5.QtSql import QSqlQueryModel\n\nfrom qutebrowser.misc import sql\nfrom qutebrowser.utils import debug\nfrom qutebrowser.commands import cmdexc\nfrom qutebrowser.config import config\n\n\nclass HistoryCategory(QSqlQueryModel):\n\n \"\"\"A completion category that queries the SQL History store.\"\"\"\n\n def __init__(self, *, delete_func=None, parent=None):\n \"\"\"Create a new History completion category.\"\"\"\n super().__init__(parent=parent)\n self.name = \"History\"\n\n # replace ' in timestamp-format to avoid breaking the query\n timefmt = (\"strftime('{}', last_atime, 'unixepoch', 'localtime')\"\n .format(config.get('completion', 'timestamp-format')\n .replace(\"'\", \"`\")))\n\n self._query = sql.Query(' '.join([\n \"SELECT url, title, {}\".format(timefmt),\n \"FROM CompletionHistory\",\n # the incoming pattern will have literal % and _ escaped with '\\'\n # we need to tell sql to treat '\\' as an escape character\n \"WHERE (url LIKE :pat escape '\\\\' or title LIKE :pat escape '\\\\')\",\n self._atime_expr(),\n \"ORDER BY last_atime DESC\",\n ]), forward_only=False)\n\n # advertise that this model filters by URL and title\n self.columns_to_filter = [0, 1]\n self.delete_func = delete_func\n\n def _atime_expr(self):\n \"\"\"If max_items is set, return an expression to limit the query.\"\"\"\n max_items = config.get('completion', 'web-history-max-items')\n # HistoryCategory should not be added to the completion in that case.\n assert max_items != 0\n\n if max_items < 0:\n return ''\n\n min_atime = sql.Query(' '.join([\n 'SELECT min(last_atime) FROM',\n '(SELECT last_atime FROM CompletionHistory',\n 'ORDER BY last_atime DESC LIMIT :limit)',\n ])).run(limit=max_items).value()\n\n return \"AND last_atime >= {}\".format(min_atime)\n\n def set_pattern(self, pattern):\n \"\"\"Set the pattern used to filter results.\n\n Args:\n pattern: string pattern to filter by.\n \"\"\"\n # escape to treat a user input % or _ as a literal, not a wildcard\n pattern = pattern.replace('%', '\\\\%')\n pattern = pattern.replace('_', '\\\\_')\n # treat spaces as wildcards to match any of the typed words\n pattern = re.sub(r' +', '%', pattern)\n pattern = '%{}%'.format(pattern)\n with debug.log_time('sql', 'Running completion query'):\n self._query.run(pat=pattern)\n self.setQuery(self._query)\n\n def delete_cur_item(self, index):\n \"\"\"Delete the row at the given index.\"\"\"\n if not self.delete_func:\n raise cmdexc.CommandError(\"Cannot delete this item.\")\n data = [self.data(index.sibling(index.row(), i))\n for i in range(self.columnCount())]\n self.delete_func(data)\n # re-run query to reload updated table\n with debug.log_time('sql', 'Re-running completion query post-delete'):\n self._query.run()\n self.setQuery(self._query)\n", "path": "qutebrowser/completion/models/histcategory.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2017 Ryan Roden-Corrent (rcorre) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# 
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"A completion category that queries the SQL History store.\"\"\"\n\nimport re\n\nfrom PyQt5.QtSql import QSqlQueryModel\n\nfrom qutebrowser.misc import sql\nfrom qutebrowser.utils import debug\nfrom qutebrowser.commands import cmdexc\nfrom qutebrowser.config import config\n\n\nclass HistoryCategory(QSqlQueryModel):\n\n \"\"\"A completion category that queries the SQL History store.\"\"\"\n\n def __init__(self, *, delete_func=None, parent=None):\n \"\"\"Create a new History completion category.\"\"\"\n super().__init__(parent=parent)\n self.name = \"History\"\n\n # replace ' in timestamp-format to avoid breaking the query\n timefmt = (\"strftime('{}', last_atime, 'unixepoch', 'localtime')\"\n .format(config.get('completion', 'timestamp-format')\n .replace(\"'\", \"`\")))\n\n self._query = sql.Query(' '.join([\n \"SELECT url, title, {}\".format(timefmt),\n \"FROM CompletionHistory\",\n # the incoming pattern will have literal % and _ escaped with '\\'\n # we need to tell sql to treat '\\' as an escape character\n \"WHERE (url LIKE :pat escape '\\\\' or title LIKE :pat escape '\\\\')\",\n self._atime_expr(),\n \"ORDER BY last_atime DESC\",\n ]), forward_only=False)\n\n # advertise that this model filters by URL and title\n self.columns_to_filter = [0, 1]\n self.delete_func = delete_func\n\n def _atime_expr(self):\n \"\"\"If max_items is set, return an expression to limit the query.\"\"\"\n max_items = config.get('completion', 'web-history-max-items')\n # HistoryCategory should not be added to the completion in that case.\n assert max_items != 0\n\n if max_items < 0:\n return ''\n\n min_atime = sql.Query(' '.join([\n 'SELECT min(last_atime) FROM',\n '(SELECT last_atime FROM CompletionHistory',\n 'ORDER BY last_atime DESC LIMIT :limit)',\n ])).run(limit=max_items).value()\n\n if not min_atime:\n # if there are no history items, min_atime may be '' (issue #2849)\n return ''\n\n return \"AND last_atime >= {}\".format(min_atime)\n\n def set_pattern(self, pattern):\n \"\"\"Set the pattern used to filter results.\n\n Args:\n pattern: string pattern to filter by.\n \"\"\"\n # escape to treat a user input % or _ as a literal, not a wildcard\n pattern = pattern.replace('%', '\\\\%')\n pattern = pattern.replace('_', '\\\\_')\n # treat spaces as wildcards to match any of the typed words\n pattern = re.sub(r' +', '%', pattern)\n pattern = '%{}%'.format(pattern)\n with debug.log_time('sql', 'Running completion query'):\n self._query.run(pat=pattern)\n self.setQuery(self._query)\n\n def delete_cur_item(self, index):\n \"\"\"Delete the row at the given index.\"\"\"\n if not self.delete_func:\n raise cmdexc.CommandError(\"Cannot delete this item.\")\n data = [self.data(index.sibling(index.row(), i))\n for i in range(self.columnCount())]\n self.delete_func(data)\n # re-run query to reload updated table\n with debug.log_time('sql', 'Re-running completion query post-delete'):\n self._query.run()\n self.setQuery(self._query)\n", "path": "qutebrowser/completion/models/histcategory.py"}]} | 1,957 | 148 |
gh_patches_debug_32333 | rasdani/github-patches | git_diff | airctic__icevision-1091 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo lightning -> lighting in Albumentations helper
There are several instances in the codebase with the typo `lightning` when the intended term is `lighting`
https://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L35
https://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L57
https://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L74
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `icevision/tfms/albumentations/albumentations_helpers.py`
Content:
```
1 __all__ = ["aug_tfms", "resize", "resize_and_pad", "get_size_without_padding"]
2
3 import albumentations as A
4
5 from icevision.imports import *
6 from icevision.core import *
7
8
9 def resize(size, ratio_resize=A.LongestMaxSize):
10 return ratio_resize(size) if isinstance(size, int) else A.Resize(*size[::-1])
11
12
13 def resize_and_pad(
14 size: Union[int, Tuple[int, int]],
15 pad: A.DualTransform = partial(
16 A.PadIfNeeded, border_mode=cv2.BORDER_CONSTANT, value=[124, 116, 104]
17 ),
18 ):
19 width, height = (size, size) if isinstance(size, int) else size
20 return [resize(size), pad(min_height=height, min_width=width)]
21
22
23 def aug_tfms(
24 size: Union[int, Tuple[int, int]],
25 presize: Optional[Union[int, Tuple[int, int]]] = None,
26 horizontal_flip: Optional[A.HorizontalFlip] = A.HorizontalFlip(),
27 shift_scale_rotate: Optional[A.ShiftScaleRotate] = A.ShiftScaleRotate(
28 rotate_limit=15,
29 ),
30 rgb_shift: Optional[A.RGBShift] = A.RGBShift(
31 r_shift_limit=10,
32 g_shift_limit=10,
33 b_shift_limit=10,
34 ),
35 lightning: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),
36 blur: Optional[A.Blur] = A.Blur(blur_limit=(1, 3)),
37 crop_fn: Optional[A.DualTransform] = partial(A.RandomSizedBBoxSafeCrop, p=0.5),
38 pad: Optional[A.DualTransform] = partial(
39 A.PadIfNeeded, border_mode=cv2.BORDER_CONSTANT, value=[124, 116, 104]
40 ),
41 ) -> List[A.BasicTransform]:
42 """Collection of useful augmentation transforms.
43
44 # Arguments
45 size: The final size of the image. If an `int` is given, the maximum size of
46 the image is rescaled, maintaing aspect ratio. If a `tuple` is given,
47 the image is rescaled to have that exact size (width, height).
48 presize: Rescale the image before applying other transfroms. If `None` this
49 transform is not applied. First introduced by fastai,this technique is
50 explained in their book in [this](https://github.com/fastai/fastbook/blob/master/05_pet_breeds.ipynb)
51 chapter (tip: search for "Presizing").
52 horizontal_flip: Flip around the y-axis. If `None` this transform is not applied.
53 shift_scale_rotate: Randomly shift, scale, and rotate. If `None` this transform
54 is not applied.
55 rgb_shift: Randomly shift values for each channel of RGB image. If `None` this
56 transform is not applied.
57 lightning: Randomly changes Brightness and Contrast. If `None` this transform
58 is not applied.
59 blur: Randomly blur the image. If `None` this transform is not applied.
60 crop_fn: Randomly crop the image. If `None` this transform is not applied.
61 Use `partial` to saturate other parameters of the class.
62 pad: Pad the image to `size`, squaring the image if `size` is an `int`.
63 If `None` this transform is not applied. Use `partial` to sature other
64 parameters of the class.
65
66 # Returns
67 A list of albumentations transforms.
68 """
69
70 width, height = (size, size) if isinstance(size, int) else size
71
72 tfms = []
73 tfms += [resize(presize, A.SmallestMaxSize) if presize is not None else None]
74 tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lightning, blur]
75 # Resize as the last transforms to reduce the number of artificial artifacts created
76 if crop_fn is not None:
77 crop = crop_fn(height=height, width=width)
78 tfms += [A.OneOrOther(crop, resize(size), p=crop.p)]
79 else:
80 tfms += [resize(size)]
81 tfms += [pad(min_height=height, min_width=width) if pad is not None else None]
82
83 tfms = [tfm for tfm in tfms if tfm is not None]
84
85 return tfms
86
87
88 def get_size_without_padding(
89 tfms_list: List[Any], before_tfm_img: PIL.Image.Image, height: int, width: int
90 ) -> Tuple[int, int]:
91 """
92 Infer the height and width of the pre-processed image after removing padding.
93
94 Parameters
95 ----------
96 tfms_list: list of albumentations transforms applied to the `before_tfm_img` image
97 before passing it to the model for inference.
98 before_tfm_img: original image before being pre-processed for inference.
99 height: height of output image from icevision `predict` function.
100 width: width of output image from icevision `predict` function.
101
102 Returns
103 -------
104 height and width of the image coming out of the inference pipeline, after removing padding
105 """
106 if get_transform(tfms_list, "Pad") is not None:
107 before_pad_h, before_pad_w, _ = np.array(before_tfm_img).shape
108
109 t = get_transform(tfms_list, "SmallestMaxSize")
110 if t is not None:
111 presize = t.max_size
112 height, width = func_max_size(before_pad_h, before_pad_w, presize, min)
113
114 t = get_transform(tfms_list, "LongestMaxSize")
115 if t is not None:
116 size = t.max_size
117 height, width = func_max_size(before_pad_h, before_pad_w, size, max)
118
119 return height, width
120
121
122 def py3round(number: float) -> int:
123 """
124 Unified rounding in all python versions. Used by albumentations.
125
126 Parameters
127 ----------
128 number: float to round.
129
130 Returns
131 -------
132 Rounded number
133 """
134 if abs(round(number) - number) == 0.5:
135 return int(2.0 * round(number / 2.0))
136
137 return int(round(number))
138
139
140 def func_max_size(
141 height: int, width: int, max_size: int, func: Callable[[int, int], int]
142 ) -> Tuple[int, int]:
143 """
144 Calculate rescaled height and width of the image in question wrt to a specific size.
145
146 Parameters
147 ----------
148 height: height of the image in question.
149 width: width of the image in question.
150 max_size: size wrt the image needs to be rescaled (resized).
151 func: min/max. Whether to compare max_size to the smallest/longest of the image dims.
152
153 Returns
154 -------
155 Rescaled height and width
156 """
157 scale = max_size / float(func(width, height))
158
159 if scale != 1.0:
160 height, width = tuple(py3round(dim * scale) for dim in (height, width))
161 return height, width
162
163
164 def get_transform(tfms_list: List[Any], t: str) -> Any:
165 """
166 Extract transform `t` from `tfms_list`.
167
168 Parameters
169 ----------
170 tfms_list: list of albumentations transforms.
171 t: name (str) of the transform to look for and return from within `tfms_list`.
172
173 Returns
174 -------
175 The `t` transform if found inside `tfms_list`, otherwise None.
176 """
177 for el in tfms_list:
178 if t in str(type(el)):
179 return el
180 return None
181
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/icevision/tfms/albumentations/albumentations_helpers.py b/icevision/tfms/albumentations/albumentations_helpers.py
--- a/icevision/tfms/albumentations/albumentations_helpers.py
+++ b/icevision/tfms/albumentations/albumentations_helpers.py
@@ -32,7 +32,7 @@
g_shift_limit=10,
b_shift_limit=10,
),
- lightning: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),
+ lighting: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),
blur: Optional[A.Blur] = A.Blur(blur_limit=(1, 3)),
crop_fn: Optional[A.DualTransform] = partial(A.RandomSizedBBoxSafeCrop, p=0.5),
pad: Optional[A.DualTransform] = partial(
@@ -54,7 +54,7 @@
is not applied.
rgb_shift: Randomly shift values for each channel of RGB image. If `None` this
transform is not applied.
- lightning: Randomly changes Brightness and Contrast. If `None` this transform
+ lighting: Randomly changes Brightness and Contrast. If `None` this transform
is not applied.
blur: Randomly blur the image. If `None` this transform is not applied.
crop_fn: Randomly crop the image. If `None` this transform is not applied.
@@ -71,7 +71,7 @@
tfms = []
tfms += [resize(presize, A.SmallestMaxSize) if presize is not None else None]
- tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lightning, blur]
+ tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lighting, blur]
# Resize as the last transforms to reduce the number of artificial artifacts created
if crop_fn is not None:
crop = crop_fn(height=height, width=width)
| {"golden_diff": "diff --git a/icevision/tfms/albumentations/albumentations_helpers.py b/icevision/tfms/albumentations/albumentations_helpers.py\n--- a/icevision/tfms/albumentations/albumentations_helpers.py\n+++ b/icevision/tfms/albumentations/albumentations_helpers.py\n@@ -32,7 +32,7 @@\n g_shift_limit=10,\n b_shift_limit=10,\n ),\n- lightning: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),\n+ lighting: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),\n blur: Optional[A.Blur] = A.Blur(blur_limit=(1, 3)),\n crop_fn: Optional[A.DualTransform] = partial(A.RandomSizedBBoxSafeCrop, p=0.5),\n pad: Optional[A.DualTransform] = partial(\n@@ -54,7 +54,7 @@\n is not applied.\n rgb_shift: Randomly shift values for each channel of RGB image. If `None` this\n transform is not applied.\n- lightning: Randomly changes Brightness and Contrast. If `None` this transform\n+ lighting: Randomly changes Brightness and Contrast. If `None` this transform\n is not applied.\n blur: Randomly blur the image. If `None` this transform is not applied.\n crop_fn: Randomly crop the image. If `None` this transform is not applied.\n@@ -71,7 +71,7 @@\n \n tfms = []\n tfms += [resize(presize, A.SmallestMaxSize) if presize is not None else None]\n- tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lightning, blur]\n+ tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lighting, blur]\n # Resize as the last transforms to reduce the number of artificial artifacts created\n if crop_fn is not None:\n crop = crop_fn(height=height, width=width)\n", "issue": "Typo lightning -> lighting in Albumentations helper\nThere are several instances in the codebase with the typo `lightning` when the intended term is `lighting`\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L35\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L57\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L74\r\n\r\n\nTypo lightning -> lighting in Albumentations helper\nThere are several instances in the codebase with the typo `lightning` when the intended term is `lighting`\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L35\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L57\r\n\r\nhttps://github.com/airctic/icevision/blob/af0693e355423e9879c97ab903f324db080411f4/icevision/tfms/albumentations/albumentations_helpers.py#L74\r\n\r\n\n", "before_files": [{"content": "__all__ = [\"aug_tfms\", \"resize\", \"resize_and_pad\", \"get_size_without_padding\"]\n\nimport albumentations as A\n\nfrom icevision.imports import *\nfrom icevision.core import *\n\n\ndef resize(size, ratio_resize=A.LongestMaxSize):\n return ratio_resize(size) if isinstance(size, int) else A.Resize(*size[::-1])\n\n\ndef resize_and_pad(\n size: Union[int, Tuple[int, int]],\n pad: A.DualTransform = partial(\n A.PadIfNeeded, border_mode=cv2.BORDER_CONSTANT, value=[124, 116, 104]\n ),\n):\n width, height = (size, size) if isinstance(size, int) else size\n return [resize(size), pad(min_height=height, min_width=width)]\n\n\ndef aug_tfms(\n size: Union[int, Tuple[int, int]],\n 
presize: Optional[Union[int, Tuple[int, int]]] = None,\n horizontal_flip: Optional[A.HorizontalFlip] = A.HorizontalFlip(),\n shift_scale_rotate: Optional[A.ShiftScaleRotate] = A.ShiftScaleRotate(\n rotate_limit=15,\n ),\n rgb_shift: Optional[A.RGBShift] = A.RGBShift(\n r_shift_limit=10,\n g_shift_limit=10,\n b_shift_limit=10,\n ),\n lightning: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),\n blur: Optional[A.Blur] = A.Blur(blur_limit=(1, 3)),\n crop_fn: Optional[A.DualTransform] = partial(A.RandomSizedBBoxSafeCrop, p=0.5),\n pad: Optional[A.DualTransform] = partial(\n A.PadIfNeeded, border_mode=cv2.BORDER_CONSTANT, value=[124, 116, 104]\n ),\n) -> List[A.BasicTransform]:\n \"\"\"Collection of useful augmentation transforms.\n\n # Arguments\n size: The final size of the image. If an `int` is given, the maximum size of\n the image is rescaled, maintaing aspect ratio. If a `tuple` is given,\n the image is rescaled to have that exact size (width, height).\n presize: Rescale the image before applying other transfroms. If `None` this\n transform is not applied. First introduced by fastai,this technique is\n explained in their book in [this](https://github.com/fastai/fastbook/blob/master/05_pet_breeds.ipynb)\n chapter (tip: search for \"Presizing\").\n horizontal_flip: Flip around the y-axis. If `None` this transform is not applied.\n shift_scale_rotate: Randomly shift, scale, and rotate. If `None` this transform\n is not applied.\n rgb_shift: Randomly shift values for each channel of RGB image. If `None` this\n transform is not applied.\n lightning: Randomly changes Brightness and Contrast. If `None` this transform\n is not applied.\n blur: Randomly blur the image. If `None` this transform is not applied.\n crop_fn: Randomly crop the image. If `None` this transform is not applied.\n Use `partial` to saturate other parameters of the class.\n pad: Pad the image to `size`, squaring the image if `size` is an `int`.\n If `None` this transform is not applied. 
Use `partial` to sature other\n parameters of the class.\n\n # Returns\n A list of albumentations transforms.\n \"\"\"\n\n width, height = (size, size) if isinstance(size, int) else size\n\n tfms = []\n tfms += [resize(presize, A.SmallestMaxSize) if presize is not None else None]\n tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lightning, blur]\n # Resize as the last transforms to reduce the number of artificial artifacts created\n if crop_fn is not None:\n crop = crop_fn(height=height, width=width)\n tfms += [A.OneOrOther(crop, resize(size), p=crop.p)]\n else:\n tfms += [resize(size)]\n tfms += [pad(min_height=height, min_width=width) if pad is not None else None]\n\n tfms = [tfm for tfm in tfms if tfm is not None]\n\n return tfms\n\n\ndef get_size_without_padding(\n tfms_list: List[Any], before_tfm_img: PIL.Image.Image, height: int, width: int\n) -> Tuple[int, int]:\n \"\"\"\n Infer the height and width of the pre-processed image after removing padding.\n\n Parameters\n ----------\n tfms_list: list of albumentations transforms applied to the `before_tfm_img` image\n before passing it to the model for inference.\n before_tfm_img: original image before being pre-processed for inference.\n height: height of output image from icevision `predict` function.\n width: width of output image from icevision `predict` function.\n\n Returns\n -------\n height and width of the image coming out of the inference pipeline, after removing padding\n \"\"\"\n if get_transform(tfms_list, \"Pad\") is not None:\n before_pad_h, before_pad_w, _ = np.array(before_tfm_img).shape\n\n t = get_transform(tfms_list, \"SmallestMaxSize\")\n if t is not None:\n presize = t.max_size\n height, width = func_max_size(before_pad_h, before_pad_w, presize, min)\n\n t = get_transform(tfms_list, \"LongestMaxSize\")\n if t is not None:\n size = t.max_size\n height, width = func_max_size(before_pad_h, before_pad_w, size, max)\n\n return height, width\n\n\ndef py3round(number: float) -> int:\n \"\"\"\n Unified rounding in all python versions. Used by albumentations.\n\n Parameters\n ----------\n number: float to round.\n\n Returns\n -------\n Rounded number\n \"\"\"\n if abs(round(number) - number) == 0.5:\n return int(2.0 * round(number / 2.0))\n\n return int(round(number))\n\n\ndef func_max_size(\n height: int, width: int, max_size: int, func: Callable[[int, int], int]\n) -> Tuple[int, int]:\n \"\"\"\n Calculate rescaled height and width of the image in question wrt to a specific size.\n\n Parameters\n ----------\n height: height of the image in question.\n width: width of the image in question.\n max_size: size wrt the image needs to be rescaled (resized).\n func: min/max. 
Whether to compare max_size to the smallest/longest of the image dims.\n\n Returns\n -------\n Rescaled height and width\n \"\"\"\n scale = max_size / float(func(width, height))\n\n if scale != 1.0:\n height, width = tuple(py3round(dim * scale) for dim in (height, width))\n return height, width\n\n\ndef get_transform(tfms_list: List[Any], t: str) -> Any:\n \"\"\"\n Extract transform `t` from `tfms_list`.\n\n Parameters\n ----------\n tfms_list: list of albumentations transforms.\n t: name (str) of the transform to look for and return from within `tfms_list`.\n\n Returns\n -------\n The `t` transform if found inside `tfms_list`, otherwise None.\n \"\"\"\n for el in tfms_list:\n if t in str(type(el)):\n return el\n return None\n", "path": "icevision/tfms/albumentations/albumentations_helpers.py"}], "after_files": [{"content": "__all__ = [\"aug_tfms\", \"resize\", \"resize_and_pad\", \"get_size_without_padding\"]\n\nimport albumentations as A\n\nfrom icevision.imports import *\nfrom icevision.core import *\n\n\ndef resize(size, ratio_resize=A.LongestMaxSize):\n return ratio_resize(size) if isinstance(size, int) else A.Resize(*size[::-1])\n\n\ndef resize_and_pad(\n size: Union[int, Tuple[int, int]],\n pad: A.DualTransform = partial(\n A.PadIfNeeded, border_mode=cv2.BORDER_CONSTANT, value=[124, 116, 104]\n ),\n):\n width, height = (size, size) if isinstance(size, int) else size\n return [resize(size), pad(min_height=height, min_width=width)]\n\n\ndef aug_tfms(\n size: Union[int, Tuple[int, int]],\n presize: Optional[Union[int, Tuple[int, int]]] = None,\n horizontal_flip: Optional[A.HorizontalFlip] = A.HorizontalFlip(),\n shift_scale_rotate: Optional[A.ShiftScaleRotate] = A.ShiftScaleRotate(\n rotate_limit=15,\n ),\n rgb_shift: Optional[A.RGBShift] = A.RGBShift(\n r_shift_limit=10,\n g_shift_limit=10,\n b_shift_limit=10,\n ),\n lighting: Optional[A.RandomBrightnessContrast] = A.RandomBrightnessContrast(),\n blur: Optional[A.Blur] = A.Blur(blur_limit=(1, 3)),\n crop_fn: Optional[A.DualTransform] = partial(A.RandomSizedBBoxSafeCrop, p=0.5),\n pad: Optional[A.DualTransform] = partial(\n A.PadIfNeeded, border_mode=cv2.BORDER_CONSTANT, value=[124, 116, 104]\n ),\n) -> List[A.BasicTransform]:\n \"\"\"Collection of useful augmentation transforms.\n\n # Arguments\n size: The final size of the image. If an `int` is given, the maximum size of\n the image is rescaled, maintaing aspect ratio. If a `tuple` is given,\n the image is rescaled to have that exact size (width, height).\n presize: Rescale the image before applying other transfroms. If `None` this\n transform is not applied. First introduced by fastai,this technique is\n explained in their book in [this](https://github.com/fastai/fastbook/blob/master/05_pet_breeds.ipynb)\n chapter (tip: search for \"Presizing\").\n horizontal_flip: Flip around the y-axis. If `None` this transform is not applied.\n shift_scale_rotate: Randomly shift, scale, and rotate. If `None` this transform\n is not applied.\n rgb_shift: Randomly shift values for each channel of RGB image. If `None` this\n transform is not applied.\n lighting: Randomly changes Brightness and Contrast. If `None` this transform\n is not applied.\n blur: Randomly blur the image. If `None` this transform is not applied.\n crop_fn: Randomly crop the image. If `None` this transform is not applied.\n Use `partial` to saturate other parameters of the class.\n pad: Pad the image to `size`, squaring the image if `size` is an `int`.\n If `None` this transform is not applied. 
Use `partial` to sature other\n parameters of the class.\n\n # Returns\n A list of albumentations transforms.\n \"\"\"\n\n width, height = (size, size) if isinstance(size, int) else size\n\n tfms = []\n tfms += [resize(presize, A.SmallestMaxSize) if presize is not None else None]\n tfms += [horizontal_flip, shift_scale_rotate, rgb_shift, lighting, blur]\n # Resize as the last transforms to reduce the number of artificial artifacts created\n if crop_fn is not None:\n crop = crop_fn(height=height, width=width)\n tfms += [A.OneOrOther(crop, resize(size), p=crop.p)]\n else:\n tfms += [resize(size)]\n tfms += [pad(min_height=height, min_width=width) if pad is not None else None]\n\n tfms = [tfm for tfm in tfms if tfm is not None]\n\n return tfms\n\n\ndef get_size_without_padding(\n tfms_list: List[Any], before_tfm_img: PIL.Image.Image, height: int, width: int\n) -> Tuple[int, int]:\n \"\"\"\n Infer the height and width of the pre-processed image after removing padding.\n\n Parameters\n ----------\n tfms_list: list of albumentations transforms applied to the `before_tfm_img` image\n before passing it to the model for inference.\n before_tfm_img: original image before being pre-processed for inference.\n height: height of output image from icevision `predict` function.\n width: width of output image from icevision `predict` function.\n\n Returns\n -------\n height and width of the image coming out of the inference pipeline, after removing padding\n \"\"\"\n if get_transform(tfms_list, \"Pad\") is not None:\n before_pad_h, before_pad_w, _ = np.array(before_tfm_img).shape\n\n t = get_transform(tfms_list, \"SmallestMaxSize\")\n if t is not None:\n presize = t.max_size\n height, width = func_max_size(before_pad_h, before_pad_w, presize, min)\n\n t = get_transform(tfms_list, \"LongestMaxSize\")\n if t is not None:\n size = t.max_size\n height, width = func_max_size(before_pad_h, before_pad_w, size, max)\n\n return height, width\n\n\ndef py3round(number: float) -> int:\n \"\"\"\n Unified rounding in all python versions. Used by albumentations.\n\n Parameters\n ----------\n number: float to round.\n\n Returns\n -------\n Rounded number\n \"\"\"\n if abs(round(number) - number) == 0.5:\n return int(2.0 * round(number / 2.0))\n\n return int(round(number))\n\n\ndef func_max_size(\n height: int, width: int, max_size: int, func: Callable[[int, int], int]\n) -> Tuple[int, int]:\n \"\"\"\n Calculate rescaled height and width of the image in question wrt to a specific size.\n\n Parameters\n ----------\n height: height of the image in question.\n width: width of the image in question.\n max_size: size wrt the image needs to be rescaled (resized).\n func: min/max. Whether to compare max_size to the smallest/longest of the image dims.\n\n Returns\n -------\n Rescaled height and width\n \"\"\"\n scale = max_size / float(func(width, height))\n\n if scale != 1.0:\n height, width = tuple(py3round(dim * scale) for dim in (height, width))\n return height, width\n\n\ndef get_transform(tfms_list: List[Any], t: str) -> Any:\n \"\"\"\n Extract transform `t` from `tfms_list`.\n\n Parameters\n ----------\n tfms_list: list of albumentations transforms.\n t: name (str) of the transform to look for and return from within `tfms_list`.\n\n Returns\n -------\n The `t` transform if found inside `tfms_list`, otherwise None.\n \"\"\"\n for el in tfms_list:\n if t in str(type(el)):\n return el\n return None\n", "path": "icevision/tfms/albumentations/albumentations_helpers.py"}]} | 2,876 | 447 |
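Editorial note on the icevision row above: the golden diff is a pure rename of the misspelled `lightning` keyword of `aug_tfms` to `lighting`. The snippet below is a minimal usage sketch of the patched signature, not code from the repository; the import path follows the file location shown in the row, and the size and transform values are illustrative assumptions.

```python
import albumentations as A
from icevision.tfms.albumentations.albumentations_helpers import aug_tfms

# After the patch the brightness/contrast transform is passed as `lighting`
# (previously the misspelled `lightning` keyword).
tfms = aug_tfms(
    size=384,                                     # illustrative target size
    presize=512,                                  # illustrative presize
    lighting=A.RandomBrightnessContrast(p=0.5),   # renamed keyword argument
)
print(len(tfms))  # aug_tfms returns a plain list of albumentations transforms
```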
gh_patches_debug_6977 | rasdani/github-patches | git_diff | systemd__mkosi-2315 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`sh` unavailable in sandbox on non `usr`-merged systems
mkosi's sandbox [mounts `/usr:/usr`](https://github.com/systemd/mkosi/blob/14b07c74d5b3a8b0f44d9a5c733f7902c7859417/mkosi/sandbox.py#L88) if no ToolsTree is specified. On NixOS, which is not usr-merged, this causes problems when programs have runtime dependencies on absolute paths in `(/usr)/bin`. This is the case for `apt-get install`, which tries to find an `sh` at runtime in `/usr/bin` or `/bin`. That fails on NixOS because only `/usr` is mounted, while `sh` is in `/bin/sh`.
Two possible workarounds that I have in mind:
1. (Preferred) Find a way to tell `apt-get` which shell to use. `dpkg` [respects the `SHELL` environment variable](https://man7.org/linux/man-pages/man1/dpkg.1.html), but I couldn't find a similar option for `apt-get`. If there were such an option, we could simply tell `apt-get` to use the `sh` from the Nix store that's already mounted.
2. Loosen the sandbox and mount `/bin(/sh)` into the environment.
If you have any input regarding the first proposed solution, let me know. Otherwise, I'd be happy to implement the second solution too.
Cheers, Moritz
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkosi/sandbox.py`
Content:
```
1 # SPDX-License-Identifier: LGPL-2.1+
2 import enum
3 import logging
4 import os
5 import uuid
6 from collections.abc import Sequence
7 from pathlib import Path
8 from typing import Optional
9
10 from mkosi.types import PathString
11 from mkosi.util import INVOKING_USER, flatten, one_zero
12
13
14 # https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h
15 class Capability(enum.Enum):
16 CAP_NET_ADMIN = 12
17
18
19 def have_effective_cap(capability: Capability) -> bool:
20 for line in Path("/proc/self/status").read_text().splitlines():
21 if line.startswith("CapEff:"):
22 hexcap = line.removeprefix("CapEff:").strip()
23 break
24 else:
25 logging.warning(f"\"CapEff:\" not found in /proc/self/status, assuming we don't have {capability}")
26 return False
27
28 return (int(hexcap, 16) & (1 << capability.value)) != 0
29
30
31 def finalize_passwd_mounts(root: Path) -> list[PathString]:
32 """
33 If passwd or a related file exists in the apivfs directory, bind mount it over the host files while we
34 run the command, to make sure that the command we run uses user/group information from the apivfs
35 directory instead of from the host.
36 """
37 options: list[PathString] = []
38
39 for f in ("passwd", "group", "shadow", "gshadow"):
40 options += ["--ro-bind-try", root / "etc" / f, f"/etc/{f}"]
41
42 return options
43
44
45 def finalize_crypto_mounts(tools: Path = Path("/")) -> list[PathString]:
46 mounts = [
47 (tools / subdir, Path("/") / subdir)
48 for subdir in (
49 Path("etc/pki"),
50 Path("etc/ssl"),
51 Path("etc/crypto-policies"),
52 Path("etc/ca-certificates"),
53 Path("etc/pacman.d/gnupg"),
54 Path("var/lib/ca-certificates"),
55 )
56 if (tools / subdir).exists()
57 ]
58
59 return flatten(
60 ["--ro-bind", src, target]
61 for src, target
62 in sorted(set(mounts), key=lambda s: s[1])
63 )
64
65
66 def sandbox_cmd(
67 *,
68 network: bool = False,
69 devices: bool = False,
70 scripts: Optional[Path] = None,
71 tools: Path = Path("/"),
72 relaxed: bool = False,
73 options: Sequence[PathString] = (),
74 ) -> list[PathString]:
75 cmdline: list[PathString] = []
76
77 if not relaxed:
78 # We want to use an empty subdirectory in the host's /var/tmp as the sandbox's /var/tmp. To make sure it only
79 # gets created when we run the sandboxed command and cleaned up when the sandboxed command exits, we create it
80 # using shell.
81 vartmp = f"/var/tmp/mkosi-var-tmp-{uuid.uuid4().hex[:16]}"
82 cmdline += ["sh", "-c", f"trap 'rm -rf {vartmp}' EXIT && mkdir --mode 1777 {vartmp} && $0 \"$@\""]
83 else:
84 vartmp = None
85
86 cmdline += [
87 "bwrap",
88 "--ro-bind", tools / "usr", "/usr",
89 *(["--unshare-net"] if not network and have_effective_cap(Capability.CAP_NET_ADMIN) else []),
90 "--die-with-parent",
91 "--proc", "/proc",
92 "--setenv", "SYSTEMD_OFFLINE", one_zero(network),
93 ]
94
95 if relaxed:
96 cmdline += ["--bind", "/tmp", "/tmp"]
97 else:
98 cmdline += [
99 "--tmpfs", "/tmp",
100 "--unshare-ipc",
101 ]
102
103 if (tools / "nix/store").exists():
104 cmdline += ["--bind", tools / "nix/store", "/nix/store"]
105
106 if devices or relaxed:
107 cmdline += [
108 "--bind", "/sys", "/sys",
109 "--bind", "/run", "/run",
110 "--dev-bind", "/dev", "/dev",
111 ]
112 else:
113 cmdline += ["--dev", "/dev"]
114
115 if relaxed:
116 dirs = ("/etc", "/opt", "/srv", "/media", "/mnt", "/var", os.fspath(INVOKING_USER.home()))
117
118 for d in dirs:
119 if Path(d).exists():
120 cmdline += ["--bind", d, d]
121
122 if len(Path.cwd().parents) >= 2:
123 # `Path.parents` only supports slices and negative indexing from Python 3.10 onwards.
124 # TODO: Remove list() when we depend on Python 3.10 or newer.
125 d = os.fspath(list(Path.cwd().parents)[-2])
126 elif len(Path.cwd().parents) == 1:
127 d = os.fspath(Path.cwd())
128 else:
129 d = ""
130
131 if d and d not in (*dirs, "/home", "/usr", "/nix", "/tmp"):
132 cmdline += ["--bind", d, d]
133
134 if vartmp:
135 cmdline += ["--bind", vartmp, "/var/tmp"]
136
137 for d in ("bin", "sbin", "lib", "lib32", "lib64"):
138 if (p := tools / d).is_symlink():
139 cmdline += ["--symlink", p.readlink(), Path("/") / p.relative_to(tools)]
140
141 path = "/usr/bin:/usr/sbin" if tools != Path("/") else os.environ["PATH"]
142
143 cmdline += [
144 "--setenv", "PATH", f"{scripts or ''}:{path}",
145 *options,
146 ]
147
148 if not relaxed:
149 cmdline += ["--symlink", "../proc/self/mounts", "/etc/mtab"]
150
151 # If we're using /usr from a tools tree, we have to use /etc/alternatives from the tools tree as well if it
152 # exists since that points directly back to /usr. Apply this after the options so the caller can mount
153 # something else to /etc without overriding this mount. In relaxed mode, we only do this if /etc/alternatives
154 # already exists on the host as otherwise we'd modify the host's /etc by creating the mountpoint ourselves (or
155 # fail when trying to create it).
156 if (tools / "etc/alternatives").exists() and (not relaxed or Path("/etc/alternatives").exists()):
157 cmdline += ["--ro-bind", tools / "etc/alternatives", "/etc/alternatives"]
158
159 if scripts:
160 cmdline += ["--ro-bind", scripts, scripts]
161
162 if network and not relaxed:
163 cmdline += ["--bind", "/etc/resolv.conf", "/etc/resolv.conf"]
164
165 # bubblewrap creates everything with a restricted mode so relax stuff as needed.
166 ops = []
167 if not devices:
168 ops += ["chmod 1777 /dev/shm"]
169 if not relaxed:
170 ops += ["chmod 755 /etc"]
171 ops += ["exec $0 \"$@\""]
172
173 cmdline += ["sh", "-c", " && ".join(ops)]
174
175 return cmdline
176
177
178 def apivfs_cmd(root: Path) -> list[PathString]:
179 return [
180 "bwrap",
181 "--dev-bind", "/", "/",
182 "--tmpfs", root / "run",
183 "--tmpfs", root / "tmp",
184 "--bind", "/var/tmp", root / "var/tmp",
185 "--proc", root / "proc",
186 "--dev", root / "dev",
187 # APIVFS generally means chrooting is going to happen so unset TMPDIR just to be safe.
188 "--unsetenv", "TMPDIR",
189 # Make sure /etc/machine-id is not overwritten by any package manager post install scripts.
190 "--ro-bind-try", root / "etc/machine-id", root / "etc/machine-id",
191 *finalize_passwd_mounts(root),
192 "sh", "-c",
193 f"chmod 1777 {root / 'tmp'} {root / 'var/tmp'} {root / 'dev/shm'} && "
194 f"chmod 755 {root / 'run'} && "
195 # Make sure anything running in the root directory thinks it's in a container. $container can't always be
196 # accessed so we write /run/host/container-manager as well which is always accessible.
197 f"mkdir -m 755 {root}/run/host && echo mkosi >{root}/run/host/container-manager && "
198 "exec $0 \"$@\"",
199 ]
200
201
202 def chroot_cmd(root: Path, *, resolve: bool = False, options: Sequence[PathString] = ()) -> list[PathString]:
203 cmdline: list[PathString] = [
204 "sh", "-c",
205 f"trap 'rm -rf {root / 'work'}' EXIT && "
206 # /etc/resolv.conf can be a dangling symlink to /run/systemd/resolve/stub-resolv.conf. Bubblewrap tries to call
207 # mkdir() on each component of the path which means it will try to call
208 # mkdir(/run/systemd/resolve/stub-resolv.conf) which will fail unless /run/systemd/resolve exists already so
209 # we make sure that it already exists.
210 f"mkdir -p -m 755 {root / 'work'} {root / 'run/systemd'} {root / 'run/systemd/resolve'} && "
211 # No exec here because we need to clean up the /work directory afterwards.
212 f"$0 \"$@\"",
213 "bwrap",
214 "--dev-bind", root, "/",
215 "--setenv", "container", "mkosi",
216 "--setenv", "HOME", "/",
217 "--setenv", "PATH", "/work/scripts:/usr/bin:/usr/sbin",
218 ]
219
220 if resolve:
221 cmdline += ["--ro-bind-try", "/etc/resolv.conf", "/etc/resolv.conf"]
222
223 cmdline += options
224
225 return apivfs_cmd(root) + cmdline
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mkosi/sandbox.py b/mkosi/sandbox.py
--- a/mkosi/sandbox.py
+++ b/mkosi/sandbox.py
@@ -137,6 +137,8 @@
for d in ("bin", "sbin", "lib", "lib32", "lib64"):
if (p := tools / d).is_symlink():
cmdline += ["--symlink", p.readlink(), Path("/") / p.relative_to(tools)]
+ elif p.is_dir():
+ cmdline += ["--ro-bind", p, Path("/") / p.relative_to(tools)]
path = "/usr/bin:/usr/sbin" if tools != Path("/") else os.environ["PATH"]
| {"golden_diff": "diff --git a/mkosi/sandbox.py b/mkosi/sandbox.py\n--- a/mkosi/sandbox.py\n+++ b/mkosi/sandbox.py\n@@ -137,6 +137,8 @@\n for d in (\"bin\", \"sbin\", \"lib\", \"lib32\", \"lib64\"):\n if (p := tools / d).is_symlink():\n cmdline += [\"--symlink\", p.readlink(), Path(\"/\") / p.relative_to(tools)]\n+ elif p.is_dir():\n+ cmdline += [\"--ro-bind\", p, Path(\"/\") / p.relative_to(tools)]\n \n path = \"/usr/bin:/usr/sbin\" if tools != Path(\"/\") else os.environ[\"PATH\"]\n", "issue": "`sh` unavailable in sandbox on non `usr`-merged systems\nmkosi's sandbox [mounts `/usr:/usr`](https://github.com/systemd/mkosi/blob/14b07c74d5b3a8b0f44d9a5c733f7902c7859417/mkosi/sandbox.py#L88) if no ToolsTree is specified. On NixOS, which is not usr-merged, this causes problems when programs have runtime dependencies on absolute paths in `(/usr)/bin`. This is the case for `apt-get install`, that tries to find an `sh` at runtime in `/usr/bin` or `/bin`. That fails on NixOS, due to only `/usr` being mounted, while `sh` is in `/bin/sh`.\r\n\r\nTwo possible workarounds that I have in mind:\r\n1. (Preferred) Find a way to tell `apt-get` which shell to use. `dpkg` [respects the `SHELL` environment variable](https://man7.org/linux/man-pages/man1/dpkg.1.html), but I couldn't find a similar option for `apt-get`. If there would be such an option, we should simply tell `apt-get` to use the `sh` from the Nix store that's already mounted.\r\n2. Loosen the sandbox and mount `/bin(/sh)` into the environment. \r\n\r\nIf you have any input regarding the first proposed solution, let me know. Otherwise, I'd be happy to implement the second solution too.\r\n\r\nCheers, Moritz\n", "before_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\nimport enum\nimport logging\nimport os\nimport uuid\nfrom collections.abc import Sequence\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom mkosi.types import PathString\nfrom mkosi.util import INVOKING_USER, flatten, one_zero\n\n\n# https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h\nclass Capability(enum.Enum):\n CAP_NET_ADMIN = 12\n\n\ndef have_effective_cap(capability: Capability) -> bool:\n for line in Path(\"/proc/self/status\").read_text().splitlines():\n if line.startswith(\"CapEff:\"):\n hexcap = line.removeprefix(\"CapEff:\").strip()\n break\n else:\n logging.warning(f\"\\\"CapEff:\\\" not found in /proc/self/status, assuming we don't have {capability}\")\n return False\n\n return (int(hexcap, 16) & (1 << capability.value)) != 0\n\n\ndef finalize_passwd_mounts(root: Path) -> list[PathString]:\n \"\"\"\n If passwd or a related file exists in the apivfs directory, bind mount it over the host files while we\n run the command, to make sure that the command we run uses user/group information from the apivfs\n directory instead of from the host.\n \"\"\"\n options: list[PathString] = []\n\n for f in (\"passwd\", \"group\", \"shadow\", \"gshadow\"):\n options += [\"--ro-bind-try\", root / \"etc\" / f, f\"/etc/{f}\"]\n\n return options\n\n\ndef finalize_crypto_mounts(tools: Path = Path(\"/\")) -> list[PathString]:\n mounts = [\n (tools / subdir, Path(\"/\") / subdir)\n for subdir in (\n Path(\"etc/pki\"),\n Path(\"etc/ssl\"),\n Path(\"etc/crypto-policies\"),\n Path(\"etc/ca-certificates\"),\n Path(\"etc/pacman.d/gnupg\"),\n Path(\"var/lib/ca-certificates\"),\n )\n if (tools / subdir).exists()\n ]\n\n return flatten(\n [\"--ro-bind\", src, target]\n for src, target\n in sorted(set(mounts), key=lambda s: s[1])\n )\n\n\ndef 
sandbox_cmd(\n *,\n network: bool = False,\n devices: bool = False,\n scripts: Optional[Path] = None,\n tools: Path = Path(\"/\"),\n relaxed: bool = False,\n options: Sequence[PathString] = (),\n) -> list[PathString]:\n cmdline: list[PathString] = []\n\n if not relaxed:\n # We want to use an empty subdirectory in the host's /var/tmp as the sandbox's /var/tmp. To make sure it only\n # gets created when we run the sandboxed command and cleaned up when the sandboxed command exits, we create it\n # using shell.\n vartmp = f\"/var/tmp/mkosi-var-tmp-{uuid.uuid4().hex[:16]}\"\n cmdline += [\"sh\", \"-c\", f\"trap 'rm -rf {vartmp}' EXIT && mkdir --mode 1777 {vartmp} && $0 \\\"$@\\\"\"]\n else:\n vartmp = None\n\n cmdline += [\n \"bwrap\",\n \"--ro-bind\", tools / \"usr\", \"/usr\",\n *([\"--unshare-net\"] if not network and have_effective_cap(Capability.CAP_NET_ADMIN) else []),\n \"--die-with-parent\",\n \"--proc\", \"/proc\",\n \"--setenv\", \"SYSTEMD_OFFLINE\", one_zero(network),\n ]\n\n if relaxed:\n cmdline += [\"--bind\", \"/tmp\", \"/tmp\"]\n else:\n cmdline += [\n \"--tmpfs\", \"/tmp\",\n \"--unshare-ipc\",\n ]\n\n if (tools / \"nix/store\").exists():\n cmdline += [\"--bind\", tools / \"nix/store\", \"/nix/store\"]\n\n if devices or relaxed:\n cmdline += [\n \"--bind\", \"/sys\", \"/sys\",\n \"--bind\", \"/run\", \"/run\",\n \"--dev-bind\", \"/dev\", \"/dev\",\n ]\n else:\n cmdline += [\"--dev\", \"/dev\"]\n\n if relaxed:\n dirs = (\"/etc\", \"/opt\", \"/srv\", \"/media\", \"/mnt\", \"/var\", os.fspath(INVOKING_USER.home()))\n\n for d in dirs:\n if Path(d).exists():\n cmdline += [\"--bind\", d, d]\n\n if len(Path.cwd().parents) >= 2:\n # `Path.parents` only supports slices and negative indexing from Python 3.10 onwards.\n # TODO: Remove list() when we depend on Python 3.10 or newer.\n d = os.fspath(list(Path.cwd().parents)[-2])\n elif len(Path.cwd().parents) == 1:\n d = os.fspath(Path.cwd())\n else:\n d = \"\"\n\n if d and d not in (*dirs, \"/home\", \"/usr\", \"/nix\", \"/tmp\"):\n cmdline += [\"--bind\", d, d]\n\n if vartmp:\n cmdline += [\"--bind\", vartmp, \"/var/tmp\"]\n\n for d in (\"bin\", \"sbin\", \"lib\", \"lib32\", \"lib64\"):\n if (p := tools / d).is_symlink():\n cmdline += [\"--symlink\", p.readlink(), Path(\"/\") / p.relative_to(tools)]\n\n path = \"/usr/bin:/usr/sbin\" if tools != Path(\"/\") else os.environ[\"PATH\"]\n\n cmdline += [\n \"--setenv\", \"PATH\", f\"{scripts or ''}:{path}\",\n *options,\n ]\n\n if not relaxed:\n cmdline += [\"--symlink\", \"../proc/self/mounts\", \"/etc/mtab\"]\n\n # If we're using /usr from a tools tree, we have to use /etc/alternatives from the tools tree as well if it\n # exists since that points directly back to /usr. Apply this after the options so the caller can mount\n # something else to /etc without overriding this mount. 
In relaxed mode, we only do this if /etc/alternatives\n # already exists on the host as otherwise we'd modify the host's /etc by creating the mountpoint ourselves (or\n # fail when trying to create it).\n if (tools / \"etc/alternatives\").exists() and (not relaxed or Path(\"/etc/alternatives\").exists()):\n cmdline += [\"--ro-bind\", tools / \"etc/alternatives\", \"/etc/alternatives\"]\n\n if scripts:\n cmdline += [\"--ro-bind\", scripts, scripts]\n\n if network and not relaxed:\n cmdline += [\"--bind\", \"/etc/resolv.conf\", \"/etc/resolv.conf\"]\n\n # bubblewrap creates everything with a restricted mode so relax stuff as needed.\n ops = []\n if not devices:\n ops += [\"chmod 1777 /dev/shm\"]\n if not relaxed:\n ops += [\"chmod 755 /etc\"]\n ops += [\"exec $0 \\\"$@\\\"\"]\n\n cmdline += [\"sh\", \"-c\", \" && \".join(ops)]\n\n return cmdline\n\n\ndef apivfs_cmd(root: Path) -> list[PathString]:\n return [\n \"bwrap\",\n \"--dev-bind\", \"/\", \"/\",\n \"--tmpfs\", root / \"run\",\n \"--tmpfs\", root / \"tmp\",\n \"--bind\", \"/var/tmp\", root / \"var/tmp\",\n \"--proc\", root / \"proc\",\n \"--dev\", root / \"dev\",\n # APIVFS generally means chrooting is going to happen so unset TMPDIR just to be safe.\n \"--unsetenv\", \"TMPDIR\",\n # Make sure /etc/machine-id is not overwritten by any package manager post install scripts.\n \"--ro-bind-try\", root / \"etc/machine-id\", root / \"etc/machine-id\",\n *finalize_passwd_mounts(root),\n \"sh\", \"-c\",\n f\"chmod 1777 {root / 'tmp'} {root / 'var/tmp'} {root / 'dev/shm'} && \"\n f\"chmod 755 {root / 'run'} && \"\n # Make sure anything running in the root directory thinks it's in a container. $container can't always be\n # accessed so we write /run/host/container-manager as well which is always accessible.\n f\"mkdir -m 755 {root}/run/host && echo mkosi >{root}/run/host/container-manager && \"\n \"exec $0 \\\"$@\\\"\",\n ]\n\n\ndef chroot_cmd(root: Path, *, resolve: bool = False, options: Sequence[PathString] = ()) -> list[PathString]:\n cmdline: list[PathString] = [\n \"sh\", \"-c\",\n f\"trap 'rm -rf {root / 'work'}' EXIT && \"\n # /etc/resolv.conf can be a dangling symlink to /run/systemd/resolve/stub-resolv.conf. 
Bubblewrap tries to call\n # mkdir() on each component of the path which means it will try to call\n # mkdir(/run/systemd/resolve/stub-resolv.conf) which will fail unless /run/systemd/resolve exists already so\n # we make sure that it already exists.\n f\"mkdir -p -m 755 {root / 'work'} {root / 'run/systemd'} {root / 'run/systemd/resolve'} && \"\n # No exec here because we need to clean up the /work directory afterwards.\n f\"$0 \\\"$@\\\"\",\n \"bwrap\",\n \"--dev-bind\", root, \"/\",\n \"--setenv\", \"container\", \"mkosi\",\n \"--setenv\", \"HOME\", \"/\",\n \"--setenv\", \"PATH\", \"/work/scripts:/usr/bin:/usr/sbin\",\n ]\n\n if resolve:\n cmdline += [\"--ro-bind-try\", \"/etc/resolv.conf\", \"/etc/resolv.conf\"]\n\n cmdline += options\n\n return apivfs_cmd(root) + cmdline\n", "path": "mkosi/sandbox.py"}], "after_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\nimport enum\nimport logging\nimport os\nimport uuid\nfrom collections.abc import Sequence\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom mkosi.types import PathString\nfrom mkosi.util import INVOKING_USER, flatten, one_zero\n\n\n# https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h\nclass Capability(enum.Enum):\n CAP_NET_ADMIN = 12\n\n\ndef have_effective_cap(capability: Capability) -> bool:\n for line in Path(\"/proc/self/status\").read_text().splitlines():\n if line.startswith(\"CapEff:\"):\n hexcap = line.removeprefix(\"CapEff:\").strip()\n break\n else:\n logging.warning(f\"\\\"CapEff:\\\" not found in /proc/self/status, assuming we don't have {capability}\")\n return False\n\n return (int(hexcap, 16) & (1 << capability.value)) != 0\n\n\ndef finalize_passwd_mounts(root: Path) -> list[PathString]:\n \"\"\"\n If passwd or a related file exists in the apivfs directory, bind mount it over the host files while we\n run the command, to make sure that the command we run uses user/group information from the apivfs\n directory instead of from the host.\n \"\"\"\n options: list[PathString] = []\n\n for f in (\"passwd\", \"group\", \"shadow\", \"gshadow\"):\n options += [\"--ro-bind-try\", root / \"etc\" / f, f\"/etc/{f}\"]\n\n return options\n\n\ndef finalize_crypto_mounts(tools: Path = Path(\"/\")) -> list[PathString]:\n mounts = [\n (tools / subdir, Path(\"/\") / subdir)\n for subdir in (\n Path(\"etc/pki\"),\n Path(\"etc/ssl\"),\n Path(\"etc/crypto-policies\"),\n Path(\"etc/ca-certificates\"),\n Path(\"etc/pacman.d/gnupg\"),\n Path(\"var/lib/ca-certificates\"),\n )\n if (tools / subdir).exists()\n ]\n\n return flatten(\n [\"--ro-bind\", src, target]\n for src, target\n in sorted(set(mounts), key=lambda s: s[1])\n )\n\n\ndef sandbox_cmd(\n *,\n network: bool = False,\n devices: bool = False,\n scripts: Optional[Path] = None,\n tools: Path = Path(\"/\"),\n relaxed: bool = False,\n options: Sequence[PathString] = (),\n) -> list[PathString]:\n cmdline: list[PathString] = []\n\n if not relaxed:\n # We want to use an empty subdirectory in the host's /var/tmp as the sandbox's /var/tmp. 
To make sure it only\n # gets created when we run the sandboxed command and cleaned up when the sandboxed command exits, we create it\n # using shell.\n vartmp = f\"/var/tmp/mkosi-var-tmp-{uuid.uuid4().hex[:16]}\"\n cmdline += [\"sh\", \"-c\", f\"trap 'rm -rf {vartmp}' EXIT && mkdir --mode 1777 {vartmp} && $0 \\\"$@\\\"\"]\n else:\n vartmp = None\n\n cmdline += [\n \"bwrap\",\n \"--ro-bind\", tools / \"usr\", \"/usr\",\n *([\"--unshare-net\"] if not network and have_effective_cap(Capability.CAP_NET_ADMIN) else []),\n \"--die-with-parent\",\n \"--proc\", \"/proc\",\n \"--setenv\", \"SYSTEMD_OFFLINE\", one_zero(network),\n ]\n\n if relaxed:\n cmdline += [\"--bind\", \"/tmp\", \"/tmp\"]\n else:\n cmdline += [\n \"--tmpfs\", \"/tmp\",\n \"--unshare-ipc\",\n ]\n\n if (tools / \"nix/store\").exists():\n cmdline += [\"--bind\", tools / \"nix/store\", \"/nix/store\"]\n\n if devices or relaxed:\n cmdline += [\n \"--bind\", \"/sys\", \"/sys\",\n \"--bind\", \"/run\", \"/run\",\n \"--dev-bind\", \"/dev\", \"/dev\",\n ]\n else:\n cmdline += [\"--dev\", \"/dev\"]\n\n if relaxed:\n dirs = (\"/etc\", \"/opt\", \"/srv\", \"/media\", \"/mnt\", \"/var\", os.fspath(INVOKING_USER.home()))\n\n for d in dirs:\n if Path(d).exists():\n cmdline += [\"--bind\", d, d]\n\n if len(Path.cwd().parents) >= 2:\n # `Path.parents` only supports slices and negative indexing from Python 3.10 onwards.\n # TODO: Remove list() when we depend on Python 3.10 or newer.\n d = os.fspath(list(Path.cwd().parents)[-2])\n elif len(Path.cwd().parents) == 1:\n d = os.fspath(Path.cwd())\n else:\n d = \"\"\n\n if d and d not in (*dirs, \"/home\", \"/usr\", \"/nix\", \"/tmp\"):\n cmdline += [\"--bind\", d, d]\n\n if vartmp:\n cmdline += [\"--bind\", vartmp, \"/var/tmp\"]\n\n for d in (\"bin\", \"sbin\", \"lib\", \"lib32\", \"lib64\"):\n if (p := tools / d).is_symlink():\n cmdline += [\"--symlink\", p.readlink(), Path(\"/\") / p.relative_to(tools)]\n elif p.is_dir():\n cmdline += [\"--ro-bind\", p, Path(\"/\") / p.relative_to(tools)]\n\n path = \"/usr/bin:/usr/sbin\" if tools != Path(\"/\") else os.environ[\"PATH\"]\n\n cmdline += [\n \"--setenv\", \"PATH\", f\"{scripts or ''}:{path}\",\n *options,\n ]\n\n if not relaxed:\n cmdline += [\"--symlink\", \"../proc/self/mounts\", \"/etc/mtab\"]\n\n # If we're using /usr from a tools tree, we have to use /etc/alternatives from the tools tree as well if it\n # exists since that points directly back to /usr. Apply this after the options so the caller can mount\n # something else to /etc without overriding this mount. 
In relaxed mode, we only do this if /etc/alternatives\n # already exists on the host as otherwise we'd modify the host's /etc by creating the mountpoint ourselves (or\n # fail when trying to create it).\n if (tools / \"etc/alternatives\").exists() and (not relaxed or Path(\"/etc/alternatives\").exists()):\n cmdline += [\"--ro-bind\", tools / \"etc/alternatives\", \"/etc/alternatives\"]\n\n if scripts:\n cmdline += [\"--ro-bind\", scripts, scripts]\n\n if network and not relaxed:\n cmdline += [\"--bind\", \"/etc/resolv.conf\", \"/etc/resolv.conf\"]\n\n # bubblewrap creates everything with a restricted mode so relax stuff as needed.\n ops = []\n if not devices:\n ops += [\"chmod 1777 /dev/shm\"]\n if not relaxed:\n ops += [\"chmod 755 /etc\"]\n ops += [\"exec $0 \\\"$@\\\"\"]\n\n cmdline += [\"sh\", \"-c\", \" && \".join(ops)]\n\n return cmdline\n\n\ndef apivfs_cmd(root: Path) -> list[PathString]:\n return [\n \"bwrap\",\n \"--dev-bind\", \"/\", \"/\",\n \"--tmpfs\", root / \"run\",\n \"--tmpfs\", root / \"tmp\",\n \"--bind\", \"/var/tmp\", root / \"var/tmp\",\n \"--proc\", root / \"proc\",\n \"--dev\", root / \"dev\",\n # APIVFS generally means chrooting is going to happen so unset TMPDIR just to be safe.\n \"--unsetenv\", \"TMPDIR\",\n # Make sure /etc/machine-id is not overwritten by any package manager post install scripts.\n \"--ro-bind-try\", root / \"etc/machine-id\", root / \"etc/machine-id\",\n *finalize_passwd_mounts(root),\n \"sh\", \"-c\",\n f\"chmod 1777 {root / 'tmp'} {root / 'var/tmp'} {root / 'dev/shm'} && \"\n f\"chmod 755 {root / 'run'} && \"\n # Make sure anything running in the root directory thinks it's in a container. $container can't always be\n # accessed so we write /run/host/container-manager as well which is always accessible.\n f\"mkdir -m 755 {root}/run/host && echo mkosi >{root}/run/host/container-manager && \"\n \"exec $0 \\\"$@\\\"\",\n ]\n\n\ndef chroot_cmd(root: Path, *, resolve: bool = False, options: Sequence[PathString] = ()) -> list[PathString]:\n cmdline: list[PathString] = [\n \"sh\", \"-c\",\n f\"trap 'rm -rf {root / 'work'}' EXIT && \"\n # /etc/resolv.conf can be a dangling symlink to /run/systemd/resolve/stub-resolv.conf. Bubblewrap tries to call\n # mkdir() on each component of the path which means it will try to call\n # mkdir(/run/systemd/resolve/stub-resolv.conf) which will fail unless /run/systemd/resolve exists already so\n # we make sure that it already exists.\n f\"mkdir -p -m 755 {root / 'work'} {root / 'run/systemd'} {root / 'run/systemd/resolve'} && \"\n # No exec here because we need to clean up the /work directory afterwards.\n f\"$0 \\\"$@\\\"\",\n \"bwrap\",\n \"--dev-bind\", root, \"/\",\n \"--setenv\", \"container\", \"mkosi\",\n \"--setenv\", \"HOME\", \"/\",\n \"--setenv\", \"PATH\", \"/work/scripts:/usr/bin:/usr/sbin\",\n ]\n\n if resolve:\n cmdline += [\"--ro-bind-try\", \"/etc/resolv.conf\", \"/etc/resolv.conf\"]\n\n cmdline += options\n\n return apivfs_cmd(root) + cmdline\n", "path": "mkosi/sandbox.py"}]} | 3,363 | 156 |
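Editorial note on the mkosi row above: the golden diff fixes the sandbox for hosts that are not usr-merged by bind-mounting the real `/bin`, `/sbin` and `/lib*` directories read-only instead of only recreating symlinks, so `/bin/sh` stays resolvable for tools like `apt-get`. The sketch below restates that decision logic as a standalone helper for illustration; it is not part of the repository, and it assumes only the `--symlink` and `--ro-bind` bwrap options already used in `sandbox.py`.

```python
from pathlib import Path


def compat_dir_args(tools: Path = Path("/")) -> list[str]:
    """Build bwrap arguments that recreate /bin, /sbin and /lib* in the sandbox."""
    args: list[str] = []
    for d in ("bin", "sbin", "lib", "lib32", "lib64"):
        p = tools / d
        if p.is_symlink():
            # usr-merged host: recreate the symlink so /bin/sh resolves via /usr
            args += ["--symlink", str(p.readlink()), f"/{d}"]
        elif p.is_dir():
            # split layout (e.g. NixOS): expose the real directory read-only
            args += ["--ro-bind", str(p), f"/{d}"]
    return args


print(compat_dir_args())  # e.g. ['--symlink', 'usr/bin', '/bin', ...] on a usr-merged host
```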
gh_patches_debug_35400 | rasdani/github-patches | git_diff | opsdroid__opsdroid-167 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Module specific site-packages
It could be good for modules to install their dependencies in a specific `site-packages` directory, which is only added to the path when it is time to import the modules.
A good place could be `~/.opsdroid/site-packages`.
--- END ISSUE ---
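For orientation, here is a minimal sketch of the mechanism the issue proposes: install a module's requirements into a dedicated directory and add that directory to `sys.path` only when modules are imported. The constant name and pip flags below are illustrative assumptions, not code taken from the repository.
```python
import os
import subprocess
import sys

# Hypothetical location proposed in the issue for module dependencies.
MODULE_DEPS_PATH = os.path.join(os.path.expanduser("~"), ".opsdroid", "site-packages")


def install_module_deps(requirements_path):
    """Install a module's requirements into the dedicated site-packages directory."""
    os.makedirs(MODULE_DEPS_PATH, exist_ok=True)
    # --target puts the packages in our directory instead of the global one;
    # --ignore-installed forces a copy even if the host already has the package.
    subprocess.check_call([
        "pip", "install",
        "--target={}".format(MODULE_DEPS_PATH),
        "--ignore-installed",
        "-r", requirements_path,
    ])


def enable_module_deps():
    """Add the dedicated directory to the import path just before loading modules."""
    if MODULE_DEPS_PATH not in sys.path:
        sys.path.append(MODULE_DEPS_PATH)
```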
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/loader.py`
Content:
```
1 """Class for loading in modules to OpsDroid."""
2
3 import logging
4 import os
5 import sys
6 import shutil
7 import subprocess
8 import importlib
9 import yaml
10 from opsdroid.const import (
11 DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH,
12 DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE)
13
14
15 _LOGGER = logging.getLogger(__name__)
16
17
18 class Loader:
19 """Class to load in config and modules."""
20
21 def __init__(self, opsdroid):
22 """Create object with opsdroid instance."""
23 self.opsdroid = opsdroid
24 self.modules_directory = None
25 self.current_import_config = None
26 _LOGGER.debug("Loaded loader")
27
28 @staticmethod
29 def import_module(config):
30 """Import module namespace as variable and return it."""
31 try:
32 module = importlib.import_module(
33 config["module_path"] + "." + config["name"])
34 _LOGGER.debug("Loaded " + config["type"] + ": " +
35 config["module_path"])
36 return module
37 except ImportError as error:
38 _LOGGER.debug("Failed to load " + config["type"] +
39 " " + config["module_path"] + "." + config["name"])
40 _LOGGER.debug(error)
41
42 try:
43 module = importlib.import_module(
44 config["module_path"])
45 _LOGGER.debug("Loaded " + config["type"] + ": " +
46 config["module_path"])
47 return module
48 except ImportError as error:
49 _LOGGER.debug("Failed to load " + config["type"] +
50 " " + config["module_path"])
51 _LOGGER.debug(error)
52
53 _LOGGER.error("Failed to load " + config["type"] +
54 " " + config["module_path"])
55 return None
56
57 @staticmethod
58 def check_cache(config):
59 """Remove module if 'no-cache' set in config."""
60 if "no-cache" in config \
61 and config["no-cache"]:
62 _LOGGER.debug("'no-cache' set, removing " + config["install_path"])
63 if os.path.isdir(config["install_path"]):
64 shutil.rmtree(config["install_path"])
65 if os.path.isfile(config["install_path"] + ".py"):
66 os.remove(config["install_path"] + ".py")
67
68 def build_module_path(self, path_type, config):
69 """Generate the module path from name and type."""
70 if path_type == "import":
71 return MODULES_DIRECTORY + "." + config["type"] + \
72 "." + config["name"]
73 elif path_type == "install":
74 return self.modules_directory + "/" + config["type"] + \
75 "/" + config["name"]
76
77 @staticmethod
78 def git_clone(git_url, install_path, branch):
79 """Clone a git repo to a location and wait for finish."""
80 process = subprocess.Popen(["git", "clone", "-b", branch,
81 git_url, install_path], shell=False,
82 stdout=subprocess.PIPE,
83 stderr=subprocess.PIPE)
84 for output in process.communicate():
85 if output != "":
86 for line in output.splitlines():
87 _LOGGER.debug(str(line).strip())
88 process.wait()
89
90 @staticmethod
91 def pip_install_deps(requirements_path):
92 """Pip install a requirements.txt file and wait for finish."""
93 process = subprocess.Popen(["pip", "install", "-r", requirements_path],
94 shell=False,
95 stdout=subprocess.PIPE,
96 stderr=subprocess.PIPE)
97 for output in process.communicate():
98 if output != "":
99 for line in output.splitlines():
100 _LOGGER.debug(str(line).strip())
101 process.wait()
102
103 @staticmethod
104 def create_default_config(config_path):
105 """Create a default config file based on the included example."""
106 _LOGGER.info("Creating %s.", config_path)
107 config_dir, _ = os.path.split(config_path)
108 if not os.path.isdir(config_dir):
109 os.makedirs(config_dir)
110 shutil.copyfile(EXAMPLE_CONFIG_FILE, config_path)
111 return config_path
112
113 def load_config_file(self, config_paths):
114 """Load a yaml config file from path."""
115 config_path = ""
116 for possible_path in config_paths:
117 if not os.path.isfile(possible_path):
118 _LOGGER.debug("Config file " + possible_path +
119 " not found")
120 else:
121 config_path = possible_path
122 break
123
124 if not config_path:
125 _LOGGER.info("No configuration files found.")
126 config_path = self.create_default_config(DEFAULT_CONFIG_PATH)
127
128 try:
129 with open(config_path, 'r') as stream:
130 _LOGGER.info("Loaded config from %s", config_path)
131 return yaml.load(stream)
132 except yaml.YAMLError as error:
133 self.opsdroid.critical(error, 1)
134 except FileNotFoundError as error:
135 self.opsdroid.critical(str(error), 1)
136
137 def setup_modules_directory(self, config):
138 """Create and configure the modules directory."""
139 module_path = os.path.expanduser(
140 config.get("module-path", DEFAULT_MODULES_PATH))
141 sys.path.append(module_path)
142
143 if not os.path.isdir(module_path):
144 os.makedirs(module_path, exist_ok=True)
145
146 self.modules_directory = os.path.join(module_path, MODULES_DIRECTORY)
147
148 # Create modules directory if doesn't exist
149 if not os.path.isdir(self.modules_directory):
150 os.makedirs(self.modules_directory)
151
152 def load_modules_from_config(self, config):
153 """Load all module types based on config."""
154 _LOGGER.debug("Loading modules from config")
155
156 self.setup_modules_directory(config)
157
158 connectors, databases, skills = None, None, None
159
160 if 'databases' in config.keys():
161 databases = self._load_modules('database', config['databases'])
162 else:
163 _LOGGER.warning("No databases in configuration")
164
165 if 'skills' in config.keys():
166 skills = self._load_modules('skill', config['skills'])
167 else:
168 self.opsdroid.critical(
169 "No skills in configuration, at least 1 required", 1)
170
171 if 'connectors' in config.keys():
172 connectors = self._load_modules('connector', config['connectors'])
173 else:
174 self.opsdroid.critical(
175 "No connectors in configuration, at least 1 required", 1)
176
177 return connectors, databases, skills
178
179 def _load_modules(self, modules_type, modules):
180 """Install and load modules."""
181 _LOGGER.debug("Loading " + modules_type + " modules")
182 loaded_modules = []
183
184 for module in modules:
185
186 # Set up module config
187 config = module
188 config = {} if config is None else config
189 config["name"] = module["name"]
190 config["type"] = modules_type
191 config["module_path"] = self.build_module_path("import", config)
192 config["install_path"] = self.build_module_path("install", config)
193 if "branch" not in config:
194 config["branch"] = DEFAULT_MODULE_BRANCH
195
196 # Remove module for reinstall if no-cache set
197 self.check_cache(config)
198
199 # Install module
200 self._install_module(config)
201
202 # Import module
203 self.current_import_config = config
204 module = self.import_module(config)
205 if module is not None:
206 loaded_modules.append({
207 "module": module,
208 "config": config})
209 else:
210 _LOGGER.error(
211 "Module " + config["name"] + " failed to import")
212
213 return loaded_modules
214
215 def _install_module(self, config):
216 # pylint: disable=R0201
217 """Install a module."""
218 _LOGGER.debug("Installing " + config["name"])
219
220 if os.path.isdir(config["install_path"]) or \
221 os.path.isfile(config["install_path"] + ".py"):
222 # TODO Allow for updating or reinstalling of modules
223 _LOGGER.debug("Module " + config["name"] +
224 " already installed, skipping")
225 return
226
227 if "path" in config:
228 self._install_local_module(config)
229 else:
230 self._install_git_module(config)
231
232 if os.path.isdir(config["install_path"]):
233 _LOGGER.debug("Installed " + config["name"] +
234 " to " + config["install_path"])
235 else:
236 _LOGGER.debug("Install of " + config["name"] + " failed")
237
238 # Install module dependancies
239 if os.path.isfile(config["install_path"] + "/requirements.txt"):
240 self.pip_install_deps(config["install_path"] +
241 "/requirements.txt")
242
243 def _install_git_module(self, config):
244 """Install a module from a git repository."""
245 if config is not None and "repo" in config:
246 git_url = config["repo"]
247 else:
248 git_url = DEFAULT_GIT_URL + config["type"] + \
249 "-" + config["name"] + ".git"
250
251 if any(prefix in git_url for prefix in ["http", "https", "ssh"]):
252 # TODO Test if url or ssh path exists
253 # TODO Handle github authentication
254 _LOGGER.debug("Cloning from remote repository")
255 self.git_clone(git_url, config["install_path"],
256 config["branch"])
257 else:
258 if os.path.isdir(git_url):
259 _LOGGER.debug("Cloning from local repository")
260 self.git_clone(git_url, config["install_path"],
261 config["branch"])
262 else:
263 _LOGGER.debug("Could not find local git repo " + git_url)
264
265 @staticmethod
266 def _install_local_module(config):
267 """Install a module from a local path."""
268 installed = False
269
270 installdir, _ = os.path.split(config["install_path"])
271 if not os.path.isdir(installdir):
272 os.makedirs(installdir, exist_ok=True)
273
274 if os.path.isdir(config["path"]):
275 shutil.copytree(config["path"], config["install_path"])
276 installed = True
277
278 if os.path.isfile(config["path"]):
279 os.makedirs(config["install_path"], exist_ok=True)
280 shutil.copyfile(config["path"], config["install_path"] +
281 "/__init__.py")
282 installed = True
283
284 if not installed:
285 _LOGGER.error("Failed to install from " + config["path"])
286
```
Path: `opsdroid/const.py`
Content:
```
1 """Constants used by OpsDroid."""
2 import os
3
4 __version__ = "0.8.0"
5
6 LOG_FILENAME = 'output.log'
7 DEFAULT_GIT_URL = "https://github.com/opsdroid/"
8 MODULES_DIRECTORY = "opsdroid-modules"
9 DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser("~"), ".opsdroid")
10 DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, "modules")
11 DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, "configuration.yaml")
12 DEFAULT_MODULE_BRANCH = "master"
13 EXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
14 "configuration/example_configuration.yaml")
15
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/const.py b/opsdroid/const.py
--- a/opsdroid/const.py
+++ b/opsdroid/const.py
@@ -8,6 +8,7 @@
MODULES_DIRECTORY = "opsdroid-modules"
DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser("~"), ".opsdroid")
DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, "modules")
+DEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, "site-packages")
DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, "configuration.yaml")
DEFAULT_MODULE_BRANCH = "master"
EXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
diff --git a/opsdroid/loader.py b/opsdroid/loader.py
--- a/opsdroid/loader.py
+++ b/opsdroid/loader.py
@@ -9,7 +9,8 @@
import yaml
from opsdroid.const import (
DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH,
- DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE)
+ DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE,
+ DEFAULT_MODULE_DEPS_PATH)
_LOGGER = logging.getLogger(__name__)
@@ -90,7 +91,11 @@
@staticmethod
def pip_install_deps(requirements_path):
"""Pip install a requirements.txt file and wait for finish."""
- process = subprocess.Popen(["pip", "install", "-r", requirements_path],
+ process = subprocess.Popen(["pip", "install",
+ "--target={}".format(
+ DEFAULT_MODULE_DEPS_PATH),
+ "--ignore-installed",
+ "-r", requirements_path],
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@@ -181,6 +186,10 @@
_LOGGER.debug("Loading " + modules_type + " modules")
loaded_modules = []
+ if not os.path.isdir(DEFAULT_MODULE_DEPS_PATH):
+ os.makedirs(DEFAULT_MODULE_DEPS_PATH)
+ sys.path.append(DEFAULT_MODULE_DEPS_PATH)
+
for module in modules:
# Set up module config
| {"golden_diff": "diff --git a/opsdroid/const.py b/opsdroid/const.py\n--- a/opsdroid/const.py\n+++ b/opsdroid/const.py\n@@ -8,6 +8,7 @@\n MODULES_DIRECTORY = \"opsdroid-modules\"\n DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser(\"~\"), \".opsdroid\")\n DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \"modules\")\n+DEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, \"site-packages\")\n DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \"configuration.yaml\")\n DEFAULT_MODULE_BRANCH = \"master\"\n EXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),\ndiff --git a/opsdroid/loader.py b/opsdroid/loader.py\n--- a/opsdroid/loader.py\n+++ b/opsdroid/loader.py\n@@ -9,7 +9,8 @@\n import yaml\n from opsdroid.const import (\n DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH,\n- DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE)\n+ DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE,\n+ DEFAULT_MODULE_DEPS_PATH)\n \n \n _LOGGER = logging.getLogger(__name__)\n@@ -90,7 +91,11 @@\n @staticmethod\n def pip_install_deps(requirements_path):\n \"\"\"Pip install a requirements.txt file and wait for finish.\"\"\"\n- process = subprocess.Popen([\"pip\", \"install\", \"-r\", requirements_path],\n+ process = subprocess.Popen([\"pip\", \"install\",\n+ \"--target={}\".format(\n+ DEFAULT_MODULE_DEPS_PATH),\n+ \"--ignore-installed\",\n+ \"-r\", requirements_path],\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n@@ -181,6 +186,10 @@\n _LOGGER.debug(\"Loading \" + modules_type + \" modules\")\n loaded_modules = []\n \n+ if not os.path.isdir(DEFAULT_MODULE_DEPS_PATH):\n+ os.makedirs(DEFAULT_MODULE_DEPS_PATH)\n+ sys.path.append(DEFAULT_MODULE_DEPS_PATH)\n+\n for module in modules:\n \n # Set up module config\n", "issue": "Module specific site-packages\nIt could be good for modules to install their dependancies in a specific `site-packages` directory which is only added to the path when it is time to import the modules.\r\n\r\nA good place could be `~/.opsdroid/site-packages`.\n", "before_files": [{"content": "\"\"\"Class for loading in modules to OpsDroid.\"\"\"\n\nimport logging\nimport os\nimport sys\nimport shutil\nimport subprocess\nimport importlib\nimport yaml\nfrom opsdroid.const import (\n DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH,\n DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE)\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Loader:\n \"\"\"Class to load in config and modules.\"\"\"\n\n def __init__(self, opsdroid):\n \"\"\"Create object with opsdroid instance.\"\"\"\n self.opsdroid = opsdroid\n self.modules_directory = None\n self.current_import_config = None\n _LOGGER.debug(\"Loaded loader\")\n\n @staticmethod\n def import_module(config):\n \"\"\"Import module namespace as variable and return it.\"\"\"\n try:\n module = importlib.import_module(\n config[\"module_path\"] + \".\" + config[\"name\"])\n _LOGGER.debug(\"Loaded \" + config[\"type\"] + \": \" +\n config[\"module_path\"])\n return module\n except ImportError as error:\n _LOGGER.debug(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"module_path\"] + \".\" + config[\"name\"])\n _LOGGER.debug(error)\n\n try:\n module = importlib.import_module(\n config[\"module_path\"])\n _LOGGER.debug(\"Loaded \" + config[\"type\"] + \": \" +\n config[\"module_path\"])\n return module\n except ImportError as error:\n _LOGGER.debug(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"module_path\"])\n 
_LOGGER.debug(error)\n\n _LOGGER.error(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"module_path\"])\n return None\n\n @staticmethod\n def check_cache(config):\n \"\"\"Remove module if 'no-cache' set in config.\"\"\"\n if \"no-cache\" in config \\\n and config[\"no-cache\"]:\n _LOGGER.debug(\"'no-cache' set, removing \" + config[\"install_path\"])\n if os.path.isdir(config[\"install_path\"]):\n shutil.rmtree(config[\"install_path\"])\n if os.path.isfile(config[\"install_path\"] + \".py\"):\n os.remove(config[\"install_path\"] + \".py\")\n\n def build_module_path(self, path_type, config):\n \"\"\"Generate the module path from name and type.\"\"\"\n if path_type == \"import\":\n return MODULES_DIRECTORY + \".\" + config[\"type\"] + \\\n \".\" + config[\"name\"]\n elif path_type == \"install\":\n return self.modules_directory + \"/\" + config[\"type\"] + \\\n \"/\" + config[\"name\"]\n\n @staticmethod\n def git_clone(git_url, install_path, branch):\n \"\"\"Clone a git repo to a location and wait for finish.\"\"\"\n process = subprocess.Popen([\"git\", \"clone\", \"-b\", branch,\n git_url, install_path], shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for output in process.communicate():\n if output != \"\":\n for line in output.splitlines():\n _LOGGER.debug(str(line).strip())\n process.wait()\n\n @staticmethod\n def pip_install_deps(requirements_path):\n \"\"\"Pip install a requirements.txt file and wait for finish.\"\"\"\n process = subprocess.Popen([\"pip\", \"install\", \"-r\", requirements_path],\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for output in process.communicate():\n if output != \"\":\n for line in output.splitlines():\n _LOGGER.debug(str(line).strip())\n process.wait()\n\n @staticmethod\n def create_default_config(config_path):\n \"\"\"Create a default config file based on the included example.\"\"\"\n _LOGGER.info(\"Creating %s.\", config_path)\n config_dir, _ = os.path.split(config_path)\n if not os.path.isdir(config_dir):\n os.makedirs(config_dir)\n shutil.copyfile(EXAMPLE_CONFIG_FILE, config_path)\n return config_path\n\n def load_config_file(self, config_paths):\n \"\"\"Load a yaml config file from path.\"\"\"\n config_path = \"\"\n for possible_path in config_paths:\n if not os.path.isfile(possible_path):\n _LOGGER.debug(\"Config file \" + possible_path +\n \" not found\")\n else:\n config_path = possible_path\n break\n\n if not config_path:\n _LOGGER.info(\"No configuration files found.\")\n config_path = self.create_default_config(DEFAULT_CONFIG_PATH)\n\n try:\n with open(config_path, 'r') as stream:\n _LOGGER.info(\"Loaded config from %s\", config_path)\n return yaml.load(stream)\n except yaml.YAMLError as error:\n self.opsdroid.critical(error, 1)\n except FileNotFoundError as error:\n self.opsdroid.critical(str(error), 1)\n\n def setup_modules_directory(self, config):\n \"\"\"Create and configure the modules directory.\"\"\"\n module_path = os.path.expanduser(\n config.get(\"module-path\", DEFAULT_MODULES_PATH))\n sys.path.append(module_path)\n\n if not os.path.isdir(module_path):\n os.makedirs(module_path, exist_ok=True)\n\n self.modules_directory = os.path.join(module_path, MODULES_DIRECTORY)\n\n # Create modules directory if doesn't exist\n if not os.path.isdir(self.modules_directory):\n os.makedirs(self.modules_directory)\n\n def load_modules_from_config(self, config):\n \"\"\"Load all module types based on config.\"\"\"\n _LOGGER.debug(\"Loading modules from config\")\n\n 
self.setup_modules_directory(config)\n\n connectors, databases, skills = None, None, None\n\n if 'databases' in config.keys():\n databases = self._load_modules('database', config['databases'])\n else:\n _LOGGER.warning(\"No databases in configuration\")\n\n if 'skills' in config.keys():\n skills = self._load_modules('skill', config['skills'])\n else:\n self.opsdroid.critical(\n \"No skills in configuration, at least 1 required\", 1)\n\n if 'connectors' in config.keys():\n connectors = self._load_modules('connector', config['connectors'])\n else:\n self.opsdroid.critical(\n \"No connectors in configuration, at least 1 required\", 1)\n\n return connectors, databases, skills\n\n def _load_modules(self, modules_type, modules):\n \"\"\"Install and load modules.\"\"\"\n _LOGGER.debug(\"Loading \" + modules_type + \" modules\")\n loaded_modules = []\n\n for module in modules:\n\n # Set up module config\n config = module\n config = {} if config is None else config\n config[\"name\"] = module[\"name\"]\n config[\"type\"] = modules_type\n config[\"module_path\"] = self.build_module_path(\"import\", config)\n config[\"install_path\"] = self.build_module_path(\"install\", config)\n if \"branch\" not in config:\n config[\"branch\"] = DEFAULT_MODULE_BRANCH\n\n # Remove module for reinstall if no-cache set\n self.check_cache(config)\n\n # Install module\n self._install_module(config)\n\n # Import module\n self.current_import_config = config\n module = self.import_module(config)\n if module is not None:\n loaded_modules.append({\n \"module\": module,\n \"config\": config})\n else:\n _LOGGER.error(\n \"Module \" + config[\"name\"] + \" failed to import\")\n\n return loaded_modules\n\n def _install_module(self, config):\n # pylint: disable=R0201\n \"\"\"Install a module.\"\"\"\n _LOGGER.debug(\"Installing \" + config[\"name\"])\n\n if os.path.isdir(config[\"install_path\"]) or \\\n os.path.isfile(config[\"install_path\"] + \".py\"):\n # TODO Allow for updating or reinstalling of modules\n _LOGGER.debug(\"Module \" + config[\"name\"] +\n \" already installed, skipping\")\n return\n\n if \"path\" in config:\n self._install_local_module(config)\n else:\n self._install_git_module(config)\n\n if os.path.isdir(config[\"install_path\"]):\n _LOGGER.debug(\"Installed \" + config[\"name\"] +\n \" to \" + config[\"install_path\"])\n else:\n _LOGGER.debug(\"Install of \" + config[\"name\"] + \" failed\")\n\n # Install module dependancies\n if os.path.isfile(config[\"install_path\"] + \"/requirements.txt\"):\n self.pip_install_deps(config[\"install_path\"] +\n \"/requirements.txt\")\n\n def _install_git_module(self, config):\n \"\"\"Install a module from a git repository.\"\"\"\n if config is not None and \"repo\" in config:\n git_url = config[\"repo\"]\n else:\n git_url = DEFAULT_GIT_URL + config[\"type\"] + \\\n \"-\" + config[\"name\"] + \".git\"\n\n if any(prefix in git_url for prefix in [\"http\", \"https\", \"ssh\"]):\n # TODO Test if url or ssh path exists\n # TODO Handle github authentication\n _LOGGER.debug(\"Cloning from remote repository\")\n self.git_clone(git_url, config[\"install_path\"],\n config[\"branch\"])\n else:\n if os.path.isdir(git_url):\n _LOGGER.debug(\"Cloning from local repository\")\n self.git_clone(git_url, config[\"install_path\"],\n config[\"branch\"])\n else:\n _LOGGER.debug(\"Could not find local git repo \" + git_url)\n\n @staticmethod\n def _install_local_module(config):\n \"\"\"Install a module from a local path.\"\"\"\n installed = False\n\n installdir, _ = 
os.path.split(config[\"install_path\"])\n if not os.path.isdir(installdir):\n os.makedirs(installdir, exist_ok=True)\n\n if os.path.isdir(config[\"path\"]):\n shutil.copytree(config[\"path\"], config[\"install_path\"])\n installed = True\n\n if os.path.isfile(config[\"path\"]):\n os.makedirs(config[\"install_path\"], exist_ok=True)\n shutil.copyfile(config[\"path\"], config[\"install_path\"] +\n \"/__init__.py\")\n installed = True\n\n if not installed:\n _LOGGER.error(\"Failed to install from \" + config[\"path\"])\n", "path": "opsdroid/loader.py"}, {"content": "\"\"\"Constants used by OpsDroid.\"\"\"\nimport os\n\n__version__ = \"0.8.0\"\n\nLOG_FILENAME = 'output.log'\nDEFAULT_GIT_URL = \"https://github.com/opsdroid/\"\nMODULES_DIRECTORY = \"opsdroid-modules\"\nDEFAULT_ROOT_PATH = os.path.join(os.path.expanduser(\"~\"), \".opsdroid\")\nDEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \"modules\")\nDEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \"configuration.yaml\")\nDEFAULT_MODULE_BRANCH = \"master\"\nEXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"configuration/example_configuration.yaml\")\n", "path": "opsdroid/const.py"}], "after_files": [{"content": "\"\"\"Class for loading in modules to OpsDroid.\"\"\"\n\nimport logging\nimport os\nimport sys\nimport shutil\nimport subprocess\nimport importlib\nimport yaml\nfrom opsdroid.const import (\n DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH,\n DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE,\n DEFAULT_MODULE_DEPS_PATH)\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Loader:\n \"\"\"Class to load in config and modules.\"\"\"\n\n def __init__(self, opsdroid):\n \"\"\"Create object with opsdroid instance.\"\"\"\n self.opsdroid = opsdroid\n self.modules_directory = None\n self.current_import_config = None\n _LOGGER.debug(\"Loaded loader\")\n\n @staticmethod\n def import_module(config):\n \"\"\"Import module namespace as variable and return it.\"\"\"\n try:\n module = importlib.import_module(\n config[\"module_path\"] + \".\" + config[\"name\"])\n _LOGGER.debug(\"Loaded \" + config[\"type\"] + \": \" +\n config[\"module_path\"])\n return module\n except ImportError as error:\n _LOGGER.debug(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"module_path\"] + \".\" + config[\"name\"])\n _LOGGER.debug(error)\n\n try:\n module = importlib.import_module(\n config[\"module_path\"])\n _LOGGER.debug(\"Loaded \" + config[\"type\"] + \": \" +\n config[\"module_path\"])\n return module\n except ImportError as error:\n _LOGGER.debug(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"module_path\"])\n _LOGGER.debug(error)\n\n _LOGGER.error(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"module_path\"])\n return None\n\n @staticmethod\n def check_cache(config):\n \"\"\"Remove module if 'no-cache' set in config.\"\"\"\n if \"no-cache\" in config \\\n and config[\"no-cache\"]:\n _LOGGER.debug(\"'no-cache' set, removing \" + config[\"install_path\"])\n if os.path.isdir(config[\"install_path\"]):\n shutil.rmtree(config[\"install_path\"])\n if os.path.isfile(config[\"install_path\"] + \".py\"):\n os.remove(config[\"install_path\"] + \".py\")\n\n def build_module_path(self, path_type, config):\n \"\"\"Generate the module path from name and type.\"\"\"\n if path_type == \"import\":\n return MODULES_DIRECTORY + \".\" + config[\"type\"] + \\\n \".\" + config[\"name\"]\n elif path_type == \"install\":\n return self.modules_directory + \"/\" + 
config[\"type\"] + \\\n \"/\" + config[\"name\"]\n\n @staticmethod\n def git_clone(git_url, install_path, branch):\n \"\"\"Clone a git repo to a location and wait for finish.\"\"\"\n process = subprocess.Popen([\"git\", \"clone\", \"-b\", branch,\n git_url, install_path], shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for output in process.communicate():\n if output != \"\":\n for line in output.splitlines():\n _LOGGER.debug(str(line).strip())\n process.wait()\n\n @staticmethod\n def pip_install_deps(requirements_path):\n \"\"\"Pip install a requirements.txt file and wait for finish.\"\"\"\n process = subprocess.Popen([\"pip\", \"install\",\n \"--target={}\".format(\n DEFAULT_MODULE_DEPS_PATH),\n \"--ignore-installed\",\n \"-r\", requirements_path],\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for output in process.communicate():\n if output != \"\":\n for line in output.splitlines():\n _LOGGER.debug(str(line).strip())\n process.wait()\n\n @staticmethod\n def create_default_config(config_path):\n \"\"\"Create a default config file based on the included example.\"\"\"\n _LOGGER.info(\"Creating %s.\", config_path)\n config_dir, _ = os.path.split(config_path)\n if not os.path.isdir(config_dir):\n os.makedirs(config_dir)\n shutil.copyfile(EXAMPLE_CONFIG_FILE, config_path)\n return config_path\n\n def load_config_file(self, config_paths):\n \"\"\"Load a yaml config file from path.\"\"\"\n config_path = \"\"\n for possible_path in config_paths:\n if not os.path.isfile(possible_path):\n _LOGGER.debug(\"Config file \" + possible_path +\n \" not found\")\n else:\n config_path = possible_path\n break\n\n if not config_path:\n _LOGGER.info(\"No configuration files found.\")\n config_path = self.create_default_config(DEFAULT_CONFIG_PATH)\n\n try:\n with open(config_path, 'r') as stream:\n _LOGGER.info(\"Loaded config from %s\", config_path)\n return yaml.load(stream)\n except yaml.YAMLError as error:\n self.opsdroid.critical(error, 1)\n except FileNotFoundError as error:\n self.opsdroid.critical(str(error), 1)\n\n def setup_modules_directory(self, config):\n \"\"\"Create and configure the modules directory.\"\"\"\n module_path = os.path.expanduser(\n config.get(\"module-path\", DEFAULT_MODULES_PATH))\n sys.path.append(module_path)\n\n if not os.path.isdir(module_path):\n os.makedirs(module_path, exist_ok=True)\n\n self.modules_directory = os.path.join(module_path, MODULES_DIRECTORY)\n\n # Create modules directory if doesn't exist\n if not os.path.isdir(self.modules_directory):\n os.makedirs(self.modules_directory)\n\n def load_modules_from_config(self, config):\n \"\"\"Load all module types based on config.\"\"\"\n _LOGGER.debug(\"Loading modules from config\")\n\n self.setup_modules_directory(config)\n\n connectors, databases, skills = None, None, None\n\n if 'databases' in config.keys():\n databases = self._load_modules('database', config['databases'])\n else:\n _LOGGER.warning(\"No databases in configuration\")\n\n if 'skills' in config.keys():\n skills = self._load_modules('skill', config['skills'])\n else:\n self.opsdroid.critical(\n \"No skills in configuration, at least 1 required\", 1)\n\n if 'connectors' in config.keys():\n connectors = self._load_modules('connector', config['connectors'])\n else:\n self.opsdroid.critical(\n \"No connectors in configuration, at least 1 required\", 1)\n\n return connectors, databases, skills\n\n def _load_modules(self, modules_type, modules):\n \"\"\"Install and load modules.\"\"\"\n _LOGGER.debug(\"Loading \" + 
modules_type + \" modules\")\n loaded_modules = []\n\n if not os.path.isdir(DEFAULT_MODULE_DEPS_PATH):\n os.makedirs(DEFAULT_MODULE_DEPS_PATH)\n sys.path.append(DEFAULT_MODULE_DEPS_PATH)\n\n for module in modules:\n\n # Set up module config\n config = module\n config = {} if config is None else config\n config[\"name\"] = module[\"name\"]\n config[\"type\"] = modules_type\n config[\"module_path\"] = self.build_module_path(\"import\", config)\n config[\"install_path\"] = self.build_module_path(\"install\", config)\n if \"branch\" not in config:\n config[\"branch\"] = DEFAULT_MODULE_BRANCH\n\n # Remove module for reinstall if no-cache set\n self.check_cache(config)\n\n # Install module\n self._install_module(config)\n\n # Import module\n self.current_import_config = config\n module = self.import_module(config)\n if module is not None:\n loaded_modules.append({\n \"module\": module,\n \"config\": config})\n else:\n _LOGGER.error(\n \"Module \" + config[\"name\"] + \" failed to import\")\n\n return loaded_modules\n\n def _install_module(self, config):\n # pylint: disable=R0201\n \"\"\"Install a module.\"\"\"\n _LOGGER.debug(\"Installing \" + config[\"name\"])\n\n if os.path.isdir(config[\"install_path\"]) or \\\n os.path.isfile(config[\"install_path\"] + \".py\"):\n # TODO Allow for updating or reinstalling of modules\n _LOGGER.debug(\"Module \" + config[\"name\"] +\n \" already installed, skipping\")\n return\n\n if \"path\" in config:\n self._install_local_module(config)\n else:\n self._install_git_module(config)\n\n if os.path.isdir(config[\"install_path\"]):\n _LOGGER.debug(\"Installed \" + config[\"name\"] +\n \" to \" + config[\"install_path\"])\n else:\n _LOGGER.debug(\"Install of \" + config[\"name\"] + \" failed\")\n\n # Install module dependancies\n if os.path.isfile(config[\"install_path\"] + \"/requirements.txt\"):\n self.pip_install_deps(config[\"install_path\"] +\n \"/requirements.txt\")\n\n def _install_git_module(self, config):\n \"\"\"Install a module from a git repository.\"\"\"\n if config is not None and \"repo\" in config:\n git_url = config[\"repo\"]\n else:\n git_url = DEFAULT_GIT_URL + config[\"type\"] + \\\n \"-\" + config[\"name\"] + \".git\"\n\n if any(prefix in git_url for prefix in [\"http\", \"https\", \"ssh\"]):\n # TODO Test if url or ssh path exists\n # TODO Handle github authentication\n _LOGGER.debug(\"Cloning from remote repository\")\n self.git_clone(git_url, config[\"install_path\"],\n config[\"branch\"])\n else:\n if os.path.isdir(git_url):\n _LOGGER.debug(\"Cloning from local repository\")\n self.git_clone(git_url, config[\"install_path\"],\n config[\"branch\"])\n else:\n _LOGGER.debug(\"Could not find local git repo \" + git_url)\n\n @staticmethod\n def _install_local_module(config):\n \"\"\"Install a module from a local path.\"\"\"\n installed = False\n\n installdir, _ = os.path.split(config[\"install_path\"])\n if not os.path.isdir(installdir):\n os.makedirs(installdir, exist_ok=True)\n\n if os.path.isdir(config[\"path\"]):\n shutil.copytree(config[\"path\"], config[\"install_path\"])\n installed = True\n\n if os.path.isfile(config[\"path\"]):\n os.makedirs(config[\"install_path\"], exist_ok=True)\n shutil.copyfile(config[\"path\"], config[\"install_path\"] +\n \"/__init__.py\")\n installed = True\n\n if not installed:\n _LOGGER.error(\"Failed to install from \" + config[\"path\"])\n", "path": "opsdroid/loader.py"}, {"content": "\"\"\"Constants used by OpsDroid.\"\"\"\nimport os\n\n__version__ = \"0.8.0\"\n\nLOG_FILENAME = 
'output.log'\nDEFAULT_GIT_URL = \"https://github.com/opsdroid/\"\nMODULES_DIRECTORY = \"opsdroid-modules\"\nDEFAULT_ROOT_PATH = os.path.join(os.path.expanduser(\"~\"), \".opsdroid\")\nDEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \"modules\")\nDEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, \"site-packages\")\nDEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \"configuration.yaml\")\nDEFAULT_MODULE_BRANCH = \"master\"\nEXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"configuration/example_configuration.yaml\")\n", "path": "opsdroid/const.py"}]} | 3,397 | 463 |
gh_patches_debug_38680 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-528 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] IKEA FYRTUR blind and remote reporting wrong battery or no battery
First, the blind:
Never updates the battery automatically.
However, if you do a Zigbee "get cluster attribute" for battery percentage remaining, it does return a correct value.
Remote:
Does update the battery on its own.
Both:
Both values are displayed correctly in the get-attribute box, but the battery sensors always report half of what the attribute shows.


--- END ISSUE ---
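For context, the quirk files below rely on a `DoublingPowerConfigurationCluster` imported from the package root. A rough, hypothetical sketch of what such a cluster could look like follows: these devices appear to report battery in whole percent (0-100) while the ZCL attribute is expressed in half-percent units (0-200), so the raw value is doubled before being stored, which is why the sensor otherwise shows half the expected figure. The real implementation lives elsewhere in zhaquirks and may differ.
```python
from zigpy.zcl.clusters.general import PowerConfiguration


class DoublingPowerConfigurationCluster(PowerConfiguration):
    """Sketch of a PowerConfiguration cluster that doubles battery percentage.

    Assumption: the device reports 0-100, but the ZCL attribute is defined in
    half-percent units (0-200), so the raw value is doubled before it is
    stored and forwarded to listeners.
    """

    BATTERY_PERCENTAGE_REMAINING = 0x0021

    def _update_attribute(self, attrid, value):
        if attrid == self.BATTERY_PERCENTAGE_REMAINING and value is not None:
            value = value * 2
        super()._update_attribute(attrid, value)
```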
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zhaquirks/ikea/blinds.py`
Content:
```
1 """Device handler for IKEA of Sweden TRADFRI Fyrtur blinds."""
2 from zigpy.profiles import zha
3 from zigpy.quirks import CustomDevice
4 from zigpy.zcl.clusters.closures import WindowCovering
5 from zigpy.zcl.clusters.general import (
6 Basic,
7 Groups,
8 Identify,
9 Ota,
10 PollControl,
11 PowerConfiguration,
12 Scenes,
13 )
14 from zigpy.zcl.clusters.lightlink import LightLink
15
16 from . import IKEA
17 from .. import DoublingPowerConfigurationCluster
18 from ..const import (
19 DEVICE_TYPE,
20 ENDPOINTS,
21 INPUT_CLUSTERS,
22 MODELS_INFO,
23 OUTPUT_CLUSTERS,
24 PROFILE_ID,
25 )
26
27 IKEA_CLUSTER_ID = 0xFC7C # decimal = 64636
28
29
30 class IkeaTradfriRollerBlinds(CustomDevice):
31 """Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds."""
32
33 signature = {
34 # <SimpleDescriptor endpoint=1 profile=260 device_type=2080
35 # device_version=1
36 # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096]
37 # output_clusters=[25, 4096]>
38 MODELS_INFO: [
39 (IKEA, "FYRTUR block-out roller blind"),
40 (IKEA, "KADRILJ roller blind"),
41 ],
42 ENDPOINTS: {
43 1: {
44 PROFILE_ID: zha.PROFILE_ID,
45 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,
46 INPUT_CLUSTERS: [
47 Basic.cluster_id,
48 PowerConfiguration.cluster_id,
49 Identify.cluster_id,
50 Groups.cluster_id,
51 Scenes.cluster_id,
52 PollControl.cluster_id,
53 WindowCovering.cluster_id,
54 LightLink.cluster_id,
55 ],
56 OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],
57 }
58 },
59 }
60
61 replacement = {
62 "endpoints": {
63 1: {
64 "profile_id": zha.PROFILE_ID,
65 "device_type": zha.DeviceType.WINDOW_COVERING_DEVICE,
66 "input_clusters": [
67 Basic.cluster_id,
68 DoublingPowerConfigurationCluster,
69 Identify.cluster_id,
70 Groups.cluster_id,
71 Scenes.cluster_id,
72 PollControl.cluster_id,
73 WindowCovering.cluster_id,
74 LightLink.cluster_id,
75 ],
76 "output_clusters": [Ota.cluster_id, LightLink.cluster_id],
77 }
78 }
79 }
80
```
Path: `zhaquirks/ikea/opencloseremote.py`
Content:
```
1 """Device handler for IKEA of Sweden TRADFRI remote control."""
2 from zigpy.profiles import zha
3 from zigpy.quirks import CustomDevice
4 from zigpy.zcl.clusters.closures import WindowCovering
5 from zigpy.zcl.clusters.general import (
6 Alarms,
7 Basic,
8 Groups,
9 Identify,
10 LevelControl,
11 OnOff,
12 Ota,
13 PollControl,
14 PowerConfiguration,
15 )
16 from zigpy.zcl.clusters.lightlink import LightLink
17
18 from . import IKEA
19 from .. import DoublingPowerConfigurationCluster
20 from ..const import (
21 DEVICE_TYPE,
22 ENDPOINTS,
23 INPUT_CLUSTERS,
24 MODELS_INFO,
25 OUTPUT_CLUSTERS,
26 PROFILE_ID,
27 )
28
29 IKEA_CLUSTER_ID = 0xFC7C # decimal = 64636
30
31
32 class IkeaTradfriOpenCloseRemote(CustomDevice):
33 """Custom device representing IKEA of Sweden TRADFRI remote control."""
34
35 signature = {
36 MODELS_INFO: [("\x02KE", "TRADFRI open/close remote")],
37 ENDPOINTS: {
38 1: {
39 PROFILE_ID: zha.PROFILE_ID,
40 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,
41 INPUT_CLUSTERS: [
42 Basic.cluster_id,
43 PowerConfiguration.cluster_id,
44 Identify.cluster_id,
45 Alarms.cluster_id,
46 PollControl.cluster_id,
47 LightLink.cluster_id,
48 IKEA_CLUSTER_ID,
49 ],
50 OUTPUT_CLUSTERS: [
51 Identify.cluster_id,
52 Groups.cluster_id,
53 OnOff.cluster_id,
54 LevelControl.cluster_id,
55 Ota.cluster_id,
56 WindowCovering.cluster_id,
57 LightLink.cluster_id,
58 ],
59 }
60 },
61 }
62
63 replacement = {
64 MODELS_INFO: [(IKEA, "TRADFRI open/close remote")],
65 ENDPOINTS: {
66 1: {
67 PROFILE_ID: zha.PROFILE_ID,
68 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,
69 INPUT_CLUSTERS: [
70 Basic.cluster_id,
71 DoublingPowerConfigurationCluster,
72 Identify.cluster_id,
73 Alarms.cluster_id,
74 PollControl.cluster_id,
75 LightLink.cluster_id,
76 IKEA_CLUSTER_ID,
77 ],
78 OUTPUT_CLUSTERS: [
79 Identify.cluster_id,
80 Groups.cluster_id,
81 OnOff.cluster_id,
82 LevelControl.cluster_id,
83 Ota.cluster_id,
84 WindowCovering.cluster_id,
85 LightLink.cluster_id,
86 ],
87 }
88 },
89 }
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zhaquirks/ikea/blinds.py b/zhaquirks/ikea/blinds.py
--- a/zhaquirks/ikea/blinds.py
+++ b/zhaquirks/ikea/blinds.py
@@ -31,9 +31,9 @@
"""Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds."""
signature = {
- # <SimpleDescriptor endpoint=1 profile=260 device_type=2080
+ # <SimpleDescriptor endpoint=1 profile=260 device_type=514
# device_version=1
- # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096]
+ # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096, 64636]
# output_clusters=[25, 4096]>
MODELS_INFO: [
(IKEA, "FYRTUR block-out roller blind"),
@@ -52,6 +52,7 @@
PollControl.cluster_id,
WindowCovering.cluster_id,
LightLink.cluster_id,
+ IKEA_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],
}
@@ -59,11 +60,11 @@
}
replacement = {
- "endpoints": {
+ ENDPOINTS: {
1: {
- "profile_id": zha.PROFILE_ID,
- "device_type": zha.DeviceType.WINDOW_COVERING_DEVICE,
- "input_clusters": [
+ PROFILE_ID: zha.PROFILE_ID,
+ DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,
+ INPUT_CLUSTERS: [
Basic.cluster_id,
DoublingPowerConfigurationCluster,
Identify.cluster_id,
@@ -72,8 +73,9 @@
PollControl.cluster_id,
WindowCovering.cluster_id,
LightLink.cluster_id,
+ IKEA_CLUSTER_ID,
],
- "output_clusters": [Ota.cluster_id, LightLink.cluster_id],
+ OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],
}
}
}
diff --git a/zhaquirks/ikea/opencloseremote.py b/zhaquirks/ikea/opencloseremote.py
--- a/zhaquirks/ikea/opencloseremote.py
+++ b/zhaquirks/ikea/opencloseremote.py
@@ -33,7 +33,14 @@
"""Custom device representing IKEA of Sweden TRADFRI remote control."""
signature = {
- MODELS_INFO: [("\x02KE", "TRADFRI open/close remote")],
+ # <SimpleDescriptor endpoint=1 profile=260 device_type=515
+ # device_version=1
+ # input_clusters=[0, 1, 3, 9, 32, 4096, 64636]
+ # output_clusters=[3, 4, 6, 8, 25, 258, 4096]>
+ MODELS_INFO: [
+ ("\x02KE", "TRADFRI open/close remote"),
+ (IKEA, "TRADFRI open/close remote"),
+ ],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
| {"golden_diff": "diff --git a/zhaquirks/ikea/blinds.py b/zhaquirks/ikea/blinds.py\n--- a/zhaquirks/ikea/blinds.py\n+++ b/zhaquirks/ikea/blinds.py\n@@ -31,9 +31,9 @@\n \"\"\"Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds.\"\"\"\n \n signature = {\n- # <SimpleDescriptor endpoint=1 profile=260 device_type=2080\n+ # <SimpleDescriptor endpoint=1 profile=260 device_type=514\n # device_version=1\n- # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096]\n+ # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096, 64636]\n # output_clusters=[25, 4096]>\n MODELS_INFO: [\n (IKEA, \"FYRTUR block-out roller blind\"),\n@@ -52,6 +52,7 @@\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n+ IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\n }\n@@ -59,11 +60,11 @@\n }\n \n replacement = {\n- \"endpoints\": {\n+ ENDPOINTS: {\n 1: {\n- \"profile_id\": zha.PROFILE_ID,\n- \"device_type\": zha.DeviceType.WINDOW_COVERING_DEVICE,\n- \"input_clusters\": [\n+ PROFILE_ID: zha.PROFILE_ID,\n+ DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n+ INPUT_CLUSTERS: [\n Basic.cluster_id,\n DoublingPowerConfigurationCluster,\n Identify.cluster_id,\n@@ -72,8 +73,9 @@\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n+ IKEA_CLUSTER_ID,\n ],\n- \"output_clusters\": [Ota.cluster_id, LightLink.cluster_id],\n+ OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\n }\n }\n }\ndiff --git a/zhaquirks/ikea/opencloseremote.py b/zhaquirks/ikea/opencloseremote.py\n--- a/zhaquirks/ikea/opencloseremote.py\n+++ b/zhaquirks/ikea/opencloseremote.py\n@@ -33,7 +33,14 @@\n \"\"\"Custom device representing IKEA of Sweden TRADFRI remote control.\"\"\"\n \n signature = {\n- MODELS_INFO: [(\"\\x02KE\", \"TRADFRI open/close remote\")],\n+ # <SimpleDescriptor endpoint=1 profile=260 device_type=515\n+ # device_version=1\n+ # input_clusters=[0, 1, 3, 9, 32, 4096, 64636]\n+ # output_clusters=[3, 4, 6, 8, 25, 258, 4096]>\n+ MODELS_INFO: [\n+ (\"\\x02KE\", \"TRADFRI open/close remote\"),\n+ (IKEA, \"TRADFRI open/close remote\"),\n+ ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n", "issue": "[BUG]Ikea FYRTUR blind and remote reporting wrong battery or no battery\nFirst \r\n\r\nBlind:\r\n\r\nNever updates the battery automatically.\r\n\r\nHowever if you do a zigbee get cluster attribute for battery percentage remaining it does return a correct value.\r\n\r\nRemote:\r\n\r\ndoes update battery on its own.\r\n\r\nBoth:\r\n\r\nboth values are displayed correctly in the get attribute box but the sensor for the batteries figures are always half what it shows.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"Device handler for IKEA of Sweden TRADFRI Fyrtur blinds.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import (\n Basic,\n Groups,\n Identify,\n Ota,\n PollControl,\n PowerConfiguration,\n Scenes,\n)\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom . import IKEA\nfrom .. 
import DoublingPowerConfigurationCluster\nfrom ..const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\nIKEA_CLUSTER_ID = 0xFC7C # decimal = 64636\n\n\nclass IkeaTradfriRollerBlinds(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=2080\n # device_version=1\n # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096]\n # output_clusters=[25, 4096]>\n MODELS_INFO: [\n (IKEA, \"FYRTUR block-out roller blind\"),\n (IKEA, \"KADRILJ roller blind\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\n }\n },\n }\n\n replacement = {\n \"endpoints\": {\n 1: {\n \"profile_id\": zha.PROFILE_ID,\n \"device_type\": zha.DeviceType.WINDOW_COVERING_DEVICE,\n \"input_clusters\": [\n Basic.cluster_id,\n DoublingPowerConfigurationCluster,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n \"output_clusters\": [Ota.cluster_id, LightLink.cluster_id],\n }\n }\n }\n", "path": "zhaquirks/ikea/blinds.py"}, {"content": "\"\"\"Device handler for IKEA of Sweden TRADFRI remote control.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import (\n Alarms,\n Basic,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n PollControl,\n PowerConfiguration,\n)\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom . import IKEA\nfrom .. 
import DoublingPowerConfigurationCluster\nfrom ..const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\nIKEA_CLUSTER_ID = 0xFC7C # decimal = 64636\n\n\nclass IkeaTradfriOpenCloseRemote(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI remote control.\"\"\"\n\n signature = {\n MODELS_INFO: [(\"\\x02KE\", \"TRADFRI open/close remote\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n\n replacement = {\n MODELS_INFO: [(IKEA, \"TRADFRI open/close remote\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n DoublingPowerConfigurationCluster,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n", "path": "zhaquirks/ikea/opencloseremote.py"}], "after_files": [{"content": "\"\"\"Device handler for IKEA of Sweden TRADFRI Fyrtur blinds.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import (\n Basic,\n Groups,\n Identify,\n Ota,\n PollControl,\n PowerConfiguration,\n Scenes,\n)\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom . import IKEA\nfrom .. 
import DoublingPowerConfigurationCluster\nfrom ..const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\nIKEA_CLUSTER_ID = 0xFC7C # decimal = 64636\n\n\nclass IkeaTradfriRollerBlinds(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=514\n # device_version=1\n # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096, 64636]\n # output_clusters=[25, 4096]>\n MODELS_INFO: [\n (IKEA, \"FYRTUR block-out roller blind\"),\n (IKEA, \"KADRILJ roller blind\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n DoublingPowerConfigurationCluster,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\n }\n }\n }\n", "path": "zhaquirks/ikea/blinds.py"}, {"content": "\"\"\"Device handler for IKEA of Sweden TRADFRI remote control.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import (\n Alarms,\n Basic,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n PollControl,\n PowerConfiguration,\n)\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom . import IKEA\nfrom .. 
import DoublingPowerConfigurationCluster\nfrom ..const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\nIKEA_CLUSTER_ID = 0xFC7C # decimal = 64636\n\n\nclass IkeaTradfriOpenCloseRemote(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI remote control.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=515\n # device_version=1\n # input_clusters=[0, 1, 3, 9, 32, 4096, 64636]\n # output_clusters=[3, 4, 6, 8, 25, 258, 4096]>\n MODELS_INFO: [\n (\"\\x02KE\", \"TRADFRI open/close remote\"),\n (IKEA, \"TRADFRI open/close remote\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n\n replacement = {\n MODELS_INFO: [(IKEA, \"TRADFRI open/close remote\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n DoublingPowerConfigurationCluster,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n", "path": "zhaquirks/ikea/opencloseremote.py"}]} | 1,921 | 789 |
gh_patches_debug_12128 | rasdani/github-patches | git_diff | nf-core__tools-1357 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Linting should fail if a pipeline module file is edited
In https://github.com/ewels/nf-core-methylseq/pull/2 I have edited a module file that was pulled from nf-core/modules. I shouldn't be allowed to do this.
The linting warns me that something is amiss:
```
│ bismark/align │ modules/nf-cor… │ Local copy of │
│ │ │ module outdated │
```
But it should be checking the git sha in `modules.json`, recognising that it doesn't match what it expects, and then throwing a hard error.
It's possible that the code for this is lurking in a PR that's waiting to be merged.
--- END ISSUE ---
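For orientation, the gist of the request: compare the installed copy of a module against the commit recorded for it in `modules.json`, and report a hard failure rather than a warning when they differ. The sketch below is schematic; the `modules.json` layout and helper names are assumptions, and the real lint check lives in `nf_core/modules/lint/module_changes.py`, shown below.
```python
import json
import os


def recorded_sha(pipeline_dir, module_name, repo="nf-core/modules"):
    """Look up the commit recorded for a module in modules.json (layout assumed)."""
    with open(os.path.join(pipeline_dir, "modules.json")) as fh:
        modules_json = json.load(fh)
    return modules_json["repos"][repo][module_name]["git_sha"]


def check_module(module, local_copy, remote_copy_at_sha):
    """Fail (not warn) when the local file differs from the remote copy at the recorded sha."""
    if local_copy != remote_copy_at_sha:
        module.failed.append(
            ("check_local_copy", "Local copy of module does not match remote", module.module_dir)
        )
    else:
        module.passed.append(
            ("check_local_copy", "Local copy of module up to date", module.module_dir)
        )
```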
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nf_core/modules/lint/module_changes.py`
Content:
```
1 """
2 Check whether the content of a module has changed compared to the original repository
3 """
4 import os
5 import requests
6 import rich
7 from nf_core.modules.lint import LintResult
8
9
10 def module_changes(module_lint_object, module):
11 """
12 Checks whether installed nf-core modules have changed compared to the
13 original repository
14 Downloads the 'main.nf' and 'meta.yml' files for every module
15 and compares them to the local copies
16
17 If the module has a 'git_sha', the file content is checked against this sha
18 """
19 files_to_check = ["main.nf", "meta.yml"]
20
21 # Loop over nf-core modules
22 module_base_url = f"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module_lint_object.modules_repo.branch}/modules/{module.module_name}/"
23
24 # If module.git_sha specified, check specific commit version for changes
25 if module.git_sha:
26 module_base_url = f"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module.git_sha}/modules/{module.module_name}/"
27
28 for f in files_to_check:
29 # open local copy, continue if file not found (a failed message has already been issued in this case)
30 try:
31 local_copy = open(os.path.join(module.module_dir, f), "r").read()
32 except FileNotFoundError as e:
33 continue
34
35 # Download remote copy and compare
36 url = module_base_url + f
37 r = requests.get(url=url)
38
39 if r.status_code != 200:
40 module.warned.append(
41 (
42 "check_local_copy",
43 f"Could not fetch remote copy, skipping comparison.",
44 f"{os.path.join(module.module_dir, f)}",
45 )
46 )
47 else:
48 try:
49 remote_copy = r.content.decode("utf-8")
50
51 if local_copy != remote_copy:
52 module.warned.append(
53 (
54 "check_local_copy",
55 "Local copy of module outdated",
56 f"{os.path.join(module.module_dir, f)}",
57 )
58 )
59 else:
60 module.passed.append(
61 (
62 "check_local_copy",
63 "Local copy of module up to date",
64 f"{os.path.join(module.module_dir, f)}",
65 )
66 )
67 except UnicodeDecodeError as e:
68 module.warned.append(
69 (
70 "check_local_copy",
71 f"Could not decode file from {url}. Skipping comparison ({e})",
72 f"{os.path.join(module.module_dir, f)}",
73 )
74 )
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nf_core/modules/lint/module_changes.py b/nf_core/modules/lint/module_changes.py
--- a/nf_core/modules/lint/module_changes.py
+++ b/nf_core/modules/lint/module_changes.py
@@ -49,10 +49,10 @@
remote_copy = r.content.decode("utf-8")
if local_copy != remote_copy:
- module.warned.append(
+ module.failed.append(
(
"check_local_copy",
- "Local copy of module outdated",
+ "Local copy of module does not match remote",
f"{os.path.join(module.module_dir, f)}",
)
)
| {"golden_diff": "diff --git a/nf_core/modules/lint/module_changes.py b/nf_core/modules/lint/module_changes.py\n--- a/nf_core/modules/lint/module_changes.py\n+++ b/nf_core/modules/lint/module_changes.py\n@@ -49,10 +49,10 @@\n remote_copy = r.content.decode(\"utf-8\")\n \n if local_copy != remote_copy:\n- module.warned.append(\n+ module.failed.append(\n (\n \"check_local_copy\",\n- \"Local copy of module outdated\",\n+ \"Local copy of module does not match remote\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n", "issue": "Linting should fail if pipeline module file is edited\nIn https://github.com/ewels/nf-core-methylseq/pull/2 I have edited a module file that was pulled from nf-core/modules. I shouldn't be allowed to do this.\r\n\r\nThe linting warns me that something is amiss:\r\n\r\n```\r\n\u2502 bismark/align \u2502 modules/nf-cor\u2026 \u2502 Local copy of \u2502\r\n\u2502 \u2502 \u2502 module outdated \u2502\r\n```\r\n\r\nBut it should be checking the git sha in `modules.json` and recognising that it doesn't match what it expects. Then throwing a hard error.\r\n\r\nPossible that the code for this is lurking in a PR that's waiting to be merged..\n", "before_files": [{"content": "\"\"\"\nCheck whether the content of a module has changed compared to the original repository\n\"\"\"\nimport os\nimport requests\nimport rich\nfrom nf_core.modules.lint import LintResult\n\n\ndef module_changes(module_lint_object, module):\n \"\"\"\n Checks whether installed nf-core modules have changed compared to the\n original repository\n Downloads the 'main.nf' and 'meta.yml' files for every module\n and compares them to the local copies\n\n If the module has a 'git_sha', the file content is checked against this sha\n \"\"\"\n files_to_check = [\"main.nf\", \"meta.yml\"]\n\n # Loop over nf-core modules\n module_base_url = f\"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module_lint_object.modules_repo.branch}/modules/{module.module_name}/\"\n\n # If module.git_sha specified, check specific commit version for changes\n if module.git_sha:\n module_base_url = f\"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module.git_sha}/modules/{module.module_name}/\"\n\n for f in files_to_check:\n # open local copy, continue if file not found (a failed message has already been issued in this case)\n try:\n local_copy = open(os.path.join(module.module_dir, f), \"r\").read()\n except FileNotFoundError as e:\n continue\n\n # Download remote copy and compare\n url = module_base_url + f\n r = requests.get(url=url)\n\n if r.status_code != 200:\n module.warned.append(\n (\n \"check_local_copy\",\n f\"Could not fetch remote copy, skipping comparison.\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n else:\n try:\n remote_copy = r.content.decode(\"utf-8\")\n\n if local_copy != remote_copy:\n module.warned.append(\n (\n \"check_local_copy\",\n \"Local copy of module outdated\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n else:\n module.passed.append(\n (\n \"check_local_copy\",\n \"Local copy of module up to date\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n except UnicodeDecodeError as e:\n module.warned.append(\n (\n \"check_local_copy\",\n f\"Could not decode file from {url}. 
Skipping comparison ({e})\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n", "path": "nf_core/modules/lint/module_changes.py"}], "after_files": [{"content": "\"\"\"\nCheck whether the content of a module has changed compared to the original repository\n\"\"\"\nimport os\nimport requests\nimport rich\nfrom nf_core.modules.lint import LintResult\n\n\ndef module_changes(module_lint_object, module):\n \"\"\"\n Checks whether installed nf-core modules have changed compared to the\n original repository\n Downloads the 'main.nf' and 'meta.yml' files for every module\n and compares them to the local copies\n\n If the module has a 'git_sha', the file content is checked against this sha\n \"\"\"\n files_to_check = [\"main.nf\", \"meta.yml\"]\n\n # Loop over nf-core modules\n module_base_url = f\"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module_lint_object.modules_repo.branch}/modules/{module.module_name}/\"\n\n # If module.git_sha specified, check specific commit version for changes\n if module.git_sha:\n module_base_url = f\"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module.git_sha}/modules/{module.module_name}/\"\n\n for f in files_to_check:\n # open local copy, continue if file not found (a failed message has already been issued in this case)\n try:\n local_copy = open(os.path.join(module.module_dir, f), \"r\").read()\n except FileNotFoundError as e:\n continue\n\n # Download remote copy and compare\n url = module_base_url + f\n r = requests.get(url=url)\n\n if r.status_code != 200:\n module.warned.append(\n (\n \"check_local_copy\",\n f\"Could not fetch remote copy, skipping comparison.\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n else:\n try:\n remote_copy = r.content.decode(\"utf-8\")\n\n if local_copy != remote_copy:\n module.failed.append(\n (\n \"check_local_copy\",\n \"Local copy of module does not match remote\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n else:\n module.passed.append(\n (\n \"check_local_copy\",\n \"Local copy of module up to date\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n except UnicodeDecodeError as e:\n module.warned.append(\n (\n \"check_local_copy\",\n f\"Could not decode file from {url}. Skipping comparison ({e})\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n", "path": "nf_core/modules/lint/module_changes.py"}]} | 1,086 | 142 |
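An aside on the behaviour change in this record: the patch simply reclassifies a local/remote mismatch from a warning to a hard failure. The sketch below is a minimal, self-contained illustration of that pattern; `ModuleStub` and `compare_to_remote` are hypothetical stand-ins, not nf-core's real classes.

```python
from dataclasses import dataclass, field


@dataclass
class ModuleStub:
    # Hypothetical stand-in for nf-core's module lint object.
    module_dir: str
    passed: list = field(default_factory=list)
    warned: list = field(default_factory=list)
    failed: list = field(default_factory=list)


def compare_to_remote(module: ModuleStub, filename: str, local_copy: str, remote_copy: str) -> None:
    path = f"{module.module_dir}/{filename}"
    if local_copy != remote_copy:
        # Hard error instead of a warning, mirroring the golden diff.
        module.failed.append(("check_local_copy", "Local copy of module does not match remote", path))
    else:
        module.passed.append(("check_local_copy", "Local copy of module up to date", path))


module = ModuleStub(module_dir="modules/nf-core/bismark/align")
compare_to_remote(module, "main.nf", "process ALIGN { ... }", "process ALIGN { /* upstream */ }")
assert module.failed and not module.warned
```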
gh_patches_debug_6286 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-284 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include tailwind.css via npm instead of static file
At the moment, we include tailwind.css as a static file in our repo.
Instead, we should use [the recommended installation via npm](https://tailwindcss.com/docs/installation/) where we can configure exactly which parts we need and compile a minified css file in our deployment chain.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/cms/views/media/media_list_view.py`
Content:
```
1 from django.contrib.auth.decorators import login_required
2 from django.shortcuts import render
3 from django.utils.decorators import method_decorator
4 from django.views.generic import TemplateView
5
6 from ...decorators import region_permission_required
7 from ...models import Document
8
9
10 @method_decorator(login_required, name='dispatch')
11 @method_decorator(region_permission_required, name='dispatch')
12 class MediaListView(TemplateView):
13 template_name = 'media/list.html'
14 base_context = {'current_menu_item': 'media'}
15
16 def get(self, request, *args, **kwargs):
17 documents = Document.objects.all()
18
19 return render(
20 request,
21 self.template_name,
22 {
23 **self.base_context,
24 'documents': documents
25 }
26 )
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/cms/views/media/media_list_view.py b/backend/cms/views/media/media_list_view.py
--- a/backend/cms/views/media/media_list_view.py
+++ b/backend/cms/views/media/media_list_view.py
@@ -10,7 +10,7 @@
@method_decorator(login_required, name='dispatch')
@method_decorator(region_permission_required, name='dispatch')
class MediaListView(TemplateView):
- template_name = 'media/list.html'
+ template_name = 'media/media_list.html'
base_context = {'current_menu_item': 'media'}
def get(self, request, *args, **kwargs):
| {"golden_diff": "diff --git a/backend/cms/views/media/media_list_view.py b/backend/cms/views/media/media_list_view.py\n--- a/backend/cms/views/media/media_list_view.py\n+++ b/backend/cms/views/media/media_list_view.py\n@@ -10,7 +10,7 @@\n @method_decorator(login_required, name='dispatch')\n @method_decorator(region_permission_required, name='dispatch')\n class MediaListView(TemplateView):\n- template_name = 'media/list.html'\n+ template_name = 'media/media_list.html'\n base_context = {'current_menu_item': 'media'}\n \n def get(self, request, *args, **kwargs):\n", "issue": "Include tailwind.css via npm instead of static file\nAt the moment, we include tailwind.css as a static file in our repo.\r\nInstead, we should use [the recommended installation via npm](https://tailwindcss.com/docs/installation/) where we can configure exactly which parts we need and compile a minified css file in our deployment chain.\n", "before_files": [{"content": "from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import TemplateView\n\nfrom ...decorators import region_permission_required\nfrom ...models import Document\n\n\n@method_decorator(login_required, name='dispatch')\n@method_decorator(region_permission_required, name='dispatch')\nclass MediaListView(TemplateView):\n template_name = 'media/list.html'\n base_context = {'current_menu_item': 'media'}\n\n def get(self, request, *args, **kwargs):\n documents = Document.objects.all()\n\n return render(\n request,\n self.template_name,\n {\n **self.base_context,\n 'documents': documents\n }\n )\n", "path": "backend/cms/views/media/media_list_view.py"}], "after_files": [{"content": "from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import TemplateView\n\nfrom ...decorators import region_permission_required\nfrom ...models import Document\n\n\n@method_decorator(login_required, name='dispatch')\n@method_decorator(region_permission_required, name='dispatch')\nclass MediaListView(TemplateView):\n template_name = 'media/media_list.html'\n base_context = {'current_menu_item': 'media'}\n\n def get(self, request, *args, **kwargs):\n documents = Document.objects.all()\n\n return render(\n request,\n self.template_name,\n {\n **self.base_context,\n 'documents': documents\n }\n )\n", "path": "backend/cms/views/media/media_list_view.py"}]} | 527 | 131 |
gh_patches_debug_378 | rasdani/github-patches | git_diff | encode__uvicorn-1099 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping
### Checklist
<!-- Please make sure you check all these items before submitting your bug report. -->
- [X] The bug is reproducible against the latest release and/or `master`.
- [X] There are no similar issues or pull requests to fix it yet.
### Describe the bug
When adding uvicorn package with poetry the following warning is raised:
PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping
because the constraint `>=7.*` violates PEP440 syntax. It should either be `>=7.0` or `=7.*`.
Because of this, the `click` dependency is not installed and uvicorn may not work.
### To reproduce
Just execute `poetry add uvicorn` in any environment.
### Expected behavior
To install `uvicorn` correctly together with all its dependencies.
### Actual behavior
The `click` dependency is not installed and uvicorn may not work.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import os
5 import re
6
7 from setuptools import setup
8
9
10 def get_version(package):
11 """
12 Return package version as listed in `__version__` in `init.py`.
13 """
14 path = os.path.join(package, "__init__.py")
15 init_py = open(path, "r", encoding="utf8").read()
16 return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
17
18
19 def get_long_description():
20 """
21 Return the README.
22 """
23 return open("README.md", "r", encoding="utf8").read()
24
25
26 def get_packages(package):
27 """
28 Return root package and all sub-packages.
29 """
30 return [
31 dirpath
32 for dirpath, dirnames, filenames in os.walk(package)
33 if os.path.exists(os.path.join(dirpath, "__init__.py"))
34 ]
35
36
37 env_marker_cpython = (
38 "sys_platform != 'win32'"
39 " and (sys_platform != 'cygwin'"
40 " and platform_python_implementation != 'PyPy')"
41 )
42
43 env_marker_win = "sys_platform == 'win32'"
44 env_marker_below_38 = "python_version < '3.8'"
45
46 minimal_requirements = [
47 "asgiref>=3.4.0",
48 "click>=7.*",
49 "h11>=0.8",
50 "typing-extensions;" + env_marker_below_38,
51 ]
52
53
54 extra_requirements = [
55 "websockets>=9.1",
56 "httptools==0.2.*",
57 "uvloop>=0.14.0,!=0.15.0,!=0.15.1; " + env_marker_cpython,
58 "colorama>=0.4;" + env_marker_win,
59 "watchgod>=0.6",
60 "python-dotenv>=0.13",
61 "PyYAML>=5.1",
62 ]
63
64
65 setup(
66 name="uvicorn",
67 version=get_version("uvicorn"),
68 url="https://github.com/encode/uvicorn",
69 license="BSD",
70 description="The lightning-fast ASGI server.",
71 long_description=get_long_description(),
72 long_description_content_type="text/markdown",
73 author="Tom Christie",
74 author_email="[email protected]",
75 packages=get_packages("uvicorn"),
76 install_requires=minimal_requirements,
77 extras_require={"standard": extra_requirements},
78 include_package_data=True,
79 classifiers=[
80 "Development Status :: 4 - Beta",
81 "Environment :: Web Environment",
82 "Intended Audience :: Developers",
83 "License :: OSI Approved :: BSD License",
84 "Operating System :: OS Independent",
85 "Topic :: Internet :: WWW/HTTP",
86 "Programming Language :: Python :: 3",
87 "Programming Language :: Python :: 3.6",
88 "Programming Language :: Python :: 3.7",
89 "Programming Language :: Python :: 3.8",
90 "Programming Language :: Python :: 3.9",
91 "Programming Language :: Python :: Implementation :: CPython",
92 "Programming Language :: Python :: Implementation :: PyPy",
93 ],
94 entry_points="""
95 [console_scripts]
96 uvicorn=uvicorn.main:main
97 """,
98 )
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -45,7 +45,7 @@
minimal_requirements = [
"asgiref>=3.4.0",
- "click>=7.*",
+ "click>=7.0",
"h11>=0.8",
"typing-extensions;" + env_marker_below_38,
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -45,7 +45,7 @@\n \n minimal_requirements = [\n \"asgiref>=3.4.0\",\n- \"click>=7.*\",\n+ \"click>=7.0\",\n \"h11>=0.8\",\n \"typing-extensions;\" + env_marker_below_38,\n ]\n", "issue": "PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping\n### Checklist\r\n\r\n<!-- Please make sure you check all these items before submitting your bug report. -->\r\n\r\n- [X] The bug is reproducible against the latest release and/or `master`.\r\n- [X] There are no similar issues or pull requests to fix it yet.\r\n\r\n### Describe the bug\r\n\r\nWhen adding uvicorn package with poetry the following warning is raised:\r\n\r\n PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping\r\n\r\nbecause the constraint `>=7.*` violates PEP440 syntax. It should either be `>=7.0` or `=7.*`.\r\n\r\nBecause of this, the `click` dependency is not installed and uvicorn may not work.\r\n\r\n### To reproduce\r\n\r\nJust execute `poetry add uvicorn` in any environment.\r\n\r\n### Expected behavior\r\n\r\nTo install `uvicorn` correctly together with all its dependencies.\r\n\r\n### Actual behavior\r\n\r\nThe `click` dependency is not installed and uvicorn may not work.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, \"__init__.py\")\n init_py = open(path, \"r\", encoding=\"utf8\").read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n return open(\"README.md\", \"r\", encoding=\"utf8\").read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nenv_marker_win = \"sys_platform == 'win32'\"\nenv_marker_below_38 = \"python_version < '3.8'\"\n\nminimal_requirements = [\n \"asgiref>=3.4.0\",\n \"click>=7.*\",\n \"h11>=0.8\",\n \"typing-extensions;\" + env_marker_below_38,\n]\n\n\nextra_requirements = [\n \"websockets>=9.1\",\n \"httptools==0.2.*\",\n \"uvloop>=0.14.0,!=0.15.0,!=0.15.1; \" + env_marker_cpython,\n \"colorama>=0.4;\" + env_marker_win,\n \"watchgod>=0.6\",\n \"python-dotenv>=0.13\",\n \"PyYAML>=5.1\",\n]\n\n\nsetup(\n name=\"uvicorn\",\n version=get_version(\"uvicorn\"),\n url=\"https://github.com/encode/uvicorn\",\n license=\"BSD\",\n description=\"The lightning-fast ASGI server.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n packages=get_packages(\"uvicorn\"),\n install_requires=minimal_requirements,\n extras_require={\"standard\": extra_requirements},\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 
3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n uvicorn=uvicorn.main:main\n \"\"\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, \"__init__.py\")\n init_py = open(path, \"r\", encoding=\"utf8\").read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n return open(\"README.md\", \"r\", encoding=\"utf8\").read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nenv_marker_win = \"sys_platform == 'win32'\"\nenv_marker_below_38 = \"python_version < '3.8'\"\n\nminimal_requirements = [\n \"asgiref>=3.4.0\",\n \"click>=7.0\",\n \"h11>=0.8\",\n \"typing-extensions;\" + env_marker_below_38,\n]\n\n\nextra_requirements = [\n \"websockets>=9.1\",\n \"httptools==0.2.*\",\n \"uvloop>=0.14.0,!=0.15.0,!=0.15.1; \" + env_marker_cpython,\n \"colorama>=0.4;\" + env_marker_win,\n \"watchgod>=0.6\",\n \"python-dotenv>=0.13\",\n \"PyYAML>=5.1\",\n]\n\n\nsetup(\n name=\"uvicorn\",\n version=get_version(\"uvicorn\"),\n url=\"https://github.com/encode/uvicorn\",\n license=\"BSD\",\n description=\"The lightning-fast ASGI server.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n packages=get_packages(\"uvicorn\"),\n install_requires=minimal_requirements,\n extras_require={\"standard\": extra_requirements},\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n uvicorn=uvicorn.main:main\n \"\"\",\n)\n", "path": "setup.py"}]} | 1,383 | 92 |
gh_patches_debug_31527 | rasdani/github-patches | git_diff | iterative__dvc-4739 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dvc dag --outs
In the `0.xx` version days, there was a `dvc pipeline show --outs` command that was able to show a DAG that included outputs.
I thought this was a really nice feature. For instance, I have a `train_test_split` stage that creates two outputs, `train` and `test`, and it would be nice to see them taking different paths in the DAG.
Can you maybe (re)implement this feature?
Thanks!
dvc dag --outs
In the `0.xx` version days, there was a `dvc pipeline show --outs` command that was able to show a DAG that included outputs.
I thought this was a really nice feature. For instance, I have a `train_test_split` stage that creates two outputs, `train` and `test`, and it would be nice to see them taking different paths in the DAG.
Can you maybe (re)implement this feature?
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/command/dag.py`
Content:
```
1 import argparse
2 import logging
3
4 from dvc.command.base import CmdBase, append_doc_link
5 from dvc.exceptions import DvcException
6
7 logger = logging.getLogger(__name__)
8
9
10 def _show_ascii(G):
11 from dvc.dagascii import draw
12 from dvc.repo.graph import get_pipelines
13
14 pipelines = get_pipelines(G)
15
16 ret = []
17 for pipeline in pipelines:
18 ret.append(draw(pipeline.nodes, pipeline.edges))
19
20 return "\n".join(ret)
21
22
23 def _show_dot(G):
24 import io
25
26 from networkx.drawing.nx_pydot import write_dot
27
28 dot_file = io.StringIO()
29 write_dot(G, dot_file)
30 return dot_file.getvalue()
31
32
33 def _build(G, target=None, full=False):
34 import networkx as nx
35
36 from dvc.repo.graph import get_pipeline, get_pipelines
37
38 if target:
39 H = get_pipeline(get_pipelines(G), target)
40 if not full:
41 descendants = nx.descendants(G, target)
42 descendants.add(target)
43 H.remove_nodes_from(set(G.nodes()) - descendants)
44 else:
45 H = G
46
47 def _relabel(stage):
48 return stage.addressing
49
50 return nx.relabel_nodes(H, _relabel, copy=False)
51
52
53 class CmdDAG(CmdBase):
54 def run(self):
55 try:
56 target = None
57 if self.args.target:
58 stages = self.repo.collect(self.args.target)
59 if len(stages) > 1:
60 logger.error(
61 f"'{self.args.target}' contains more than one stage "
62 "{stages}, please specify one stage"
63 )
64 return 1
65 target = stages[0]
66
67 G = _build(self.repo.graph, target=target, full=self.args.full,)
68
69 if self.args.dot:
70 logger.info(_show_dot(G))
71 else:
72 from dvc.utils.pager import pager
73
74 pager(_show_ascii(G))
75
76 return 0
77 except DvcException:
78 msg = "failed to show "
79 if self.args.target:
80 msg += f"a pipeline for '{target}'"
81 else:
82 msg += "pipelines"
83 logger.exception(msg)
84 return 1
85
86
87 def add_parser(subparsers, parent_parser):
88 DAG_HELP = "Visualize DVC project DAG."
89 dag_parser = subparsers.add_parser(
90 "dag",
91 parents=[parent_parser],
92 description=append_doc_link(DAG_HELP, "dag"),
93 help=DAG_HELP,
94 formatter_class=argparse.RawDescriptionHelpFormatter,
95 )
96 dag_parser.add_argument(
97 "--dot",
98 action="store_true",
99 default=False,
100 help="Print DAG with .dot format.",
101 )
102 dag_parser.add_argument(
103 "--full",
104 action="store_true",
105 default=False,
106 help=(
107 "Show full DAG that the target belongs too, instead of "
108 "showing DAG consisting only of ancestors."
109 ),
110 )
111 dag_parser.add_argument(
112 "target",
113 nargs="?",
114 help="Stage or output to show pipeline for (optional). "
115 "Finds all stages in the workspace by default.",
116 )
117 dag_parser.set_defaults(func=CmdDAG)
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/command/dag.py b/dvc/command/dag.py
--- a/dvc/command/dag.py
+++ b/dvc/command/dag.py
@@ -30,7 +30,7 @@
return dot_file.getvalue()
-def _build(G, target=None, full=False):
+def _build(G, target=None, full=False, outs=False):
import networkx as nx
from dvc.repo.graph import get_pipeline, get_pipelines
@@ -44,8 +44,25 @@
else:
H = G
- def _relabel(stage):
- return stage.addressing
+ if outs:
+ G = nx.DiGraph()
+ for stage in H.nodes:
+ G.add_nodes_from(stage.outs)
+
+ for from_stage, to_stage in nx.edge_dfs(H):
+ G.add_edges_from(
+ [
+ (from_out, to_out)
+ for from_out in from_stage.outs
+ for to_out in to_stage.outs
+ ]
+ )
+ H = G
+
+ def _relabel(node):
+ from dvc.stage import Stage
+
+ return node.addressing if isinstance(node, Stage) else str(node)
return nx.relabel_nodes(H, _relabel, copy=False)
@@ -64,7 +81,12 @@
return 1
target = stages[0]
- G = _build(self.repo.graph, target=target, full=self.args.full,)
+ G = _build(
+ self.repo.graph,
+ target=target,
+ full=self.args.full,
+ outs=self.args.outs,
+ )
if self.args.dot:
logger.info(_show_dot(G))
@@ -108,6 +130,13 @@
"showing DAG consisting only of ancestors."
),
)
+ dag_parser.add_argument(
+ "-o",
+ "--outs",
+ action="store_true",
+ default=False,
+ help="Print output files instead of stages.",
+ )
dag_parser.add_argument(
"target",
nargs="?",
| {"golden_diff": "diff --git a/dvc/command/dag.py b/dvc/command/dag.py\n--- a/dvc/command/dag.py\n+++ b/dvc/command/dag.py\n@@ -30,7 +30,7 @@\n return dot_file.getvalue()\n \n \n-def _build(G, target=None, full=False):\n+def _build(G, target=None, full=False, outs=False):\n import networkx as nx\n \n from dvc.repo.graph import get_pipeline, get_pipelines\n@@ -44,8 +44,25 @@\n else:\n H = G\n \n- def _relabel(stage):\n- return stage.addressing\n+ if outs:\n+ G = nx.DiGraph()\n+ for stage in H.nodes:\n+ G.add_nodes_from(stage.outs)\n+\n+ for from_stage, to_stage in nx.edge_dfs(H):\n+ G.add_edges_from(\n+ [\n+ (from_out, to_out)\n+ for from_out in from_stage.outs\n+ for to_out in to_stage.outs\n+ ]\n+ )\n+ H = G\n+\n+ def _relabel(node):\n+ from dvc.stage import Stage\n+\n+ return node.addressing if isinstance(node, Stage) else str(node)\n \n return nx.relabel_nodes(H, _relabel, copy=False)\n \n@@ -64,7 +81,12 @@\n return 1\n target = stages[0]\n \n- G = _build(self.repo.graph, target=target, full=self.args.full,)\n+ G = _build(\n+ self.repo.graph,\n+ target=target,\n+ full=self.args.full,\n+ outs=self.args.outs,\n+ )\n \n if self.args.dot:\n logger.info(_show_dot(G))\n@@ -108,6 +130,13 @@\n \"showing DAG consisting only of ancestors.\"\n ),\n )\n+ dag_parser.add_argument(\n+ \"-o\",\n+ \"--outs\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"Print output files instead of stages.\",\n+ )\n dag_parser.add_argument(\n \"target\",\n nargs=\"?\",\n", "issue": "dvc dag --outs\nIn the `0.xx` version days, there was a `dvc pipeline show --outs` command that was able to show a DAG that included outputs. \r\nI thought this was a really nice feature. For instance, I have a `train_test_split` stage that creates two outputs, `train` and `test`, and it would be nice to see them taking different paths in the DAG.\r\nCan you maybe (re)implement this feature?\r\nThanks!\ndvc dag --outs\nIn the `0.xx` version days, there was a `dvc pipeline show --outs` command that was able to show a DAG that included outputs. \r\nI thought this was a really nice feature. 
For instance, I have a `train_test_split` stage that creates two outputs, `train` and `test`, and it would be nice to see them taking different paths in the DAG.\r\nCan you maybe (re)implement this feature?\r\nThanks!\n", "before_files": [{"content": "import argparse\nimport logging\n\nfrom dvc.command.base import CmdBase, append_doc_link\nfrom dvc.exceptions import DvcException\n\nlogger = logging.getLogger(__name__)\n\n\ndef _show_ascii(G):\n from dvc.dagascii import draw\n from dvc.repo.graph import get_pipelines\n\n pipelines = get_pipelines(G)\n\n ret = []\n for pipeline in pipelines:\n ret.append(draw(pipeline.nodes, pipeline.edges))\n\n return \"\\n\".join(ret)\n\n\ndef _show_dot(G):\n import io\n\n from networkx.drawing.nx_pydot import write_dot\n\n dot_file = io.StringIO()\n write_dot(G, dot_file)\n return dot_file.getvalue()\n\n\ndef _build(G, target=None, full=False):\n import networkx as nx\n\n from dvc.repo.graph import get_pipeline, get_pipelines\n\n if target:\n H = get_pipeline(get_pipelines(G), target)\n if not full:\n descendants = nx.descendants(G, target)\n descendants.add(target)\n H.remove_nodes_from(set(G.nodes()) - descendants)\n else:\n H = G\n\n def _relabel(stage):\n return stage.addressing\n\n return nx.relabel_nodes(H, _relabel, copy=False)\n\n\nclass CmdDAG(CmdBase):\n def run(self):\n try:\n target = None\n if self.args.target:\n stages = self.repo.collect(self.args.target)\n if len(stages) > 1:\n logger.error(\n f\"'{self.args.target}' contains more than one stage \"\n \"{stages}, please specify one stage\"\n )\n return 1\n target = stages[0]\n\n G = _build(self.repo.graph, target=target, full=self.args.full,)\n\n if self.args.dot:\n logger.info(_show_dot(G))\n else:\n from dvc.utils.pager import pager\n\n pager(_show_ascii(G))\n\n return 0\n except DvcException:\n msg = \"failed to show \"\n if self.args.target:\n msg += f\"a pipeline for '{target}'\"\n else:\n msg += \"pipelines\"\n logger.exception(msg)\n return 1\n\n\ndef add_parser(subparsers, parent_parser):\n DAG_HELP = \"Visualize DVC project DAG.\"\n dag_parser = subparsers.add_parser(\n \"dag\",\n parents=[parent_parser],\n description=append_doc_link(DAG_HELP, \"dag\"),\n help=DAG_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n dag_parser.add_argument(\n \"--dot\",\n action=\"store_true\",\n default=False,\n help=\"Print DAG with .dot format.\",\n )\n dag_parser.add_argument(\n \"--full\",\n action=\"store_true\",\n default=False,\n help=(\n \"Show full DAG that the target belongs too, instead of \"\n \"showing DAG consisting only of ancestors.\"\n ),\n )\n dag_parser.add_argument(\n \"target\",\n nargs=\"?\",\n help=\"Stage or output to show pipeline for (optional). 
\"\n \"Finds all stages in the workspace by default.\",\n )\n dag_parser.set_defaults(func=CmdDAG)\n", "path": "dvc/command/dag.py"}], "after_files": [{"content": "import argparse\nimport logging\n\nfrom dvc.command.base import CmdBase, append_doc_link\nfrom dvc.exceptions import DvcException\n\nlogger = logging.getLogger(__name__)\n\n\ndef _show_ascii(G):\n from dvc.dagascii import draw\n from dvc.repo.graph import get_pipelines\n\n pipelines = get_pipelines(G)\n\n ret = []\n for pipeline in pipelines:\n ret.append(draw(pipeline.nodes, pipeline.edges))\n\n return \"\\n\".join(ret)\n\n\ndef _show_dot(G):\n import io\n\n from networkx.drawing.nx_pydot import write_dot\n\n dot_file = io.StringIO()\n write_dot(G, dot_file)\n return dot_file.getvalue()\n\n\ndef _build(G, target=None, full=False, outs=False):\n import networkx as nx\n\n from dvc.repo.graph import get_pipeline, get_pipelines\n\n if target:\n H = get_pipeline(get_pipelines(G), target)\n if not full:\n descendants = nx.descendants(G, target)\n descendants.add(target)\n H.remove_nodes_from(set(G.nodes()) - descendants)\n else:\n H = G\n\n if outs:\n G = nx.DiGraph()\n for stage in H.nodes:\n G.add_nodes_from(stage.outs)\n\n for from_stage, to_stage in nx.edge_dfs(H):\n G.add_edges_from(\n [\n (from_out, to_out)\n for from_out in from_stage.outs\n for to_out in to_stage.outs\n ]\n )\n H = G\n\n def _relabel(node):\n from dvc.stage import Stage\n\n return node.addressing if isinstance(node, Stage) else str(node)\n\n return nx.relabel_nodes(H, _relabel, copy=False)\n\n\nclass CmdDAG(CmdBase):\n def run(self):\n try:\n target = None\n if self.args.target:\n stages = self.repo.collect(self.args.target)\n if len(stages) > 1:\n logger.error(\n f\"'{self.args.target}' contains more than one stage \"\n \"{stages}, please specify one stage\"\n )\n return 1\n target = stages[0]\n\n G = _build(\n self.repo.graph,\n target=target,\n full=self.args.full,\n outs=self.args.outs,\n )\n\n if self.args.dot:\n logger.info(_show_dot(G))\n else:\n from dvc.utils.pager import pager\n\n pager(_show_ascii(G))\n\n return 0\n except DvcException:\n msg = \"failed to show \"\n if self.args.target:\n msg += f\"a pipeline for '{target}'\"\n else:\n msg += \"pipelines\"\n logger.exception(msg)\n return 1\n\n\ndef add_parser(subparsers, parent_parser):\n DAG_HELP = \"Visualize DVC project DAG.\"\n dag_parser = subparsers.add_parser(\n \"dag\",\n parents=[parent_parser],\n description=append_doc_link(DAG_HELP, \"dag\"),\n help=DAG_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n dag_parser.add_argument(\n \"--dot\",\n action=\"store_true\",\n default=False,\n help=\"Print DAG with .dot format.\",\n )\n dag_parser.add_argument(\n \"--full\",\n action=\"store_true\",\n default=False,\n help=(\n \"Show full DAG that the target belongs too, instead of \"\n \"showing DAG consisting only of ancestors.\"\n ),\n )\n dag_parser.add_argument(\n \"-o\",\n \"--outs\",\n action=\"store_true\",\n default=False,\n help=\"Print output files instead of stages.\",\n )\n dag_parser.add_argument(\n \"target\",\n nargs=\"?\",\n help=\"Stage or output to show pipeline for (optional). \"\n \"Finds all stages in the workspace by default.\",\n )\n dag_parser.set_defaults(func=CmdDAG)\n", "path": "dvc/command/dag.py"}]} | 1,394 | 471 |
gh_patches_debug_32659 | rasdani/github-patches | git_diff | pypi__warehouse-12343 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 3.1 classifier filtering is broken
When [filtering by the `Programming Language :: Python :: 3.1` classifier on pypi.org][1], the results include packages which don't have that classifier - any package with a classifier matching `Programming Language :: Python :: 3.1*` is included. That is, packages for 3.10, 3.11, 3.12, and so on are included in the results.
[1]: https://pypi.org/search/?q=&o=&c=Programming+Language+%3A%3A+Python+%3A%3A+3.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/search/queries.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import re
14
15 from elasticsearch_dsl import Q
16
17 SEARCH_FIELDS = [
18 "author",
19 "author_email",
20 "description",
21 "download_url",
22 "home_page",
23 "keywords",
24 "license",
25 "maintainer",
26 "maintainer_email",
27 "normalized_name",
28 "platform",
29 "summary",
30 ]
31 SEARCH_BOOSTS = {
32 "name": 10,
33 "normalized_name": 10,
34 "description": 5,
35 "keywords": 5,
36 "summary": 5,
37 }
38 SEARCH_FILTER_ORDER = (
39 "Framework",
40 "Topic",
41 "Development Status",
42 "License",
43 "Programming Language",
44 "Operating System",
45 "Environment",
46 "Intended Audience",
47 "Natural Language",
48 )
49
50
51 def get_es_query(es, terms, order, classifiers):
52 """
53 Returns an Elasticsearch query from data from the request.
54 """
55 if not terms:
56 query = es.query()
57 else:
58 bool_query = gather_es_queries(terms)
59 query = es.query(bool_query)
60 query = query.suggest("name_suggestion", terms, term={"field": "name"})
61
62 # Require match to all specified classifiers
63 for classifier in classifiers:
64 query = query.query("prefix", classifiers=classifier)
65
66 query = query_for_order(query, order)
67 return query
68
69
70 def gather_es_queries(q):
71 quoted_string, unquoted_string = filter_query(q)
72 must = [form_query("phrase", i) for i in quoted_string] + [
73 form_query("best_fields", i) for i in unquoted_string
74 ]
75
76 bool_query = Q("bool", must=must)
77
78 # Allow to optionally match on prefix
79 # if ``q`` is longer than one character.
80 if len(q) > 1:
81 bool_query = bool_query | Q("prefix", normalized_name=q)
82 return bool_query
83
84
85 def filter_query(s):
86 """
87 Filters given query with the below regex
88 and returns lists of quoted and unquoted strings
89 """
90 matches = re.findall(r'(?:"([^"]*)")|([^"]*)', s)
91 result_quoted = [t[0].strip() for t in matches if t[0]]
92 result_unquoted = [t[1].strip() for t in matches if t[1]]
93 return result_quoted, result_unquoted
94
95
96 def form_query(query_type, query):
97 """
98 Returns a multi match query
99 """
100 fields = [
101 field + "^" + str(SEARCH_BOOSTS[field]) if field in SEARCH_BOOSTS else field
102 for field in SEARCH_FIELDS
103 ]
104 return Q("multi_match", fields=fields, query=query, type=query_type)
105
106
107 def query_for_order(query, order):
108 """
109 Applies transformations on the ES query based on the search order.
110
111 Order is assumed to be a string with the name of a field with an optional
112 hyphen to indicate descending sort order.
113 """
114 if order == "": # relevance should not sort
115 return query
116
117 field = order[order.find("-") + 1 :]
118 sort_info = {
119 field: {
120 "order": "desc" if order.startswith("-") else "asc",
121 "unmapped_type": "long",
122 }
123 }
124 query = query.sort(sort_info)
125 return query
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/search/queries.py b/warehouse/search/queries.py
--- a/warehouse/search/queries.py
+++ b/warehouse/search/queries.py
@@ -52,36 +52,45 @@
"""
Returns an Elasticsearch query from data from the request.
"""
+ classifier_q = Q(
+ "bool",
+ # Theh results must have all selected classifiers
+ must=[
+ Q(
+ "bool",
+ should=[
+ # Term search for the exact classifier
+ Q("term", classifiers=classifier),
+ # Prefix search for potential children classifiers
+ Q("prefix", classifiers=classifier + " :: "),
+ ],
+ )
+ for classifier in classifiers
+ ],
+ )
if not terms:
- query = es.query()
+ query = es.query(classifier_q) if classifiers else es.query()
else:
- bool_query = gather_es_queries(terms)
+ quoted_string, unquoted_string = filter_query(terms)
+ bool_query = Q(
+ "bool",
+ must=[form_query("phrase", i) for i in quoted_string]
+ + [form_query("best_fields", i) for i in unquoted_string]
+ + ([classifier_q] if classifiers else []),
+ )
+
+ # Allow to optionally match on prefix
+ # if ``q`` is longer than one character.
+ if len(terms) > 1:
+ bool_query = bool_query | Q("prefix", normalized_name=terms)
+
query = es.query(bool_query)
query = query.suggest("name_suggestion", terms, term={"field": "name"})
- # Require match to all specified classifiers
- for classifier in classifiers:
- query = query.query("prefix", classifiers=classifier)
-
query = query_for_order(query, order)
return query
-def gather_es_queries(q):
- quoted_string, unquoted_string = filter_query(q)
- must = [form_query("phrase", i) for i in quoted_string] + [
- form_query("best_fields", i) for i in unquoted_string
- ]
-
- bool_query = Q("bool", must=must)
-
- # Allow to optionally match on prefix
- # if ``q`` is longer than one character.
- if len(q) > 1:
- bool_query = bool_query | Q("prefix", normalized_name=q)
- return bool_query
-
-
def filter_query(s):
"""
Filters given query with the below regex
| {"golden_diff": "diff --git a/warehouse/search/queries.py b/warehouse/search/queries.py\n--- a/warehouse/search/queries.py\n+++ b/warehouse/search/queries.py\n@@ -52,36 +52,45 @@\n \"\"\"\n Returns an Elasticsearch query from data from the request.\n \"\"\"\n+ classifier_q = Q(\n+ \"bool\",\n+ # Theh results must have all selected classifiers\n+ must=[\n+ Q(\n+ \"bool\",\n+ should=[\n+ # Term search for the exact classifier\n+ Q(\"term\", classifiers=classifier),\n+ # Prefix search for potential children classifiers\n+ Q(\"prefix\", classifiers=classifier + \" :: \"),\n+ ],\n+ )\n+ for classifier in classifiers\n+ ],\n+ )\n if not terms:\n- query = es.query()\n+ query = es.query(classifier_q) if classifiers else es.query()\n else:\n- bool_query = gather_es_queries(terms)\n+ quoted_string, unquoted_string = filter_query(terms)\n+ bool_query = Q(\n+ \"bool\",\n+ must=[form_query(\"phrase\", i) for i in quoted_string]\n+ + [form_query(\"best_fields\", i) for i in unquoted_string]\n+ + ([classifier_q] if classifiers else []),\n+ )\n+\n+ # Allow to optionally match on prefix\n+ # if ``q`` is longer than one character.\n+ if len(terms) > 1:\n+ bool_query = bool_query | Q(\"prefix\", normalized_name=terms)\n+\n query = es.query(bool_query)\n query = query.suggest(\"name_suggestion\", terms, term={\"field\": \"name\"})\n \n- # Require match to all specified classifiers\n- for classifier in classifiers:\n- query = query.query(\"prefix\", classifiers=classifier)\n-\n query = query_for_order(query, order)\n return query\n \n \n-def gather_es_queries(q):\n- quoted_string, unquoted_string = filter_query(q)\n- must = [form_query(\"phrase\", i) for i in quoted_string] + [\n- form_query(\"best_fields\", i) for i in unquoted_string\n- ]\n-\n- bool_query = Q(\"bool\", must=must)\n-\n- # Allow to optionally match on prefix\n- # if ``q`` is longer than one character.\n- if len(q) > 1:\n- bool_query = bool_query | Q(\"prefix\", normalized_name=q)\n- return bool_query\n-\n-\n def filter_query(s):\n \"\"\"\n Filters given query with the below regex\n", "issue": "Python 3.1 classifier filtering is broken\nWhen [filtering by the `Programming Language :: Python :: 3.1` classifier on pypi.org][1], the results include packages which don't have that classifier - any package with a classifier matching `Programming Language :: Python :: 3.1*` is included. 
That is, packages for 3.10, 3.11, 3.12, and so on are included in the results.\r\n\r\n[1]: https://pypi.org/search/?q=&o=&c=Programming+Language+%3A%3A+Python+%3A%3A+3.1\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\n\nfrom elasticsearch_dsl import Q\n\nSEARCH_FIELDS = [\n \"author\",\n \"author_email\",\n \"description\",\n \"download_url\",\n \"home_page\",\n \"keywords\",\n \"license\",\n \"maintainer\",\n \"maintainer_email\",\n \"normalized_name\",\n \"platform\",\n \"summary\",\n]\nSEARCH_BOOSTS = {\n \"name\": 10,\n \"normalized_name\": 10,\n \"description\": 5,\n \"keywords\": 5,\n \"summary\": 5,\n}\nSEARCH_FILTER_ORDER = (\n \"Framework\",\n \"Topic\",\n \"Development Status\",\n \"License\",\n \"Programming Language\",\n \"Operating System\",\n \"Environment\",\n \"Intended Audience\",\n \"Natural Language\",\n)\n\n\ndef get_es_query(es, terms, order, classifiers):\n \"\"\"\n Returns an Elasticsearch query from data from the request.\n \"\"\"\n if not terms:\n query = es.query()\n else:\n bool_query = gather_es_queries(terms)\n query = es.query(bool_query)\n query = query.suggest(\"name_suggestion\", terms, term={\"field\": \"name\"})\n\n # Require match to all specified classifiers\n for classifier in classifiers:\n query = query.query(\"prefix\", classifiers=classifier)\n\n query = query_for_order(query, order)\n return query\n\n\ndef gather_es_queries(q):\n quoted_string, unquoted_string = filter_query(q)\n must = [form_query(\"phrase\", i) for i in quoted_string] + [\n form_query(\"best_fields\", i) for i in unquoted_string\n ]\n\n bool_query = Q(\"bool\", must=must)\n\n # Allow to optionally match on prefix\n # if ``q`` is longer than one character.\n if len(q) > 1:\n bool_query = bool_query | Q(\"prefix\", normalized_name=q)\n return bool_query\n\n\ndef filter_query(s):\n \"\"\"\n Filters given query with the below regex\n and returns lists of quoted and unquoted strings\n \"\"\"\n matches = re.findall(r'(?:\"([^\"]*)\")|([^\"]*)', s)\n result_quoted = [t[0].strip() for t in matches if t[0]]\n result_unquoted = [t[1].strip() for t in matches if t[1]]\n return result_quoted, result_unquoted\n\n\ndef form_query(query_type, query):\n \"\"\"\n Returns a multi match query\n \"\"\"\n fields = [\n field + \"^\" + str(SEARCH_BOOSTS[field]) if field in SEARCH_BOOSTS else field\n for field in SEARCH_FIELDS\n ]\n return Q(\"multi_match\", fields=fields, query=query, type=query_type)\n\n\ndef query_for_order(query, order):\n \"\"\"\n Applies transformations on the ES query based on the search order.\n\n Order is assumed to be a string with the name of a field with an optional\n hyphen to indicate descending sort order.\n \"\"\"\n if order == \"\": # relevance should not sort\n return query\n\n field = order[order.find(\"-\") + 1 :]\n sort_info = {\n field: {\n \"order\": \"desc\" if order.startswith(\"-\") else \"asc\",\n \"unmapped_type\": \"long\",\n }\n }\n query = query.sort(sort_info)\n return query\n", "path": 
"warehouse/search/queries.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\n\nfrom elasticsearch_dsl import Q\n\nSEARCH_FIELDS = [\n \"author\",\n \"author_email\",\n \"description\",\n \"download_url\",\n \"home_page\",\n \"keywords\",\n \"license\",\n \"maintainer\",\n \"maintainer_email\",\n \"normalized_name\",\n \"platform\",\n \"summary\",\n]\nSEARCH_BOOSTS = {\n \"name\": 10,\n \"normalized_name\": 10,\n \"description\": 5,\n \"keywords\": 5,\n \"summary\": 5,\n}\nSEARCH_FILTER_ORDER = (\n \"Framework\",\n \"Topic\",\n \"Development Status\",\n \"License\",\n \"Programming Language\",\n \"Operating System\",\n \"Environment\",\n \"Intended Audience\",\n \"Natural Language\",\n)\n\n\ndef get_es_query(es, terms, order, classifiers):\n \"\"\"\n Returns an Elasticsearch query from data from the request.\n \"\"\"\n classifier_q = Q(\n \"bool\",\n # Theh results must have all selected classifiers\n must=[\n Q(\n \"bool\",\n should=[\n # Term search for the exact classifier\n Q(\"term\", classifiers=classifier),\n # Prefix search for potential children classifiers\n Q(\"prefix\", classifiers=classifier + \" :: \"),\n ],\n )\n for classifier in classifiers\n ],\n )\n if not terms:\n query = es.query(classifier_q) if classifiers else es.query()\n else:\n quoted_string, unquoted_string = filter_query(terms)\n bool_query = Q(\n \"bool\",\n must=[form_query(\"phrase\", i) for i in quoted_string]\n + [form_query(\"best_fields\", i) for i in unquoted_string]\n + ([classifier_q] if classifiers else []),\n )\n\n # Allow to optionally match on prefix\n # if ``q`` is longer than one character.\n if len(terms) > 1:\n bool_query = bool_query | Q(\"prefix\", normalized_name=terms)\n\n query = es.query(bool_query)\n query = query.suggest(\"name_suggestion\", terms, term={\"field\": \"name\"})\n\n query = query_for_order(query, order)\n return query\n\n\ndef filter_query(s):\n \"\"\"\n Filters given query with the below regex\n and returns lists of quoted and unquoted strings\n \"\"\"\n matches = re.findall(r'(?:\"([^\"]*)\")|([^\"]*)', s)\n result_quoted = [t[0].strip() for t in matches if t[0]]\n result_unquoted = [t[1].strip() for t in matches if t[1]]\n return result_quoted, result_unquoted\n\n\ndef form_query(query_type, query):\n \"\"\"\n Returns a multi match query\n \"\"\"\n fields = [\n field + \"^\" + str(SEARCH_BOOSTS[field]) if field in SEARCH_BOOSTS else field\n for field in SEARCH_FIELDS\n ]\n return Q(\"multi_match\", fields=fields, query=query, type=query_type)\n\n\ndef query_for_order(query, order):\n \"\"\"\n Applies transformations on the ES query based on the search order.\n\n Order is assumed to be a string with the name of a field with an optional\n hyphen to indicate descending sort order.\n \"\"\"\n if order == \"\": # relevance should not sort\n return query\n\n field = order[order.find(\"-\") + 1 :]\n sort_info = {\n field: {\n \"order\": \"desc\" if order.startswith(\"-\") else \"asc\",\n \"unmapped_type\": \"long\",\n }\n }\n query = 
query.sort(sort_info)\n return query\n", "path": "warehouse/search/queries.py"}]} | 1,512 | 561 |
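The fix in this record works because an exact term match plus a prefix match on `classifier + " :: "` can no longer be satisfied by a sibling such as `3.10`. The snippet below mimics the two matching rules in plain Python to make the difference concrete; it is not Elasticsearch code, and the third classifier is hypothetical.

```python
def old_match(selected: str, classifier: str) -> bool:
    # Pre-patch semantics: bare prefix match.
    return classifier.startswith(selected)


def new_match(selected: str, classifier: str) -> bool:
    # Post-patch semantics: exact term OR a true child classifier.
    return classifier == selected or classifier.startswith(selected + " :: ")


selected = "Programming Language :: Python :: 3.1"
for classifier in (
    "Programming Language :: Python :: 3.1",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.1 :: Hypothetical Child",
):
    print(f"{classifier!r}: old={old_match(selected, classifier)} new={new_match(selected, classifier)}")
```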
gh_patches_debug_29422 | rasdani/github-patches | git_diff | freedomofpress__securedrop-7035 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
determine post-upgrade failure-mode for a SHA-1-signed submission key
## Description
After #6948 (for #6399), redwood will refuse to encrypt to a submission key with a SHA-1 signature.
After #6928, `securedrop-admin sdconfig` will reject a submission key with a SHA-1 signature. This check guarantees that new and reconfigured instances will comply with #6948.
What will happen to an instance with a SHA-1-signed signature after upgrading to v2.7.0?
## Possible approaches
| Option | Documentation changes | Code changes | Implication |
| --- | --- | --- | --- |
| Fail open, but log | optional | ✓ | Admin must monitor logs and/or OSSEC alerts. |
| Fail open, but document | ✓ | ✗ | Admin must monitor release notes or check documentation. |
| Fail closed | optional | ✓[1] | Admin can contact us for help. |
**Notes:**
1. @legoktm observes that, without a code change to handle this case, Apache will come back up after reboot even if the `postinst` script fails under `unattended-upgrades`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/journalist.py`
Content:
```
1 from encryption import EncryptionManager, GpgKeyNotFoundError
2 from execution import asynchronous
3 from journalist_app import create_app
4 from models import Source
5 from sdconfig import SecureDropConfig
6
7 config = SecureDropConfig.get_current()
8 # app is imported by journalist.wsgi
9 app = create_app(config)
10
11
12 @asynchronous
13 def prime_keycache() -> None:
14 """Pre-load the source public keys into Redis."""
15 with app.app_context():
16 encryption_mgr = EncryptionManager.get_default()
17 for source in Source.query.filter_by(pending=False, deleted_at=None).all():
18 try:
19 encryption_mgr.get_source_public_key(source.filesystem_id)
20 except GpgKeyNotFoundError:
21 pass
22
23
24 prime_keycache()
25
26
27 if __name__ == "__main__": # pragma: no cover
28 debug = getattr(config, "env", "prod") != "prod"
29 # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host
30 app.run(debug=debug, host="0.0.0.0", port=8081)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -1,9 +1,13 @@
+import sys
+
from encryption import EncryptionManager, GpgKeyNotFoundError
from execution import asynchronous
from journalist_app import create_app
from models import Source
from sdconfig import SecureDropConfig
+import redwood
+
config = SecureDropConfig.get_current()
# app is imported by journalist.wsgi
app = create_app(config)
@@ -21,10 +25,28 @@
pass
-prime_keycache()
+def validate_journalist_key() -> None:
+ """Verify the journalist PGP key is valid"""
+ encryption_mgr = EncryptionManager.get_default()
+ # First check that we can read it
+ try:
+ journalist_key = encryption_mgr.get_journalist_public_key()
+ except Exception as e:
+ print(f"ERROR: Unable to read journalist public key: {e}", file=sys.stderr)
+ app.logger.error(f"ERROR: Unable to read journalist public key: {e}")
+ sys.exit(1)
+ # And then what we read is valid
+ try:
+ redwood.is_valid_public_key(journalist_key)
+ except redwood.RedwoodError as e:
+ print(f"ERROR: Journalist public key is not valid: {e}", file=sys.stderr)
+ app.logger.error(f"ERROR: Journalist public key is not valid: {e}")
+ sys.exit(1)
if __name__ == "__main__": # pragma: no cover
+ validate_journalist_key()
+ prime_keycache()
debug = getattr(config, "env", "prod") != "prod"
# nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host
app.run(debug=debug, host="0.0.0.0", port=8081)
| {"golden_diff": "diff --git a/securedrop/journalist.py b/securedrop/journalist.py\n--- a/securedrop/journalist.py\n+++ b/securedrop/journalist.py\n@@ -1,9 +1,13 @@\n+import sys\n+\n from encryption import EncryptionManager, GpgKeyNotFoundError\n from execution import asynchronous\n from journalist_app import create_app\n from models import Source\n from sdconfig import SecureDropConfig\n \n+import redwood\n+\n config = SecureDropConfig.get_current()\n # app is imported by journalist.wsgi\n app = create_app(config)\n@@ -21,10 +25,28 @@\n pass\n \n \n-prime_keycache()\n+def validate_journalist_key() -> None:\n+ \"\"\"Verify the journalist PGP key is valid\"\"\"\n+ encryption_mgr = EncryptionManager.get_default()\n+ # First check that we can read it\n+ try:\n+ journalist_key = encryption_mgr.get_journalist_public_key()\n+ except Exception as e:\n+ print(f\"ERROR: Unable to read journalist public key: {e}\", file=sys.stderr)\n+ app.logger.error(f\"ERROR: Unable to read journalist public key: {e}\")\n+ sys.exit(1)\n+ # And then what we read is valid\n+ try:\n+ redwood.is_valid_public_key(journalist_key)\n+ except redwood.RedwoodError as e:\n+ print(f\"ERROR: Journalist public key is not valid: {e}\", file=sys.stderr)\n+ app.logger.error(f\"ERROR: Journalist public key is not valid: {e}\")\n+ sys.exit(1)\n \n \n if __name__ == \"__main__\": # pragma: no cover\n+ validate_journalist_key()\n+ prime_keycache()\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8081)\n", "issue": "determine post-upgrade failure-mode for a SHA-1-signed submission key\n## Description\r\n\r\nAfter #6948 (for #6399), redwood will refuse to encrypt to a submission key with a SHA-1 signature.\r\n\r\nAfter #6928, `securedrop-admin sdconfig` will reject a submission key with a SHA-1 signature. This check guarantees that new and reconfigured instances will comply with #6948.\r\n\r\nWhat will happen to an instance with a SHA-1-signed signature after upgrading to v2.7.0?\r\n\r\n## Possible approaches\r\n\r\n| Option | Documentation changes | Code changes | Implication |\r\n| --- | --- | --- | --- |\r\n| Fail open, but log | optional | \u2713 | Admin must monitor logs and/or OSSEC alerts. |\r\n| Fail open, but document | \u2713 | \u2717 | Admin must monitor release notes or check documentation. |\r\n| Fail closed | optional | \u2713[1] | Admin can contact us for help. |\r\n\r\n**Notes:**\r\n1. 
@legoktm observes that, without a code change to handle this case, Apache will come back up after reboot even if the `postinst` script fails under `unattended-upgrades`.\n", "before_files": [{"content": "from encryption import EncryptionManager, GpgKeyNotFoundError\nfrom execution import asynchronous\nfrom journalist_app import create_app\nfrom models import Source\nfrom sdconfig import SecureDropConfig\n\nconfig = SecureDropConfig.get_current()\n# app is imported by journalist.wsgi\napp = create_app(config)\n\n\n@asynchronous\ndef prime_keycache() -> None:\n \"\"\"Pre-load the source public keys into Redis.\"\"\"\n with app.app_context():\n encryption_mgr = EncryptionManager.get_default()\n for source in Source.query.filter_by(pending=False, deleted_at=None).all():\n try:\n encryption_mgr.get_source_public_key(source.filesystem_id)\n except GpgKeyNotFoundError:\n pass\n\n\nprime_keycache()\n\n\nif __name__ == \"__main__\": # pragma: no cover\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8081)\n", "path": "securedrop/journalist.py"}], "after_files": [{"content": "import sys\n\nfrom encryption import EncryptionManager, GpgKeyNotFoundError\nfrom execution import asynchronous\nfrom journalist_app import create_app\nfrom models import Source\nfrom sdconfig import SecureDropConfig\n\nimport redwood\n\nconfig = SecureDropConfig.get_current()\n# app is imported by journalist.wsgi\napp = create_app(config)\n\n\n@asynchronous\ndef prime_keycache() -> None:\n \"\"\"Pre-load the source public keys into Redis.\"\"\"\n with app.app_context():\n encryption_mgr = EncryptionManager.get_default()\n for source in Source.query.filter_by(pending=False, deleted_at=None).all():\n try:\n encryption_mgr.get_source_public_key(source.filesystem_id)\n except GpgKeyNotFoundError:\n pass\n\n\ndef validate_journalist_key() -> None:\n \"\"\"Verify the journalist PGP key is valid\"\"\"\n encryption_mgr = EncryptionManager.get_default()\n # First check that we can read it\n try:\n journalist_key = encryption_mgr.get_journalist_public_key()\n except Exception as e:\n print(f\"ERROR: Unable to read journalist public key: {e}\", file=sys.stderr)\n app.logger.error(f\"ERROR: Unable to read journalist public key: {e}\")\n sys.exit(1)\n # And then what we read is valid\n try:\n redwood.is_valid_public_key(journalist_key)\n except redwood.RedwoodError as e:\n print(f\"ERROR: Journalist public key is not valid: {e}\", file=sys.stderr)\n app.logger.error(f\"ERROR: Journalist public key is not valid: {e}\")\n sys.exit(1)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n validate_journalist_key()\n prime_keycache()\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8081)\n", "path": "securedrop/journalist.py"}]} | 797 | 440 |
gh_patches_debug_7869 | rasdani/github-patches | git_diff | freedomofpress__securedrop-3756 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Builder image needs updates
## Description
sd-builder image needs to be updated.
## Steps to Reproduce
`make build-debs` and observe error.
## Expected Behavior
`make build-debs` should exit without error.
## Actual Behavior
`make-build-debs` returns an error, security updates are needed for the container.
## Comments
Instructions are available here:
https://docs.securedrop.org/en/latest/development/dockerbuildmaint.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/version.py`
Content:
```
1 __version__ = '0.9.0~rc1'
2
```
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # SecureDrop documentation build configuration file, created by
4 # sphinx-quickstart on Tue Oct 13 12:08:52 2015.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 import os
16
17 # Detect if we're being built by Read the Docs
18 # https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs
19 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
20
21 # If extensions (or modules to document with autodoc) are in another directory,
22 # add these directories to sys.path here. If the directory is relative to the
23 # documentation root, use os.path.abspath to make it absolute, like shown here.
24 # sys.path.insert(0, os.path.abspath('.'))
25
26 # -- General configuration ------------------------------------------------
27
28 # If your documentation needs a minimal Sphinx version, state it here.
29 # needs_sphinx = '1.0'
30
31 # Add any Sphinx extension module names here, as strings. They can be
32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
33 # ones.
34 extensions = ['sphinx.ext.todo', ]
35
36 # Add any paths that contain templates here, relative to this directory.
37 templates_path = ['_templates']
38
39 # The suffix(es) of source filenames.
40 # You can specify multiple suffix as a list of string:
41 # source_suffix = ['.rst', '.md']
42 source_suffix = '.rst'
43
44 # The encoding of source files.
45 # source_encoding = 'utf-8-sig'
46
47 # The master toctree document.
48 master_doc = 'index'
49
50 # General information about the project.
51 project = u'SecureDrop'
52 copyright = u'2017, Freedom of the Press Foundation'
53 author = u'SecureDrop Team and Contributors'
54
55 # The version info for the project you're documenting, acts as replacement for
56 # |version| and |release|, also used in various other places throughout the
57 # built documents.
58 #
59 # The short X.Y version.
60 version = '0.9.0~rc1'
61 # The full version, including alpha/beta/rc tags.
62 release = '0.9.0~rc1'
63
64 # The language for content autogenerated by Sphinx. Refer to documentation
65 # for a list of supported languages.
66 #
67 # This is also used if you do content translation via gettext catalogs.
68 # Usually you set "language" from the command line for these cases.
69 language = None
70
71 # There are two options for replacing |today|: either, you set today to some
72 # non-false value, then it is used:
73 # today = ''
74 # Else, today_fmt is used as the format for a strftime call.
75 # today_fmt = '%B %d, %Y'
76
77 # List of patterns, relative to source directory, that match files and
78 # directories to ignore when looking for source files.
79 exclude_patterns = ['_build']
80
81 # The reST default role (used for this markup: `text`) to use for all
82 # documents.
83 # default_role = None
84
85 # If true, '()' will be appended to :func: etc. cross-reference text.
86 # add_function_parentheses = True
87
88 # If true, the current module name will be prepended to all description
89 # unit titles (such as .. function::).
90 # add_module_names = True
91
92 # If true, sectionauthor and moduleauthor directives will be shown in the
93 # output. They are ignored by default.
94 # show_authors = False
95
96 # The name of the Pygments (syntax highlighting) style to use.
97 pygments_style = 'sphinx'
98
99 # A list of ignored prefixes for module index sorting.
100 # modindex_common_prefix = []
101
102 # If true, keep warnings as "system message" paragraphs in the built documents.
103 # keep_warnings = False
104
105 # If true, `todo` and `todoList` produce output, else they produce nothing.
106 todo_include_todos = False
107
108
109 # -- Options for HTML output ----------------------------------------------
110
111 # The theme to use for HTML and HTML Help pages. See the documentation for
112 # a list of builtin themes.
113 if on_rtd:
114 html_theme = 'default'
115 else:
116 try:
117 # If you want to build the docs locally using the RTD theme,
118 # you may need to install it: ``pip install sphinx_rtd_theme``.
119 # https://github.com/snide/sphinx_rtd_theme#via-package
120 import sphinx_rtd_theme
121 html_theme = "sphinx_rtd_theme"
122 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
123 except ImportError:
124 # This theme is included with Sphinx and is quite nice (based
125 # on the Pocoo themes), but since we're using the RTD theme
126 # for the production docs, it's best to use that to avoid
127 # issues due to discrepancies between the themes.
128 html_theme = 'alabaster'
129
130 # Theme options are theme-specific and customize the look and feel of a theme
131 # further. For a list of options available for each theme, see the
132 # documentation.
133 # html_theme_options = {}
134
135 # Add any paths that contain custom themes here, relative to this directory.
136 # html_theme_path = []
137
138 # The name for this set of Sphinx documents. If None, it defaults to
139 # "<project> v<release> documentation".
140 # html_title = None
141
142 # A shorter title for the navigation bar. Default is the same as html_title.
143 # html_short_title = None
144
145 # The name of an image file (relative to this directory) to place at the top
146 # of the sidebar.
147 html_logo = '../securedrop/static/i/favicon.png'
148
149 # The name of an image file (within the static path) to use as favicon of the
150 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
151 # pixels large.
152 # html_favicon = None
153
154 # Add any paths that contain custom static files (such as style sheets) here,
155 # relative to this directory. They are copied after the builtin static files,
156 # so a file named "default.css" will overwrite the builtin "default.css".
157 # html_static_path = ['_static']
158
159 # Add any extra paths that contain custom files (such as robots.txt or
160 # .htaccess) here, relative to this directory. These files are copied
161 # directly to the root of the documentation.
162 # html_extra_path = []
163
164 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
165 # using the given strftime format.
166 # html_last_updated_fmt = '%b %d, %Y'
167
168 # If true, SmartyPants will be used to convert quotes and dashes to
169 # typographically correct entities.
170 # html_use_smartypants = True
171
172 # Custom sidebar templates, maps document names to template names.
173 # html_sidebars = {}
174
175 # Additional templates that should be rendered to pages, maps page names to
176 # template names.
177 # html_additional_pages = {}
178
179 # If false, no module index is generated.
180 # html_domain_indices = True
181
182 # If false, no index is generated.
183 # html_use_index = True
184
185 # If true, the index is split into individual pages for each letter.
186 # html_split_index = False
187
188 # If true, links to the reST sources are added to the pages.
189 # html_show_sourcelink = True
190
191 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
192 # html_show_sphinx = True
193
194 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
195 # html_show_copyright = True
196
197 # If true, an OpenSearch description file will be output, and all pages will
198 # contain a <link> tag referring to it. The value of this option must be the
199 # base URL from which the finished HTML is served.
200 # html_use_opensearch = ''
201
202 # This is the file name suffix for HTML files (e.g. ".xhtml").
203 # html_file_suffix = None
204
205 # Language to be used for generating the HTML full-text search index.
206 # Sphinx supports the following languages:
207 # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
208 # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
209 # html_search_language = 'en'
210
211 # A dictionary with options for the search language support, empty by default.
212 # Now only 'ja' uses this config value
213 # html_search_options = {'type': 'default'}
214
215 # The name of a javascript file (relative to the configuration directory) that
216 # implements a search results scorer. If empty, the default will be used.
217 # html_search_scorer = 'scorer.js'
218
219 # Output file base name for HTML help builder.
220 htmlhelp_basename = 'SecureDropdoc'
221
222 # -- Options for LaTeX output ---------------------------------------------
223
224 latex_elements = {
225 # The paper size ('letterpaper' or 'a4paper').
226 # 'papersize': 'letterpaper',
227
228 # The font size ('10pt', '11pt' or '12pt').
229 # 'pointsize': '10pt',
230
231 # Additional stuff for the LaTeX preamble.
232 # 'preamble': '',
233
234 # Latex figure (float) alignment
235 # 'figure_align': 'htbp',
236 }
237
238 # Grouping the document tree into LaTeX files. List of tuples
239 # (source start file, target name, title,
240 # author, documentclass [howto, manual, or own class]).
241 latex_documents = [
242 (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',
243 author, 'manual'),
244 ]
245
246 # The name of an image file (relative to this directory) to place at the top of
247 # the title page.
248 # latex_logo = None
249
250 # For "manual" documents, if this is true, then toplevel headings are parts,
251 # not chapters.
252 # latex_use_parts = False
253
254 # If true, show page references after internal links.
255 # latex_show_pagerefs = False
256
257 # If true, show URL addresses after external links.
258 # latex_show_urls = False
259
260 # Documents to append as an appendix to all manuals.
261 # latex_appendices = []
262
263 # If false, no module index is generated.
264 # latex_domain_indices = True
265
266
267 # -- Options for manual page output ---------------------------------------
268
269 # One entry per manual page. List of tuples
270 # (source start file, name, description, authors, manual section).
271 man_pages = [
272 (master_doc, 'securedrop', u'SecureDrop Documentation',
273 [author], 1)
274 ]
275
276 # If true, show URL addresses after external links.
277 # man_show_urls = False
278
279
280 # -- Options for Texinfo output -------------------------------------------
281
282 # Grouping the document tree into Texinfo files. List of tuples
283 # (source start file, target name, title, author,
284 # dir menu entry, description, category)
285 texinfo_documents = [
286 (master_doc, 'SecureDrop', u'SecureDrop Documentation',
287 author, 'SecureDrop', 'One line description of project.',
288 'Miscellaneous'),
289 ]
290
291 # Documents to append as an appendix to all manuals.
292 # texinfo_appendices = []
293
294 # If false, no module index is generated.
295 # texinfo_domain_indices = True
296
297 # How to display URL addresses: 'footnote', 'no', or 'inline'.
298 # texinfo_show_urls = 'footnote'
299
300 # If true, do not generate a @detailmenu in the "Top" node's menu.
301 # texinfo_no_detailmenu = False
302
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -57,9 +57,9 @@
# built documents.
#
# The short X.Y version.
-version = '0.9.0~rc1'
+version = '0.9.0~rc2'
# The full version, including alpha/beta/rc tags.
-release = '0.9.0~rc1'
+release = '0.9.0~rc2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
diff --git a/securedrop/version.py b/securedrop/version.py
--- a/securedrop/version.py
+++ b/securedrop/version.py
@@ -1 +1 @@
-__version__ = '0.9.0~rc1'
+__version__ = '0.9.0~rc2'
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -57,9 +57,9 @@\n # built documents.\n #\n # The short X.Y version.\n-version = '0.9.0~rc1'\n+version = '0.9.0~rc2'\n # The full version, including alpha/beta/rc tags.\n-release = '0.9.0~rc1'\n+release = '0.9.0~rc2'\n \n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\ndiff --git a/securedrop/version.py b/securedrop/version.py\n--- a/securedrop/version.py\n+++ b/securedrop/version.py\n@@ -1 +1 @@\n-__version__ = '0.9.0~rc1'\n+__version__ = '0.9.0~rc2'\n", "issue": "Builder image needs updates\n## Description\r\n\r\nsd-builder image needs to be updated.\r\n\r\n## Steps to Reproduce\r\n\r\n`make build-debs` and observe error.\r\n\r\n## Expected Behavior\r\n\r\n`make build-debs` should exit without error.\r\n\r\n## Actual Behavior\r\n\r\n`make-build-debs` returns an error, security updates are needed for the container.\r\n## Comments\r\nInstructions are available here:\r\nhttps://docs.securedrop.org/en/latest/development/dockerbuildmaint.html\n", "before_files": [{"content": "__version__ = '0.9.0~rc1'\n", "path": "securedrop/version.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# SecureDrop documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 13 12:08:52 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\n\n# Detect if we're being built by Read the Docs\n# https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n# sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.todo', ]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'SecureDrop'\ncopyright = u'2017, Freedom of the Press Foundation'\nauthor = u'SecureDrop Team and Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.9.0~rc1'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.9.0~rc1'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nif on_rtd:\n html_theme = 'default'\nelse:\n try:\n # If you want to build the docs locally using the RTD theme,\n # you may need to install it: ``pip install sphinx_rtd_theme``.\n # https://github.com/snide/sphinx_rtd_theme#via-package\n import sphinx_rtd_theme\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n except ImportError:\n # This theme is included with Sphinx and is quite nice (based\n # on the Pocoo themes), but since we're using the RTD theme\n # for the production docs, it's best to use that to avoid\n # issues due to discrepancies between the themes.\n html_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = '../securedrop/static/i/favicon.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n# html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'SecureDropdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n\n # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',\n author, 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'securedrop', u'SecureDrop Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'SecureDrop', u'SecureDrop Documentation',\n author, 'SecureDrop', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n# texinfo_no_detailmenu = False\n", "path": "docs/conf.py"}], "after_files": [{"content": "__version__ = '0.9.0~rc2'\n", "path": "securedrop/version.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# SecureDrop documentation build configuration file, created by\n# sphinx-quickstart on Tue Oct 13 12:08:52 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\n\n# Detect if we're being built by Read the Docs\n# https://docs.readthedocs.org/en/latest/faq.html#how-do-i-change-behavior-for-read-the-docs\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n# sys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.todo', ]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'SecureDrop'\ncopyright = u'2017, Freedom of the Press Foundation'\nauthor = u'SecureDrop Team and Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.9.0~rc2'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.9.0~rc2'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nif on_rtd:\n html_theme = 'default'\nelse:\n try:\n # If you want to build the docs locally using the RTD theme,\n # you may need to install it: ``pip install sphinx_rtd_theme``.\n # https://github.com/snide/sphinx_rtd_theme#via-package\n import sphinx_rtd_theme\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n except ImportError:\n # This theme is included with Sphinx and is quite nice (based\n # on the Pocoo themes), but since we're using the RTD theme\n # for the production docs, it's best to use that to avoid\n # issues due to discrepancies between the themes.\n html_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n# html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = '../securedrop/static/i/favicon.png'\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n# html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. 
If empty, the default will be used.\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'SecureDropdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n\n # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'SecureDrop.tex', u'SecureDrop Documentation',\n author, 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'securedrop', u'SecureDrop Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'SecureDrop', u'SecureDrop Documentation',\n author, 'SecureDrop', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n# texinfo_no_detailmenu = False\n", "path": "docs/conf.py"}]} | 3,749 | 200 |
gh_patches_debug_12398 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1590 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
improve `healthy()` check for node
See pre-commit/actions#45
for `language_version: system` hooks this looks like:
```
eslint...................................................................Failed
- hook id: eslint
- exit code: 127
/home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node: line 5: /opt/hostedtoolcache/node/14.8.0/x64/bin/node: No such file or directory
/home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node: line 5: /opt/hostedtoolcache/node/14.8.0/x64/bin/node: No such file or directory
##[error]The process '/opt/hostedtoolcache/Python/3.8.5/x64/bin/pre-commit' failed with exit code 1
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/node.py`
Content:
```
1 import contextlib
2 import functools
3 import os
4 import sys
5 from typing import Generator
6 from typing import Sequence
7 from typing import Tuple
8
9 import pre_commit.constants as C
10 from pre_commit import parse_shebang
11 from pre_commit.envcontext import envcontext
12 from pre_commit.envcontext import PatchesT
13 from pre_commit.envcontext import UNSET
14 from pre_commit.envcontext import Var
15 from pre_commit.hook import Hook
16 from pre_commit.languages import helpers
17 from pre_commit.languages.python import bin_dir
18 from pre_commit.prefix import Prefix
19 from pre_commit.util import clean_path_on_failure
20 from pre_commit.util import cmd_output
21 from pre_commit.util import cmd_output_b
22
23 ENVIRONMENT_DIR = 'node_env'
24 healthy = helpers.basic_healthy
25
26
27 @functools.lru_cache(maxsize=1)
28 def get_default_version() -> str:
29 # nodeenv does not yet support `-n system` on windows
30 if sys.platform == 'win32':
31 return C.DEFAULT
32 # if node is already installed, we can save a bunch of setup time by
33 # using the installed version
34 elif all(parse_shebang.find_executable(exe) for exe in ('node', 'npm')):
35 return 'system'
36 else:
37 return C.DEFAULT
38
39
40 def _envdir(prefix: Prefix, version: str) -> str:
41 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
42 return prefix.path(directory)
43
44
45 def get_env_patch(venv: str) -> PatchesT:
46 if sys.platform == 'cygwin': # pragma: no cover
47 _, win_venv, _ = cmd_output('cygpath', '-w', venv)
48 install_prefix = fr'{win_venv.strip()}\bin'
49 lib_dir = 'lib'
50 elif sys.platform == 'win32': # pragma: no cover
51 install_prefix = bin_dir(venv)
52 lib_dir = 'Scripts'
53 else: # pragma: win32 no cover
54 install_prefix = venv
55 lib_dir = 'lib'
56 return (
57 ('NODE_VIRTUAL_ENV', venv),
58 ('NPM_CONFIG_PREFIX', install_prefix),
59 ('npm_config_prefix', install_prefix),
60 ('NPM_CONFIG_USERCONFIG', UNSET),
61 ('npm_config_userconfig', UNSET),
62 ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')),
63 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),
64 )
65
66
67 @contextlib.contextmanager
68 def in_env(
69 prefix: Prefix,
70 language_version: str,
71 ) -> Generator[None, None, None]:
72 with envcontext(get_env_patch(_envdir(prefix, language_version))):
73 yield
74
75
76 def install_environment(
77 prefix: Prefix, version: str, additional_dependencies: Sequence[str],
78 ) -> None:
79 additional_dependencies = tuple(additional_dependencies)
80 assert prefix.exists('package.json')
81 envdir = _envdir(prefix, version)
82
83 # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath
84 if sys.platform == 'win32': # pragma: no cover
85 envdir = fr'\\?\{os.path.normpath(envdir)}'
86 with clean_path_on_failure(envdir):
87 cmd = [
88 sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir,
89 ]
90 if version != C.DEFAULT:
91 cmd.extend(['-n', version])
92 cmd_output_b(*cmd)
93
94 with in_env(prefix, version):
95 # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449
96 # install as if we installed from git
97 helpers.run_setup_cmd(prefix, ('npm', 'install'))
98 helpers.run_setup_cmd(
99 prefix,
100 ('npm', 'install', '-g', '.', *additional_dependencies),
101 )
102
103
104 def run_hook(
105 hook: Hook,
106 file_args: Sequence[str],
107 color: bool,
108 ) -> Tuple[int, bytes]:
109 with in_env(hook.prefix, hook.language_version):
110 return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py
--- a/pre_commit/languages/node.py
+++ b/pre_commit/languages/node.py
@@ -21,7 +21,6 @@
from pre_commit.util import cmd_output_b
ENVIRONMENT_DIR = 'node_env'
-healthy = helpers.basic_healthy
@functools.lru_cache(maxsize=1)
@@ -73,6 +72,12 @@
yield
+def healthy(prefix: Prefix, language_version: str) -> bool:
+ with in_env(prefix, language_version):
+ retcode, _, _ = cmd_output_b('node', '--version', retcode=None)
+ return retcode == 0
+
+
def install_environment(
prefix: Prefix, version: str, additional_dependencies: Sequence[str],
) -> None:
| {"golden_diff": "diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py\n--- a/pre_commit/languages/node.py\n+++ b/pre_commit/languages/node.py\n@@ -21,7 +21,6 @@\n from pre_commit.util import cmd_output_b\n \n ENVIRONMENT_DIR = 'node_env'\n-healthy = helpers.basic_healthy\n \n \n @functools.lru_cache(maxsize=1)\n@@ -73,6 +72,12 @@\n yield\n \n \n+def healthy(prefix: Prefix, language_version: str) -> bool:\n+ with in_env(prefix, language_version):\n+ retcode, _, _ = cmd_output_b('node', '--version', retcode=None)\n+ return retcode == 0\n+\n+\n def install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n ) -> None:\n", "issue": "improve `healthy()` check for node\nSee pre-commit/actions#45\r\n\r\nfor `language_version: system` hooks this looks like:\r\n\r\n```\r\neslint...................................................................Failed\r\n- hook id: eslint\r\n- exit code: 127\r\n\r\n/home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node: line 5: /opt/hostedtoolcache/node/14.8.0/x64/bin/node: No such file or directory\r\n/home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node: line 5: /opt/hostedtoolcache/node/14.8.0/x64/bin/node: No such file or directory\r\n\r\n##[error]The process '/opt/hostedtoolcache/Python/3.8.5/x64/bin/pre-commit' failed with exit code 1\r\n```\r\n\r\n\n", "before_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import parse_shebang\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.languages.python import bin_dir\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'node_env'\nhealthy = helpers.basic_healthy\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n # nodeenv does not yet support `-n system` on windows\n if sys.platform == 'win32':\n return C.DEFAULT\n # if node is already installed, we can save a bunch of setup time by\n # using the installed version\n elif all(parse_shebang.find_executable(exe) for exe in ('node', 'npm')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef _envdir(prefix: Prefix, version: str) -> str:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n return prefix.path(directory)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n if sys.platform == 'cygwin': # pragma: no cover\n _, win_venv, _ = cmd_output('cygpath', '-w', venv)\n install_prefix = fr'{win_venv.strip()}\\bin'\n lib_dir = 'lib'\n elif sys.platform == 'win32': # pragma: no cover\n install_prefix = bin_dir(venv)\n lib_dir = 'Scripts'\n else: # pragma: win32 no cover\n install_prefix = venv\n lib_dir = 'lib'\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', install_prefix),\n ('npm_config_prefix', install_prefix),\n ('NPM_CONFIG_USERCONFIG', UNSET),\n ('npm_config_userconfig', UNSET),\n ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n with 
envcontext(get_env_patch(_envdir(prefix, language_version))):\n yield\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n additional_dependencies = tuple(additional_dependencies)\n assert prefix.exists('package.json')\n envdir = _envdir(prefix, version)\n\n # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath\n if sys.platform == 'win32': # pragma: no cover\n envdir = fr'\\\\?\\{os.path.normpath(envdir)}'\n with clean_path_on_failure(envdir):\n cmd = [\n sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir,\n ]\n if version != C.DEFAULT:\n cmd.extend(['-n', version])\n cmd_output_b(*cmd)\n\n with in_env(prefix, version):\n # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449\n # install as if we installed from git\n helpers.run_setup_cmd(prefix, ('npm', 'install'))\n helpers.run_setup_cmd(\n prefix,\n ('npm', 'install', '-g', '.', *additional_dependencies),\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/node.py"}], "after_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import parse_shebang\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.languages.python import bin_dir\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'node_env'\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n # nodeenv does not yet support `-n system` on windows\n if sys.platform == 'win32':\n return C.DEFAULT\n # if node is already installed, we can save a bunch of setup time by\n # using the installed version\n elif all(parse_shebang.find_executable(exe) for exe in ('node', 'npm')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef _envdir(prefix: Prefix, version: str) -> str:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n return prefix.path(directory)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n if sys.platform == 'cygwin': # pragma: no cover\n _, win_venv, _ = cmd_output('cygpath', '-w', venv)\n install_prefix = fr'{win_venv.strip()}\\bin'\n lib_dir = 'lib'\n elif sys.platform == 'win32': # pragma: no cover\n install_prefix = bin_dir(venv)\n lib_dir = 'Scripts'\n else: # pragma: win32 no cover\n install_prefix = venv\n lib_dir = 'lib'\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', install_prefix),\n ('npm_config_prefix', install_prefix),\n ('NPM_CONFIG_USERCONFIG', UNSET),\n ('npm_config_userconfig', UNSET),\n ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n with envcontext(get_env_patch(_envdir(prefix, language_version))):\n yield\n\n\ndef healthy(prefix: Prefix, 
language_version: str) -> bool:\n with in_env(prefix, language_version):\n retcode, _, _ = cmd_output_b('node', '--version', retcode=None)\n return retcode == 0\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n additional_dependencies = tuple(additional_dependencies)\n assert prefix.exists('package.json')\n envdir = _envdir(prefix, version)\n\n # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath\n if sys.platform == 'win32': # pragma: no cover\n envdir = fr'\\\\?\\{os.path.normpath(envdir)}'\n with clean_path_on_failure(envdir):\n cmd = [\n sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir,\n ]\n if version != C.DEFAULT:\n cmd.extend(['-n', version])\n cmd_output_b(*cmd)\n\n with in_env(prefix, version):\n # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449\n # install as if we installed from git\n helpers.run_setup_cmd(prefix, ('npm', 'install'))\n helpers.run_setup_cmd(\n prefix,\n ('npm', 'install', '-g', '.', *additional_dependencies),\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/node.py"}]} | 1,608 | 183 |
gh_patches_debug_1654 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-1515 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add readthedocs documentation for remoulade instrumentation
Part of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Usage
17 -----
18
19 * Start broker backend
20
21 ::
22
23 docker run -p 5672:5672 rabbitmq
24
25 * Run instrumented actor
26
27 .. code-block:: python
28
29 from remoulade.brokers.rabbitmq import RabbitmqBroker
30 import remoulade
31
32 RemouladeInstrumentor().instrument()
33
34 broker = RabbitmqBroker()
35 remoulade.set_broker(broker)
36
37 @remoulade.actor
38 def multiply(x, y):
39 return x * y
40
41 broker.declare_actor(count_words)
42
43 multiply.send(43, 51)
44
45 """
46 from typing import Collection
47
48 from remoulade import Middleware, broker
49
50 from opentelemetry import trace
51 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
52 from opentelemetry.instrumentation.remoulade import utils
53 from opentelemetry.instrumentation.remoulade.package import _instruments
54 from opentelemetry.instrumentation.remoulade.version import __version__
55 from opentelemetry.propagate import extract, inject
56 from opentelemetry.semconv.trace import SpanAttributes
57
58 _REMOULADE_MESSAGE_TAG_KEY = "remoulade.action"
59 _REMOULADE_MESSAGE_SEND = "send"
60 _REMOULADE_MESSAGE_RUN = "run"
61
62 _REMOULADE_MESSAGE_NAME_KEY = "remoulade.actor_name"
63
64 _REMOULADE_MESSAGE_RETRY_COUNT_KEY = "remoulade.retry_count"
65
66
67 class _InstrumentationMiddleware(Middleware):
68 def __init__(self, _tracer):
69 self._tracer = _tracer
70 self._span_registry = {}
71
72 def before_process_message(self, _broker, message):
73 if "trace_ctx" not in message.options:
74 return
75
76 trace_ctx = extract(message.options["trace_ctx"])
77 retry_count = message.options.get("retries", 0)
78 operation_name = utils.get_operation_name(
79 "before_process_message", retry_count
80 )
81 span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count}
82
83 span = self._tracer.start_span(
84 operation_name,
85 kind=trace.SpanKind.CONSUMER,
86 context=trace_ctx,
87 attributes=span_attributes,
88 )
89
90 activation = trace.use_span(span, end_on_exit=True)
91 activation.__enter__() # pylint: disable=E1101
92
93 utils.attach_span(
94 self._span_registry, message.message_id, (span, activation)
95 )
96
97 def after_process_message(
98 self, _broker, message, *, result=None, exception=None
99 ):
100 span, activation = utils.retrieve_span(
101 self._span_registry, message.message_id
102 )
103
104 if span is None:
105 # no existing span found for message_id
106 return
107
108 if span.is_recording():
109 span.set_attributes(
110 {
111 _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_RUN,
112 _REMOULADE_MESSAGE_NAME_KEY: message.actor_name,
113 SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id,
114 }
115 )
116
117 activation.__exit__(None, None, None)
118 utils.detach_span(self._span_registry, message.message_id)
119
120 def before_enqueue(self, _broker, message, delay):
121 retry_count = message.options.get("retries", 0)
122 operation_name = utils.get_operation_name(
123 "before_enqueue", retry_count
124 )
125 span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count}
126
127 span = self._tracer.start_span(
128 operation_name,
129 kind=trace.SpanKind.PRODUCER,
130 attributes=span_attributes,
131 )
132
133 if span.is_recording():
134 span.set_attributes(
135 {
136 _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_SEND,
137 _REMOULADE_MESSAGE_NAME_KEY: message.actor_name,
138 SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id,
139 }
140 )
141
142 activation = trace.use_span(span, end_on_exit=True)
143 activation.__enter__() # pylint: disable=E1101
144
145 utils.attach_span(
146 self._span_registry,
147 message.message_id,
148 (span, activation),
149 is_publish=True,
150 )
151
152 if "trace_ctx" not in message.options:
153 message.options["trace_ctx"] = {}
154 inject(message.options["trace_ctx"])
155
156 def after_enqueue(self, _broker, message, delay, exception=None):
157 _, activation = utils.retrieve_span(
158 self._span_registry, message.message_id, is_publish=True
159 )
160
161 if activation is None:
162 # no existing span found for message_id
163 return
164
165 activation.__exit__(None, None, None)
166 utils.detach_span(
167 self._span_registry, message.message_id, is_publish=True
168 )
169
170
171 class RemouladeInstrumentor(BaseInstrumentor):
172 def instrumentation_dependencies(self) -> Collection[str]:
173 return _instruments
174
175 def _instrument(self, **kwargs):
176 tracer_provider = kwargs.get("tracer_provider")
177
178 # pylint: disable=attribute-defined-outside-init
179 self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)
180 instrumentation_middleware = _InstrumentationMiddleware(self._tracer)
181
182 broker.add_extra_default_middleware(instrumentation_middleware)
183
184 def _uninstrument(self, **kwargs):
185 broker.remove_extra_default_middleware(_InstrumentationMiddleware)
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py b/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py
@@ -16,13 +16,13 @@
Usage
-----
-* Start broker backend
+Start broker backend
::
docker run -p 5672:5672 rabbitmq
-* Run instrumented actor
+Run instrumented actor
.. code-block:: python
| {"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py b/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py\n@@ -16,13 +16,13 @@\n Usage\n -----\n \n-* Start broker backend\n+Start broker backend\n \n ::\n \n docker run -p 5672:5672 rabbitmq\n \n-* Run instrumented actor\n+Run instrumented actor\n \n .. code-block:: python\n", "issue": "Add readthedocs documentation for remoulade instrumentation\nPart of [1491](https://github.com/open-telemetry/opentelemetry-python-contrib/issues/1491)\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUsage\n-----\n\n* Start broker backend\n\n::\n\n docker run -p 5672:5672 rabbitmq\n\n* Run instrumented actor\n\n.. code-block:: python\n\n from remoulade.brokers.rabbitmq import RabbitmqBroker\n import remoulade\n\n RemouladeInstrumentor().instrument()\n\n broker = RabbitmqBroker()\n remoulade.set_broker(broker)\n\n @remoulade.actor\n def multiply(x, y):\n return x * y\n\n broker.declare_actor(count_words)\n\n multiply.send(43, 51)\n\n\"\"\"\nfrom typing import Collection\n\nfrom remoulade import Middleware, broker\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.remoulade import utils\nfrom opentelemetry.instrumentation.remoulade.package import _instruments\nfrom opentelemetry.instrumentation.remoulade.version import __version__\nfrom opentelemetry.propagate import extract, inject\nfrom opentelemetry.semconv.trace import SpanAttributes\n\n_REMOULADE_MESSAGE_TAG_KEY = \"remoulade.action\"\n_REMOULADE_MESSAGE_SEND = \"send\"\n_REMOULADE_MESSAGE_RUN = \"run\"\n\n_REMOULADE_MESSAGE_NAME_KEY = \"remoulade.actor_name\"\n\n_REMOULADE_MESSAGE_RETRY_COUNT_KEY = \"remoulade.retry_count\"\n\n\nclass _InstrumentationMiddleware(Middleware):\n def __init__(self, _tracer):\n self._tracer = _tracer\n self._span_registry = {}\n\n def before_process_message(self, _broker, message):\n if \"trace_ctx\" not in message.options:\n return\n\n trace_ctx = extract(message.options[\"trace_ctx\"])\n retry_count = message.options.get(\"retries\", 0)\n operation_name = utils.get_operation_name(\n \"before_process_message\", retry_count\n )\n span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count}\n\n span = self._tracer.start_span(\n operation_name,\n kind=trace.SpanKind.CONSUMER,\n context=trace_ctx,\n attributes=span_attributes,\n )\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n\n utils.attach_span(\n self._span_registry, message.message_id, (span, 
activation)\n )\n\n def after_process_message(\n self, _broker, message, *, result=None, exception=None\n ):\n span, activation = utils.retrieve_span(\n self._span_registry, message.message_id\n )\n\n if span is None:\n # no existing span found for message_id\n return\n\n if span.is_recording():\n span.set_attributes(\n {\n _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_RUN,\n _REMOULADE_MESSAGE_NAME_KEY: message.actor_name,\n SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id,\n }\n )\n\n activation.__exit__(None, None, None)\n utils.detach_span(self._span_registry, message.message_id)\n\n def before_enqueue(self, _broker, message, delay):\n retry_count = message.options.get(\"retries\", 0)\n operation_name = utils.get_operation_name(\n \"before_enqueue\", retry_count\n )\n span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count}\n\n span = self._tracer.start_span(\n operation_name,\n kind=trace.SpanKind.PRODUCER,\n attributes=span_attributes,\n )\n\n if span.is_recording():\n span.set_attributes(\n {\n _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_SEND,\n _REMOULADE_MESSAGE_NAME_KEY: message.actor_name,\n SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id,\n }\n )\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n\n utils.attach_span(\n self._span_registry,\n message.message_id,\n (span, activation),\n is_publish=True,\n )\n\n if \"trace_ctx\" not in message.options:\n message.options[\"trace_ctx\"] = {}\n inject(message.options[\"trace_ctx\"])\n\n def after_enqueue(self, _broker, message, delay, exception=None):\n _, activation = utils.retrieve_span(\n self._span_registry, message.message_id, is_publish=True\n )\n\n if activation is None:\n # no existing span found for message_id\n return\n\n activation.__exit__(None, None, None)\n utils.detach_span(\n self._span_registry, message.message_id, is_publish=True\n )\n\n\nclass RemouladeInstrumentor(BaseInstrumentor):\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n\n # pylint: disable=attribute-defined-outside-init\n self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n instrumentation_middleware = _InstrumentationMiddleware(self._tracer)\n\n broker.add_extra_default_middleware(instrumentation_middleware)\n\n def _uninstrument(self, **kwargs):\n broker.remove_extra_default_middleware(_InstrumentationMiddleware)\n", "path": "instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUsage\n-----\n\nStart broker backend\n\n::\n\n docker run -p 5672:5672 rabbitmq\n\nRun instrumented actor\n\n.. 
code-block:: python\n\n from remoulade.brokers.rabbitmq import RabbitmqBroker\n import remoulade\n\n RemouladeInstrumentor().instrument()\n\n broker = RabbitmqBroker()\n remoulade.set_broker(broker)\n\n @remoulade.actor\n def multiply(x, y):\n return x * y\n\n broker.declare_actor(count_words)\n\n multiply.send(43, 51)\n\n\"\"\"\nfrom typing import Collection\n\nfrom remoulade import Middleware, broker\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.remoulade import utils\nfrom opentelemetry.instrumentation.remoulade.package import _instruments\nfrom opentelemetry.instrumentation.remoulade.version import __version__\nfrom opentelemetry.propagate import extract, inject\nfrom opentelemetry.semconv.trace import SpanAttributes\n\n_REMOULADE_MESSAGE_TAG_KEY = \"remoulade.action\"\n_REMOULADE_MESSAGE_SEND = \"send\"\n_REMOULADE_MESSAGE_RUN = \"run\"\n\n_REMOULADE_MESSAGE_NAME_KEY = \"remoulade.actor_name\"\n\n_REMOULADE_MESSAGE_RETRY_COUNT_KEY = \"remoulade.retry_count\"\n\n\nclass _InstrumentationMiddleware(Middleware):\n def __init__(self, _tracer):\n self._tracer = _tracer\n self._span_registry = {}\n\n def before_process_message(self, _broker, message):\n if \"trace_ctx\" not in message.options:\n return\n\n trace_ctx = extract(message.options[\"trace_ctx\"])\n retry_count = message.options.get(\"retries\", 0)\n operation_name = utils.get_operation_name(\n \"before_process_message\", retry_count\n )\n span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count}\n\n span = self._tracer.start_span(\n operation_name,\n kind=trace.SpanKind.CONSUMER,\n context=trace_ctx,\n attributes=span_attributes,\n )\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n\n utils.attach_span(\n self._span_registry, message.message_id, (span, activation)\n )\n\n def after_process_message(\n self, _broker, message, *, result=None, exception=None\n ):\n span, activation = utils.retrieve_span(\n self._span_registry, message.message_id\n )\n\n if span is None:\n # no existing span found for message_id\n return\n\n if span.is_recording():\n span.set_attributes(\n {\n _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_RUN,\n _REMOULADE_MESSAGE_NAME_KEY: message.actor_name,\n SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id,\n }\n )\n\n activation.__exit__(None, None, None)\n utils.detach_span(self._span_registry, message.message_id)\n\n def before_enqueue(self, _broker, message, delay):\n retry_count = message.options.get(\"retries\", 0)\n operation_name = utils.get_operation_name(\n \"before_enqueue\", retry_count\n )\n span_attributes = {_REMOULADE_MESSAGE_RETRY_COUNT_KEY: retry_count}\n\n span = self._tracer.start_span(\n operation_name,\n kind=trace.SpanKind.PRODUCER,\n attributes=span_attributes,\n )\n\n if span.is_recording():\n span.set_attributes(\n {\n _REMOULADE_MESSAGE_TAG_KEY: _REMOULADE_MESSAGE_SEND,\n _REMOULADE_MESSAGE_NAME_KEY: message.actor_name,\n SpanAttributes.MESSAGING_MESSAGE_ID: message.message_id,\n }\n )\n\n activation = trace.use_span(span, end_on_exit=True)\n activation.__enter__() # pylint: disable=E1101\n\n utils.attach_span(\n self._span_registry,\n message.message_id,\n (span, activation),\n is_publish=True,\n )\n\n if \"trace_ctx\" not in message.options:\n message.options[\"trace_ctx\"] = {}\n inject(message.options[\"trace_ctx\"])\n\n def after_enqueue(self, _broker, message, delay, exception=None):\n _, activation = 
utils.retrieve_span(\n self._span_registry, message.message_id, is_publish=True\n )\n\n if activation is None:\n # no existing span found for message_id\n return\n\n activation.__exit__(None, None, None)\n utils.detach_span(\n self._span_registry, message.message_id, is_publish=True\n )\n\n\nclass RemouladeInstrumentor(BaseInstrumentor):\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n tracer_provider = kwargs.get(\"tracer_provider\")\n\n # pylint: disable=attribute-defined-outside-init\n self._tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n instrumentation_middleware = _InstrumentationMiddleware(self._tracer)\n\n broker.add_extra_default_middleware(instrumentation_middleware)\n\n def _uninstrument(self, **kwargs):\n broker.remove_extra_default_middleware(_InstrumentationMiddleware)\n", "path": "instrumentation/opentelemetry-instrumentation-remoulade/src/opentelemetry/instrumentation/remoulade/__init__.py"}]} | 2,050 | 186 |
gh_patches_debug_22938 | rasdani/github-patches | git_diff | bridgecrewio__checkov-215 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Don't crash on IAM Policies that do not have an "effect"
**Describe the bug**
[`Effect`](https://www.terraform.io/docs/providers/aws/d/iam_policy_document.html#effect) is an optional token on AWS IAM policy documents. It will default to Allow.
While it is perhaps good to be explicit, it should not crash Checkov.
```
ERROR:checkov.terraform.checks.data.aws.AdminPolicyDocument:Failed to run check: Ensure IAM policies that allow full "*-*" administrative privileges are not created for configuration: {'statement': ....
Traceback (most recent call last):
File "/usr/local/bin/checkov", line 5, in <module>
run()
File "/usr/local/lib/python3.8/site-packages/checkov/main.py", line 47, in run
scan_reports = runner_registry.run(root_folder, external_checks_dir=args.external_checks_dir, files=file)
File "/usr/local/lib/python3.8/site-packages/checkov/common/runners/runner_registry.py", line 20, in run
scan_report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files)
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py", line 38, in run
self.check_tf_definition(report, root_folder, tf_definitions)
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py", line 66, in check_tf_definition
self.run_block(definition[1][block_type], definitions_context, full_file_path, report, scanned_file,
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py", line 88, in run_block
results = registry.scan(scanned_file, entity,
File "/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py", line 48, in scan
result = check.run(scanned_file=scanned_file, entity_configuration=entity_configuration,
File "/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py", line 44, in run
raise e
File "/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py", line 33, in run
check_result['result'] = self.scan_entity_conf(entity_configuration)
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/data/base_check.py", line 19, in scan_entity_conf
return self.scan_data_conf(conf)
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/data/aws/AdminPolicyDocument.py", line 23, in scan_data_conf
if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]:
KeyError: 'effect'
```
**To Reproduce**
Steps to reproduce the behavior:
1. Write a terraform file:
```
provider "aws" {
region = "us-east-1"
}
data "aws_iam_policy_document" "test-policy" {
statement {
actions = ["s3:ListBucket"]
resources = ["arn:aws:s3:::*"]
}
}
resource "aws_iam_policy" "test-policy" {
name = "test-policy"
policy = data.aws_iam_policy_document.test-policy.json
}
```
**Expected behavior**
This should not crash
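For reference, a minimal sketch of a lookup that tolerates the optional `effect` token is shown below. It assumes the parsed `statement` is a dict of single-element lists (as the `statement['effect'][0]` access in the traceback suggests) and simply mirrors the AWS default of `Allow`; it is an illustration, not Checkov code.

```python
# Sketch only: treat a missing "effect" as the AWS default of "Allow"
# instead of raising KeyError. The statement shape is assumed from the
# traceback above (each token is a single-element list).
def is_allow_statement(statement: dict) -> bool:
    return statement.get("effect", ["Allow"])[0] == "Allow"
```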
**Additional context**
When trying to debug this, I started getting a *different* error.
```Traceback (most recent call last):
File "/home/matthew/.local/bin/checkov", line 2, in <module>
from checkov.main import run
File "/home/matthew/.local/lib/python3.7/site-packages/checkov/main.py", line 5, in <module>
from checkov.common.util.docs_generator import print_checks
File "/home/matthew/.local/lib/python3.7/site-packages/checkov/common/util/docs_generator.py", line 5, in <module>
from checkov.terraform.checks.data.registry import data_registry
File "/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/__init__.py", line 1, in <module>
from checkov.terraform.checks.resource import *
File "/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/__init__.py", line 1, in <module>
from checkov.terraform.checks.resource.aws import *
File "/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/aws/AdminPolicyDocument.py", line 27, in <module>
check = AdminPolicyDocument()
File "/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/aws/AdminPolicyDocument.py", line 11, in __init__
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)
File "/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/base_check.py", line 18, in __init__
resource_registry.register(self)
File "/home/matthew/.local/lib/python3.7/site-packages/checkov/common/checks/base_check_registry.py", line 16, in register
for entity in check.supported_entities:
AttributeError: 'AdminPolicyDocument' object has no attribute 'supported_entities'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/data/aws/AdminPolicyDocument.py`
Content:
```
1 from checkov.terraform.checks.data.base_check import BaseDataCheck
2 from checkov.common.models.enums import CheckResult, CheckCategories
3
4
5 class AdminPolicyDocument(BaseDataCheck):
6 def __init__(self):
7 name = "Ensure IAM policies that allow full \"*-*\" administrative privileges are not created"
8 id = "CKV_AWS_1"
9 supported_data = ['aws_iam_policy_document']
10 categories = [CheckCategories.IAM]
11 super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)
12
13 def scan_data_conf(self, conf):
14 """
15 validates iam policy document
16 https://learn.hashicorp.com/terraform/aws/iam-policy
17 :param conf: aws_kms_key configuration
18 :return: <CheckResult>
19 """
20 key = 'statement'
21 if key in conf.keys():
22 for statement in conf[key]:
23 if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]:
24 return CheckResult.FAILED
25 return CheckResult.PASSED
26
27
28 check = AdminPolicyDocument()
29
```
Path: `checkov/terraform/checks/data/aws/StarActionPolicyDocument.py`
Content:
```
1 from checkov.terraform.checks.data.base_check import BaseDataCheck
2 from checkov.common.models.enums import CheckResult, CheckCategories
3
4
5 class StarActionPolicyDocument(BaseDataCheck):
6 def __init__(self):
7 name = "Ensure no IAM policies documents allow \"*\" as a statement's actions"
8 id = "CKV_AWS_49"
9 supported_data = ['aws_iam_policy_document']
10 categories = [CheckCategories.IAM]
11 super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)
12
13 def scan_data_conf(self, conf):
14 """
15 validates iam policy document
16 https://learn.hashicorp.com/terraform/aws/iam-policy
17 :param conf: aws_kms_key configuration
18 :return: <CheckResult>
19 """
20 key = 'statement'
21 if key in conf.keys():
22 for statement in conf['statement']:
23 if 'actions' in statement and '*' in statement['actions'][0] and statement['effect'][0] == "Allow":
24 return CheckResult.FAILED
25 return CheckResult.PASSED
26
27
28 check = StarActionPolicyDocument()
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/data/aws/AdminPolicyDocument.py b/checkov/terraform/checks/data/aws/AdminPolicyDocument.py
--- a/checkov/terraform/checks/data/aws/AdminPolicyDocument.py
+++ b/checkov/terraform/checks/data/aws/AdminPolicyDocument.py
@@ -20,7 +20,8 @@
key = 'statement'
if key in conf.keys():
for statement in conf[key]:
- if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]:
+ if 'actions' in statement and statement.get('effect', ['Allow'])[0] == 'Allow' and '*' in statement['actions'][0] \
+ and '*' in statement['resources'][0]:
return CheckResult.FAILED
return CheckResult.PASSED
diff --git a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py
--- a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py
+++ b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py
@@ -20,7 +20,7 @@
key = 'statement'
if key in conf.keys():
for statement in conf['statement']:
- if 'actions' in statement and '*' in statement['actions'][0] and statement['effect'][0] == "Allow":
+ if 'actions' in statement and '*' in statement['actions'][0] and statement.get('effect', ['Allow'])[0] == 'Allow':
return CheckResult.FAILED
return CheckResult.PASSED
| {"golden_diff": "diff --git a/checkov/terraform/checks/data/aws/AdminPolicyDocument.py b/checkov/terraform/checks/data/aws/AdminPolicyDocument.py\n--- a/checkov/terraform/checks/data/aws/AdminPolicyDocument.py\n+++ b/checkov/terraform/checks/data/aws/AdminPolicyDocument.py\n@@ -20,7 +20,8 @@\n key = 'statement'\n if key in conf.keys():\n for statement in conf[key]:\n- if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]:\n+ if 'actions' in statement and statement.get('effect', ['Allow'])[0] == 'Allow' and '*' in statement['actions'][0] \\\n+ and '*' in statement['resources'][0]:\n return CheckResult.FAILED\n return CheckResult.PASSED\n \ndiff --git a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\n--- a/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\n+++ b/checkov/terraform/checks/data/aws/StarActionPolicyDocument.py\n@@ -20,7 +20,7 @@\n key = 'statement'\n if key in conf.keys():\n for statement in conf['statement']:\n- if 'actions' in statement and '*' in statement['actions'][0] and statement['effect'][0] == \"Allow\":\n+ if 'actions' in statement and '*' in statement['actions'][0] and statement.get('effect', ['Allow'])[0] == 'Allow':\n return CheckResult.FAILED\n return CheckResult.PASSED\n", "issue": "Don't crash on IAM Policies that do not have an \"effect\"\n**Describe the bug**\r\n[`Effect`](https://www.terraform.io/docs/providers/aws/d/iam_policy_document.html#effect) is an optional token on AWS IAM policy documents. It will default to Allow.\r\nWhile it is perhaps good to be explicit, it should not crash Checkov.\r\n\r\n```\r\nERROR:checkov.terraform.checks.data.aws.AdminPolicyDocument:Failed to run check: Ensure IAM policies that allow full \"*-*\" administrative privileges are not created for configuration: {'statement': ....\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/checkov\", line 5, in <module>\r\n run()\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/main.py\", line 47, in run\r\n scan_reports = runner_registry.run(root_folder, external_checks_dir=args.external_checks_dir, files=file)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/runners/runner_registry.py\", line 20, in run\r\n scan_report = runner.run(root_folder, external_checks_dir=external_checks_dir, files=files)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py\", line 38, in run\r\n self.check_tf_definition(report, root_folder, tf_definitions)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py\", line 66, in check_tf_definition\r\n self.run_block(definition[1][block_type], definitions_context, full_file_path, report, scanned_file,\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/runner.py\", line 88, in run_block\r\n results = registry.scan(scanned_file, entity,\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py\", line 48, in scan\r\n result = check.run(scanned_file=scanned_file, entity_configuration=entity_configuration,\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py\", line 44, in run\r\n raise e\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py\", line 33, in run\r\n check_result['result'] = self.scan_entity_conf(entity_configuration)\r\n File 
\"/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/data/base_check.py\", line 19, in scan_entity_conf\r\n return self.scan_data_conf(conf)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/data/aws/AdminPolicyDocument.py\", line 23, in scan_data_conf\r\n if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]:\r\nKeyError: 'effect'\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Write a terraform file:\r\n```\r\nprovider \"aws\" {\r\n region = \"us-east-1\"\r\n}\r\n\r\ndata \"aws_iam_policy_document\" \"test-policy\" {\r\n statement {\r\n actions = [\"s3:ListBucket\"]\r\n resources = [\"arn:aws:s3:::*\"]\r\n }\r\n}\r\n\r\nresource \"aws_iam_policy\" \"test-policy\" {\r\n name = \"test-policy\"\r\n policy = data.aws_iam_policy_document.test-policy.json\r\n}\r\n\r\n```\r\n\r\n**Expected behavior**\r\nThis should not crash\r\n\r\n**Additional context**\r\nWhen trying to debug this, I started getting a *different* error.\r\n```Traceback (most recent call last):\r\n File \"/home/matthew/.local/bin/checkov\", line 2, in <module>\r\n from checkov.main import run\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/main.py\", line 5, in <module>\r\n from checkov.common.util.docs_generator import print_checks\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/common/util/docs_generator.py\", line 5, in <module>\r\n from checkov.terraform.checks.data.registry import data_registry\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/__init__.py\", line 1, in <module>\r\n from checkov.terraform.checks.resource import *\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/__init__.py\", line 1, in <module>\r\n from checkov.terraform.checks.resource.aws import *\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/aws/AdminPolicyDocument.py\", line 27, in <module>\r\n check = AdminPolicyDocument()\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/aws/AdminPolicyDocument.py\", line 11, in __init__\r\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resource)\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/terraform/checks/resource/base_check.py\", line 18, in __init__\r\n resource_registry.register(self)\r\n File \"/home/matthew/.local/lib/python3.7/site-packages/checkov/common/checks/base_check_registry.py\", line 16, in register\r\n for entity in check.supported_entities:\r\nAttributeError: 'AdminPolicyDocument' object has no attribute 'supported_entities'\r\n```\r\n\n", "before_files": [{"content": "from checkov.terraform.checks.data.base_check import BaseDataCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\n\n\nclass AdminPolicyDocument(BaseDataCheck):\n def __init__(self):\n name = \"Ensure IAM policies that allow full \\\"*-*\\\" administrative privileges are not created\"\n id = \"CKV_AWS_1\"\n supported_data = ['aws_iam_policy_document']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)\n\n def scan_data_conf(self, conf):\n \"\"\"\n validates iam policy document\n https://learn.hashicorp.com/terraform/aws/iam-policy\n :param conf: aws_kms_key configuration\n :return: <CheckResult>\n \"\"\"\n key = 'statement'\n 
if key in conf.keys():\n for statement in conf[key]:\n if 'actions' in statement and statement['effect'][0] == 'Allow' and '*' in statement['actions'][0] and '*' in statement['resources'][0]:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = AdminPolicyDocument()\n", "path": "checkov/terraform/checks/data/aws/AdminPolicyDocument.py"}, {"content": "from checkov.terraform.checks.data.base_check import BaseDataCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\n\n\nclass StarActionPolicyDocument(BaseDataCheck):\n def __init__(self):\n name = \"Ensure no IAM policies documents allow \\\"*\\\" as a statement's actions\"\n id = \"CKV_AWS_49\"\n supported_data = ['aws_iam_policy_document']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)\n\n def scan_data_conf(self, conf):\n \"\"\"\n validates iam policy document\n https://learn.hashicorp.com/terraform/aws/iam-policy\n :param conf: aws_kms_key configuration\n :return: <CheckResult>\n \"\"\"\n key = 'statement'\n if key in conf.keys():\n for statement in conf['statement']:\n if 'actions' in statement and '*' in statement['actions'][0] and statement['effect'][0] == \"Allow\":\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = StarActionPolicyDocument()\n", "path": "checkov/terraform/checks/data/aws/StarActionPolicyDocument.py"}], "after_files": [{"content": "from checkov.terraform.checks.data.base_check import BaseDataCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\n\n\nclass AdminPolicyDocument(BaseDataCheck):\n def __init__(self):\n name = \"Ensure IAM policies that allow full \\\"*-*\\\" administrative privileges are not created\"\n id = \"CKV_AWS_1\"\n supported_data = ['aws_iam_policy_document']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)\n\n def scan_data_conf(self, conf):\n \"\"\"\n validates iam policy document\n https://learn.hashicorp.com/terraform/aws/iam-policy\n :param conf: aws_kms_key configuration\n :return: <CheckResult>\n \"\"\"\n key = 'statement'\n if key in conf.keys():\n for statement in conf[key]:\n if 'actions' in statement and statement.get('effect', ['Allow'])[0] == 'Allow' and '*' in statement['actions'][0] \\\n and '*' in statement['resources'][0]:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = AdminPolicyDocument()\n", "path": "checkov/terraform/checks/data/aws/AdminPolicyDocument.py"}, {"content": "from checkov.terraform.checks.data.base_check import BaseDataCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\n\n\nclass StarActionPolicyDocument(BaseDataCheck):\n def __init__(self):\n name = \"Ensure no IAM policies documents allow \\\"*\\\" as a statement's actions\"\n id = \"CKV_AWS_49\"\n supported_data = ['aws_iam_policy_document']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_data=supported_data)\n\n def scan_data_conf(self, conf):\n \"\"\"\n validates iam policy document\n https://learn.hashicorp.com/terraform/aws/iam-policy\n :param conf: aws_kms_key configuration\n :return: <CheckResult>\n \"\"\"\n key = 'statement'\n if key in conf.keys():\n for statement in conf['statement']:\n if 'actions' in statement and '*' in statement['actions'][0] and statement.get('effect', ['Allow'])[0] == 'Allow':\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck 
= StarActionPolicyDocument()\n", "path": "checkov/terraform/checks/data/aws/StarActionPolicyDocument.py"}]} | 2,063 | 366 |
gh_patches_debug_41694 | rasdani/github-patches | git_diff | deepset-ai__haystack-7994 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add `max_retries` and `timeout` params to all `AzureOpenAI` classes
**Is your feature request related to a problem? Please describe.**
Currently all `OpenAI` related classes (e.g. `OpenAIDocumentEmbedder`, `OpenAIChatGenerator`) can be initialised by setting `max_retries` and `timeout` params.
The corresponding `AzureOpenAI` classes don't always have the same params.
**Describe the solution you'd like**
It would be nice to have these params in the `AzureOpenAI` classes
**Describe alternatives you've considered**
Subclass `AzureOpenAI` and create custom components.
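
For illustration, a rough sketch of that subclassing workaround is below. It assumes the `openai` SDK's `AzureOpenAI` client accepts `timeout` and `max_retries` constructor arguments and reuses the attributes the embedder already stores; the class name and default values are placeholders, not part of any released API.

```python
# Illustrative sketch of the subclassing workaround; the class name and the
# default values are assumptions, not part of the haystack API.
from haystack.components.embedders import AzureOpenAIDocumentEmbedder
from openai.lib.azure import AzureOpenAI


class PatientAzureOpenAIDocumentEmbedder(AzureOpenAIDocumentEmbedder):
    def __init__(self, *args, timeout: float = 30.0, max_retries: int = 5, **kwargs):
        super().__init__(*args, **kwargs)
        # Rebuild the underlying client so the extra settings are honoured.
        self._client = AzureOpenAI(
            api_version=self.api_version,
            azure_endpoint=self.azure_endpoint,
            azure_deployment=self.azure_deployment,
            api_key=self.api_key.resolve_value() if self.api_key else None,
            azure_ad_token=self.azure_ad_token.resolve_value() if self.azure_ad_token else None,
            organization=self.organization,
            timeout=timeout,
            max_retries=max_retries,
        )
```

A custom text embedder or generator component could wrap its client the same way.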
**Additional context**
cc @anakin87 :)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/components/embedders/azure_document_embedder.py`
Content:
```
1 # SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>
2 #
3 # SPDX-License-Identifier: Apache-2.0
4
5 import os
6 from typing import Any, Dict, List, Optional, Tuple
7
8 from openai.lib.azure import AzureOpenAI
9 from tqdm import tqdm
10
11 from haystack import Document, component, default_from_dict, default_to_dict
12 from haystack.utils import Secret, deserialize_secrets_inplace
13
14
15 @component
16 class AzureOpenAIDocumentEmbedder:
17 """
18 A component for computing Document embeddings using OpenAI models on Azure.
19
20 Usage example:
21 ```python
22 from haystack import Document
23 from haystack.components.embedders import AzureOpenAIDocumentEmbedder
24
25 doc = Document(content="I love pizza!")
26
27 document_embedder = AzureOpenAIDocumentEmbedder()
28
29 result = document_embedder.run([doc])
30 print(result['documents'][0].embedding)
31
32 # [0.017020374536514282, -0.023255806416273117, ...]
33 ```
34 """
35
36 def __init__(
37 self,
38 azure_endpoint: Optional[str] = None,
39 api_version: Optional[str] = "2023-05-15",
40 azure_deployment: str = "text-embedding-ada-002",
41 dimensions: Optional[int] = None,
42 api_key: Optional[Secret] = Secret.from_env_var("AZURE_OPENAI_API_KEY", strict=False),
43 azure_ad_token: Optional[Secret] = Secret.from_env_var("AZURE_OPENAI_AD_TOKEN", strict=False),
44 organization: Optional[str] = None,
45 prefix: str = "",
46 suffix: str = "",
47 batch_size: int = 32,
48 progress_bar: bool = True,
49 meta_fields_to_embed: Optional[List[str]] = None,
50 embedding_separator: str = "\n",
51 ):
52 """
53 Create an AzureOpenAIDocumentEmbedder component.
54
55 :param azure_endpoint:
56 The endpoint of the deployed model.
57 :param api_version:
58 The version of the API to use.
59 :param azure_deployment:
60 The deployment of the model, usually matches the model name.
61 :param dimensions:
62 The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3
63 and later models.
64 :param api_key:
65 The API key used for authentication.
66 :param azure_ad_token:
67 Microsoft Entra ID token, see Microsoft's official
68 [Entra ID](https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id)
69 documentation for more information.
70 Used to be called Azure Active Directory.
71 :param organization:
72 The Organization ID. See OpenAI's
73 [production best practices](https://platform.openai.com/docs/guides/production-best-practices/setting-up-your-organization)
74 for more information.
75 :param prefix:
76 A string to add at the beginning of each text.
77 :param suffix:
78 A string to add at the end of each text.
79 :param batch_size:
80 Number of Documents to encode at once.
81 :param progress_bar:
82 If True shows a progress bar when running.
83 :param meta_fields_to_embed:
84 List of meta fields that will be embedded along with the Document text.
85 :param embedding_separator:
86 Separator used to concatenate the meta fields to the Document text.
87 """
88 # if not provided as a parameter, azure_endpoint is read from the env var AZURE_OPENAI_ENDPOINT
89 azure_endpoint = azure_endpoint or os.environ.get("AZURE_OPENAI_ENDPOINT")
90 if not azure_endpoint:
91 raise ValueError("Please provide an Azure endpoint or set the environment variable AZURE_OPENAI_ENDPOINT.")
92
93 if api_key is None and azure_ad_token is None:
94 raise ValueError("Please provide an API key or an Azure Active Directory token.")
95
96 self.api_key = api_key
97 self.azure_ad_token = azure_ad_token
98 self.api_version = api_version
99 self.azure_endpoint = azure_endpoint
100 self.azure_deployment = azure_deployment
101 self.dimensions = dimensions
102 self.organization = organization
103 self.prefix = prefix
104 self.suffix = suffix
105 self.batch_size = batch_size
106 self.progress_bar = progress_bar
107 self.meta_fields_to_embed = meta_fields_to_embed or []
108 self.embedding_separator = embedding_separator
109
110 self._client = AzureOpenAI(
111 api_version=api_version,
112 azure_endpoint=azure_endpoint,
113 azure_deployment=azure_deployment,
114 api_key=api_key.resolve_value() if api_key is not None else None,
115 azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None,
116 organization=organization,
117 )
118
119 def _get_telemetry_data(self) -> Dict[str, Any]:
120 """
121 Data that is sent to Posthog for usage analytics.
122 """
123 return {"model": self.azure_deployment}
124
125 def to_dict(self) -> Dict[str, Any]:
126 """
127 Serializes the component to a dictionary.
128
129 :returns:
130 Dictionary with serialized data.
131 """
132 return default_to_dict(
133 self,
134 azure_endpoint=self.azure_endpoint,
135 azure_deployment=self.azure_deployment,
136 dimensions=self.dimensions,
137 organization=self.organization,
138 api_version=self.api_version,
139 prefix=self.prefix,
140 suffix=self.suffix,
141 batch_size=self.batch_size,
142 progress_bar=self.progress_bar,
143 meta_fields_to_embed=self.meta_fields_to_embed,
144 embedding_separator=self.embedding_separator,
145 api_key=self.api_key.to_dict() if self.api_key is not None else None,
146 azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,
147 )
148
149 @classmethod
150 def from_dict(cls, data: Dict[str, Any]) -> "AzureOpenAIDocumentEmbedder":
151 """
152 Deserializes the component from a dictionary.
153
154 :param data:
155 Dictionary to deserialize from.
156 :returns:
157 Deserialized component.
158 """
159 deserialize_secrets_inplace(data["init_parameters"], keys=["api_key", "azure_ad_token"])
160 return default_from_dict(cls, data)
161
162 def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]:
163 """
164 Prepare the texts to embed by concatenating the Document text with the metadata fields to embed.
165 """
166 texts_to_embed = []
167 for doc in documents:
168 meta_values_to_embed = [
169 str(doc.meta[key]) for key in self.meta_fields_to_embed if key in doc.meta and doc.meta[key] is not None
170 ]
171
172 text_to_embed = (
173 self.prefix + self.embedding_separator.join(meta_values_to_embed + [doc.content or ""]) + self.suffix
174 ).replace("\n", " ")
175
176 texts_to_embed.append(text_to_embed)
177 return texts_to_embed
178
179 def _embed_batch(self, texts_to_embed: List[str], batch_size: int) -> Tuple[List[List[float]], Dict[str, Any]]:
180 """
181 Embed a list of texts in batches.
182 """
183
184 all_embeddings: List[List[float]] = []
185 meta: Dict[str, Any] = {"model": "", "usage": {"prompt_tokens": 0, "total_tokens": 0}}
186 for i in tqdm(range(0, len(texts_to_embed), batch_size), desc="Embedding Texts"):
187 batch = texts_to_embed[i : i + batch_size]
188 if self.dimensions is not None:
189 response = self._client.embeddings.create(
190 model=self.azure_deployment, dimensions=self.dimensions, input=batch
191 )
192 else:
193 response = self._client.embeddings.create(model=self.azure_deployment, input=batch)
194
195 # Append embeddings to the list
196 all_embeddings.extend(el.embedding for el in response.data)
197
198 # Update the meta information only once if it's empty
199 if not meta["model"]:
200 meta["model"] = response.model
201 meta["usage"] = dict(response.usage)
202 else:
203 # Update the usage tokens
204 meta["usage"]["prompt_tokens"] += response.usage.prompt_tokens
205 meta["usage"]["total_tokens"] += response.usage.total_tokens
206
207 return all_embeddings, meta
208
209 @component.output_types(documents=List[Document], meta=Dict[str, Any])
210 def run(self, documents: List[Document]) -> Dict[str, Any]:
211 """
212 Embed a list of Documents.
213
214 :param documents:
215 Documents to embed.
216
217 :returns:
218 A dictionary with the following keys:
219 - `documents`: Documents with embeddings
220 - `meta`: Information about the usage of the model.
221 """
222 if not (isinstance(documents, list) and all(isinstance(doc, Document) for doc in documents)):
223 raise TypeError("Input must be a list of Document instances. For strings, use AzureOpenAITextEmbedder.")
224
225 texts_to_embed = self._prepare_texts_to_embed(documents=documents)
226 embeddings, meta = self._embed_batch(texts_to_embed=texts_to_embed, batch_size=self.batch_size)
227
228 # Assign the corresponding embeddings to each document
229 for doc, emb in zip(documents, embeddings):
230 doc.embedding = emb
231
232 return {"documents": documents, "meta": meta}
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/components/embedders/azure_document_embedder.py b/haystack/components/embedders/azure_document_embedder.py
--- a/haystack/components/embedders/azure_document_embedder.py
+++ b/haystack/components/embedders/azure_document_embedder.py
@@ -33,7 +33,7 @@
```
"""
- def __init__(
+ def __init__( # noqa: PLR0913 (too-many-arguments)
self,
azure_endpoint: Optional[str] = None,
api_version: Optional[str] = "2023-05-15",
@@ -48,6 +48,8 @@
progress_bar: bool = True,
meta_fields_to_embed: Optional[List[str]] = None,
embedding_separator: str = "\n",
+ timeout: Optional[float] = None,
+ max_retries: Optional[int] = None,
):
"""
Create an AzureOpenAIDocumentEmbedder component.
@@ -84,6 +86,10 @@
List of meta fields that will be embedded along with the Document text.
:param embedding_separator:
Separator used to concatenate the meta fields to the Document text.
+ :param timeout: The timeout in seconds to be passed to the underlying `AzureOpenAI` client, if not set it is
+ inferred from the `OPENAI_TIMEOUT` environment variable or set to 30.
+ :param max_retries: Maximum retries to establish a connection with AzureOpenAI if it returns an internal error,
+ if not set it is inferred from the `OPENAI_MAX_RETRIES` environment variable or set to 5.
"""
# if not provided as a parameter, azure_endpoint is read from the env var AZURE_OPENAI_ENDPOINT
azure_endpoint = azure_endpoint or os.environ.get("AZURE_OPENAI_ENDPOINT")
@@ -106,6 +112,8 @@
self.progress_bar = progress_bar
self.meta_fields_to_embed = meta_fields_to_embed or []
self.embedding_separator = embedding_separator
+ self.timeout = timeout or float(os.environ.get("OPENAI_TIMEOUT", 30.0))
+ self.max_retries = max_retries or int(os.environ.get("OPENAI_MAX_RETRIES", 5))
self._client = AzureOpenAI(
api_version=api_version,
@@ -114,6 +122,8 @@
api_key=api_key.resolve_value() if api_key is not None else None,
azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None,
organization=organization,
+ timeout=self.timeout,
+ max_retries=self.max_retries,
)
def _get_telemetry_data(self) -> Dict[str, Any]:
@@ -144,6 +154,8 @@
embedding_separator=self.embedding_separator,
api_key=self.api_key.to_dict() if self.api_key is not None else None,
azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,
+ timeout=self.timeout,
+ max_retries=self.max_retries,
)
@classmethod
| {"golden_diff": "diff --git a/haystack/components/embedders/azure_document_embedder.py b/haystack/components/embedders/azure_document_embedder.py\n--- a/haystack/components/embedders/azure_document_embedder.py\n+++ b/haystack/components/embedders/azure_document_embedder.py\n@@ -33,7 +33,7 @@\n ```\n \"\"\"\n \n- def __init__(\n+ def __init__( # noqa: PLR0913 (too-many-arguments)\n self,\n azure_endpoint: Optional[str] = None,\n api_version: Optional[str] = \"2023-05-15\",\n@@ -48,6 +48,8 @@\n progress_bar: bool = True,\n meta_fields_to_embed: Optional[List[str]] = None,\n embedding_separator: str = \"\\n\",\n+ timeout: Optional[float] = None,\n+ max_retries: Optional[int] = None,\n ):\n \"\"\"\n Create an AzureOpenAIDocumentEmbedder component.\n@@ -84,6 +86,10 @@\n List of meta fields that will be embedded along with the Document text.\n :param embedding_separator:\n Separator used to concatenate the meta fields to the Document text.\n+ :param timeout: The timeout in seconds to be passed to the underlying `AzureOpenAI` client, if not set it is\n+ inferred from the `OPENAI_TIMEOUT` environment variable or set to 30.\n+ :param max_retries: Maximum retries to establish a connection with AzureOpenAI if it returns an internal error,\n+ if not set it is inferred from the `OPENAI_MAX_RETRIES` environment variable or set to 5.\n \"\"\"\n # if not provided as a parameter, azure_endpoint is read from the env var AZURE_OPENAI_ENDPOINT\n azure_endpoint = azure_endpoint or os.environ.get(\"AZURE_OPENAI_ENDPOINT\")\n@@ -106,6 +112,8 @@\n self.progress_bar = progress_bar\n self.meta_fields_to_embed = meta_fields_to_embed or []\n self.embedding_separator = embedding_separator\n+ self.timeout = timeout or float(os.environ.get(\"OPENAI_TIMEOUT\", 30.0))\n+ self.max_retries = max_retries or int(os.environ.get(\"OPENAI_MAX_RETRIES\", 5))\n \n self._client = AzureOpenAI(\n api_version=api_version,\n@@ -114,6 +122,8 @@\n api_key=api_key.resolve_value() if api_key is not None else None,\n azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None,\n organization=organization,\n+ timeout=self.timeout,\n+ max_retries=self.max_retries,\n )\n \n def _get_telemetry_data(self) -> Dict[str, Any]:\n@@ -144,6 +154,8 @@\n embedding_separator=self.embedding_separator,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n+ timeout=self.timeout,\n+ max_retries=self.max_retries,\n )\n \n @classmethod\n", "issue": "Add `max_retries` and `timeout` params to all `AzureOpenAI` classes\n**Is your feature request related to a problem? Please describe.**\r\n\r\nCurrently all `OpenAI` related classes (e.g. 
`OpenAIDocumentEmbedder`, `OpenAIChatGenerator`) can be initialised by setting `max_retries` and `timeout` params.\r\n\r\nThe corresponding `AzureOpenAI` don't always have the same params.\r\n\r\n**Describe the solution you'd like**\r\n\r\nIt would be nice to have these params in the `AzureOpenAI` classes\r\n\r\n**Describe alternatives you've considered**\r\n\r\nSubclass `AzureOpenAI` and create custom components.\r\n\r\n**Additional context**\r\n\r\ncc @anakin87 :)\n", "before_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom openai.lib.azure import AzureOpenAI\nfrom tqdm import tqdm\n\nfrom haystack import Document, component, default_from_dict, default_to_dict\nfrom haystack.utils import Secret, deserialize_secrets_inplace\n\n\n@component\nclass AzureOpenAIDocumentEmbedder:\n \"\"\"\n A component for computing Document embeddings using OpenAI models on Azure.\n\n Usage example:\n ```python\n from haystack import Document\n from haystack.components.embedders import AzureOpenAIDocumentEmbedder\n\n doc = Document(content=\"I love pizza!\")\n\n document_embedder = AzureOpenAIDocumentEmbedder()\n\n result = document_embedder.run([doc])\n print(result['documents'][0].embedding)\n\n # [0.017020374536514282, -0.023255806416273117, ...]\n ```\n \"\"\"\n\n def __init__(\n self,\n azure_endpoint: Optional[str] = None,\n api_version: Optional[str] = \"2023-05-15\",\n azure_deployment: str = \"text-embedding-ada-002\",\n dimensions: Optional[int] = None,\n api_key: Optional[Secret] = Secret.from_env_var(\"AZURE_OPENAI_API_KEY\", strict=False),\n azure_ad_token: Optional[Secret] = Secret.from_env_var(\"AZURE_OPENAI_AD_TOKEN\", strict=False),\n organization: Optional[str] = None,\n prefix: str = \"\",\n suffix: str = \"\",\n batch_size: int = 32,\n progress_bar: bool = True,\n meta_fields_to_embed: Optional[List[str]] = None,\n embedding_separator: str = \"\\n\",\n ):\n \"\"\"\n Create an AzureOpenAIDocumentEmbedder component.\n\n :param azure_endpoint:\n The endpoint of the deployed model.\n :param api_version:\n The version of the API to use.\n :param azure_deployment:\n The deployment of the model, usually matches the model name.\n :param dimensions:\n The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3\n and later models.\n :param api_key:\n The API key used for authentication.\n :param azure_ad_token:\n Microsoft Entra ID token, see Microsoft's official\n [Entra ID](https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id)\n documentation for more information.\n Used to be called Azure Active Directory.\n :param organization:\n The Organization ID. 
See OpenAI's\n [production best practices](https://platform.openai.com/docs/guides/production-best-practices/setting-up-your-organization)\n for more information.\n :param prefix:\n A string to add at the beginning of each text.\n :param suffix:\n A string to add at the end of each text.\n :param batch_size:\n Number of Documents to encode at once.\n :param progress_bar:\n If True shows a progress bar when running.\n :param meta_fields_to_embed:\n List of meta fields that will be embedded along with the Document text.\n :param embedding_separator:\n Separator used to concatenate the meta fields to the Document text.\n \"\"\"\n # if not provided as a parameter, azure_endpoint is read from the env var AZURE_OPENAI_ENDPOINT\n azure_endpoint = azure_endpoint or os.environ.get(\"AZURE_OPENAI_ENDPOINT\")\n if not azure_endpoint:\n raise ValueError(\"Please provide an Azure endpoint or set the environment variable AZURE_OPENAI_ENDPOINT.\")\n\n if api_key is None and azure_ad_token is None:\n raise ValueError(\"Please provide an API key or an Azure Active Directory token.\")\n\n self.api_key = api_key\n self.azure_ad_token = azure_ad_token\n self.api_version = api_version\n self.azure_endpoint = azure_endpoint\n self.azure_deployment = azure_deployment\n self.dimensions = dimensions\n self.organization = organization\n self.prefix = prefix\n self.suffix = suffix\n self.batch_size = batch_size\n self.progress_bar = progress_bar\n self.meta_fields_to_embed = meta_fields_to_embed or []\n self.embedding_separator = embedding_separator\n\n self._client = AzureOpenAI(\n api_version=api_version,\n azure_endpoint=azure_endpoint,\n azure_deployment=azure_deployment,\n api_key=api_key.resolve_value() if api_key is not None else None,\n azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None,\n organization=organization,\n )\n\n def _get_telemetry_data(self) -> Dict[str, Any]:\n \"\"\"\n Data that is sent to Posthog for usage analytics.\n \"\"\"\n return {\"model\": self.azure_deployment}\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(\n self,\n azure_endpoint=self.azure_endpoint,\n azure_deployment=self.azure_deployment,\n dimensions=self.dimensions,\n organization=self.organization,\n api_version=self.api_version,\n prefix=self.prefix,\n suffix=self.suffix,\n batch_size=self.batch_size,\n progress_bar=self.progress_bar,\n meta_fields_to_embed=self.meta_fields_to_embed,\n embedding_separator=self.embedding_separator,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n )\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"AzureOpenAIDocumentEmbedder\":\n \"\"\"\n Deserializes the component from a dictionary.\n\n :param data:\n Dictionary to deserialize from.\n :returns:\n Deserialized component.\n \"\"\"\n deserialize_secrets_inplace(data[\"init_parameters\"], keys=[\"api_key\", \"azure_ad_token\"])\n return default_from_dict(cls, data)\n\n def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]:\n \"\"\"\n Prepare the texts to embed by concatenating the Document text with the metadata fields to embed.\n \"\"\"\n texts_to_embed = []\n for doc in documents:\n meta_values_to_embed = [\n str(doc.meta[key]) for key in self.meta_fields_to_embed if key in doc.meta and doc.meta[key] is not None\n ]\n\n 
text_to_embed = (\n self.prefix + self.embedding_separator.join(meta_values_to_embed + [doc.content or \"\"]) + self.suffix\n ).replace(\"\\n\", \" \")\n\n texts_to_embed.append(text_to_embed)\n return texts_to_embed\n\n def _embed_batch(self, texts_to_embed: List[str], batch_size: int) -> Tuple[List[List[float]], Dict[str, Any]]:\n \"\"\"\n Embed a list of texts in batches.\n \"\"\"\n\n all_embeddings: List[List[float]] = []\n meta: Dict[str, Any] = {\"model\": \"\", \"usage\": {\"prompt_tokens\": 0, \"total_tokens\": 0}}\n for i in tqdm(range(0, len(texts_to_embed), batch_size), desc=\"Embedding Texts\"):\n batch = texts_to_embed[i : i + batch_size]\n if self.dimensions is not None:\n response = self._client.embeddings.create(\n model=self.azure_deployment, dimensions=self.dimensions, input=batch\n )\n else:\n response = self._client.embeddings.create(model=self.azure_deployment, input=batch)\n\n # Append embeddings to the list\n all_embeddings.extend(el.embedding for el in response.data)\n\n # Update the meta information only once if it's empty\n if not meta[\"model\"]:\n meta[\"model\"] = response.model\n meta[\"usage\"] = dict(response.usage)\n else:\n # Update the usage tokens\n meta[\"usage\"][\"prompt_tokens\"] += response.usage.prompt_tokens\n meta[\"usage\"][\"total_tokens\"] += response.usage.total_tokens\n\n return all_embeddings, meta\n\n @component.output_types(documents=List[Document], meta=Dict[str, Any])\n def run(self, documents: List[Document]) -> Dict[str, Any]:\n \"\"\"\n Embed a list of Documents.\n\n :param documents:\n Documents to embed.\n\n :returns:\n A dictionary with the following keys:\n - `documents`: Documents with embeddings\n - `meta`: Information about the usage of the model.\n \"\"\"\n if not (isinstance(documents, list) and all(isinstance(doc, Document) for doc in documents)):\n raise TypeError(\"Input must be a list of Document instances. 
For strings, use AzureOpenAITextEmbedder.\")\n\n texts_to_embed = self._prepare_texts_to_embed(documents=documents)\n embeddings, meta = self._embed_batch(texts_to_embed=texts_to_embed, batch_size=self.batch_size)\n\n # Assign the corresponding embeddings to each document\n for doc, emb in zip(documents, embeddings):\n doc.embedding = emb\n\n return {\"documents\": documents, \"meta\": meta}\n", "path": "haystack/components/embedders/azure_document_embedder.py"}], "after_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom openai.lib.azure import AzureOpenAI\nfrom tqdm import tqdm\n\nfrom haystack import Document, component, default_from_dict, default_to_dict\nfrom haystack.utils import Secret, deserialize_secrets_inplace\n\n\n@component\nclass AzureOpenAIDocumentEmbedder:\n \"\"\"\n A component for computing Document embeddings using OpenAI models on Azure.\n\n Usage example:\n ```python\n from haystack import Document\n from haystack.components.embedders import AzureOpenAIDocumentEmbedder\n\n doc = Document(content=\"I love pizza!\")\n\n document_embedder = AzureOpenAIDocumentEmbedder()\n\n result = document_embedder.run([doc])\n print(result['documents'][0].embedding)\n\n # [0.017020374536514282, -0.023255806416273117, ...]\n ```\n \"\"\"\n\n def __init__( # noqa: PLR0913 (too-many-arguments)\n self,\n azure_endpoint: Optional[str] = None,\n api_version: Optional[str] = \"2023-05-15\",\n azure_deployment: str = \"text-embedding-ada-002\",\n dimensions: Optional[int] = None,\n api_key: Optional[Secret] = Secret.from_env_var(\"AZURE_OPENAI_API_KEY\", strict=False),\n azure_ad_token: Optional[Secret] = Secret.from_env_var(\"AZURE_OPENAI_AD_TOKEN\", strict=False),\n organization: Optional[str] = None,\n prefix: str = \"\",\n suffix: str = \"\",\n batch_size: int = 32,\n progress_bar: bool = True,\n meta_fields_to_embed: Optional[List[str]] = None,\n embedding_separator: str = \"\\n\",\n timeout: Optional[float] = None,\n max_retries: Optional[int] = None,\n ):\n \"\"\"\n Create an AzureOpenAIDocumentEmbedder component.\n\n :param azure_endpoint:\n The endpoint of the deployed model.\n :param api_version:\n The version of the API to use.\n :param azure_deployment:\n The deployment of the model, usually matches the model name.\n :param dimensions:\n The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3\n and later models.\n :param api_key:\n The API key used for authentication.\n :param azure_ad_token:\n Microsoft Entra ID token, see Microsoft's official\n [Entra ID](https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id)\n documentation for more information.\n Used to be called Azure Active Directory.\n :param organization:\n The Organization ID. 
See OpenAI's\n [production best practices](https://platform.openai.com/docs/guides/production-best-practices/setting-up-your-organization)\n for more information.\n :param prefix:\n A string to add at the beginning of each text.\n :param suffix:\n A string to add at the end of each text.\n :param batch_size:\n Number of Documents to encode at once.\n :param progress_bar:\n If True shows a progress bar when running.\n :param meta_fields_to_embed:\n List of meta fields that will be embedded along with the Document text.\n :param embedding_separator:\n Separator used to concatenate the meta fields to the Document text.\n :param timeout: The timeout in seconds to be passed to the underlying `AzureOpenAI` client, if not set it is\n inferred from the `OPENAI_TIMEOUT` environment variable or set to 30.\n :param max_retries: Maximum retries to establish a connection with AzureOpenAI if it returns an internal error,\n if not set it is inferred from the `OPENAI_MAX_RETRIES` environment variable or set to 5.\n \"\"\"\n # if not provided as a parameter, azure_endpoint is read from the env var AZURE_OPENAI_ENDPOINT\n azure_endpoint = azure_endpoint or os.environ.get(\"AZURE_OPENAI_ENDPOINT\")\n if not azure_endpoint:\n raise ValueError(\"Please provide an Azure endpoint or set the environment variable AZURE_OPENAI_ENDPOINT.\")\n\n if api_key is None and azure_ad_token is None:\n raise ValueError(\"Please provide an API key or an Azure Active Directory token.\")\n\n self.api_key = api_key\n self.azure_ad_token = azure_ad_token\n self.api_version = api_version\n self.azure_endpoint = azure_endpoint\n self.azure_deployment = azure_deployment\n self.dimensions = dimensions\n self.organization = organization\n self.prefix = prefix\n self.suffix = suffix\n self.batch_size = batch_size\n self.progress_bar = progress_bar\n self.meta_fields_to_embed = meta_fields_to_embed or []\n self.embedding_separator = embedding_separator\n self.timeout = timeout or float(os.environ.get(\"OPENAI_TIMEOUT\", 30.0))\n self.max_retries = max_retries or int(os.environ.get(\"OPENAI_MAX_RETRIES\", 5))\n\n self._client = AzureOpenAI(\n api_version=api_version,\n azure_endpoint=azure_endpoint,\n azure_deployment=azure_deployment,\n api_key=api_key.resolve_value() if api_key is not None else None,\n azure_ad_token=azure_ad_token.resolve_value() if azure_ad_token is not None else None,\n organization=organization,\n timeout=self.timeout,\n max_retries=self.max_retries,\n )\n\n def _get_telemetry_data(self) -> Dict[str, Any]:\n \"\"\"\n Data that is sent to Posthog for usage analytics.\n \"\"\"\n return {\"model\": self.azure_deployment}\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(\n self,\n azure_endpoint=self.azure_endpoint,\n azure_deployment=self.azure_deployment,\n dimensions=self.dimensions,\n organization=self.organization,\n api_version=self.api_version,\n prefix=self.prefix,\n suffix=self.suffix,\n batch_size=self.batch_size,\n progress_bar=self.progress_bar,\n meta_fields_to_embed=self.meta_fields_to_embed,\n embedding_separator=self.embedding_separator,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n timeout=self.timeout,\n max_retries=self.max_retries,\n )\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"AzureOpenAIDocumentEmbedder\":\n \"\"\"\n Deserializes 
the component from a dictionary.\n\n :param data:\n Dictionary to deserialize from.\n :returns:\n Deserialized component.\n \"\"\"\n deserialize_secrets_inplace(data[\"init_parameters\"], keys=[\"api_key\", \"azure_ad_token\"])\n return default_from_dict(cls, data)\n\n def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]:\n \"\"\"\n Prepare the texts to embed by concatenating the Document text with the metadata fields to embed.\n \"\"\"\n texts_to_embed = []\n for doc in documents:\n meta_values_to_embed = [\n str(doc.meta[key]) for key in self.meta_fields_to_embed if key in doc.meta and doc.meta[key] is not None\n ]\n\n text_to_embed = (\n self.prefix + self.embedding_separator.join(meta_values_to_embed + [doc.content or \"\"]) + self.suffix\n ).replace(\"\\n\", \" \")\n\n texts_to_embed.append(text_to_embed)\n return texts_to_embed\n\n def _embed_batch(self, texts_to_embed: List[str], batch_size: int) -> Tuple[List[List[float]], Dict[str, Any]]:\n \"\"\"\n Embed a list of texts in batches.\n \"\"\"\n\n all_embeddings: List[List[float]] = []\n meta: Dict[str, Any] = {\"model\": \"\", \"usage\": {\"prompt_tokens\": 0, \"total_tokens\": 0}}\n for i in tqdm(range(0, len(texts_to_embed), batch_size), desc=\"Embedding Texts\"):\n batch = texts_to_embed[i : i + batch_size]\n if self.dimensions is not None:\n response = self._client.embeddings.create(\n model=self.azure_deployment, dimensions=self.dimensions, input=batch\n )\n else:\n response = self._client.embeddings.create(model=self.azure_deployment, input=batch)\n\n # Append embeddings to the list\n all_embeddings.extend(el.embedding for el in response.data)\n\n # Update the meta information only once if it's empty\n if not meta[\"model\"]:\n meta[\"model\"] = response.model\n meta[\"usage\"] = dict(response.usage)\n else:\n # Update the usage tokens\n meta[\"usage\"][\"prompt_tokens\"] += response.usage.prompt_tokens\n meta[\"usage\"][\"total_tokens\"] += response.usage.total_tokens\n\n return all_embeddings, meta\n\n @component.output_types(documents=List[Document], meta=Dict[str, Any])\n def run(self, documents: List[Document]) -> Dict[str, Any]:\n \"\"\"\n Embed a list of Documents.\n\n :param documents:\n Documents to embed.\n\n :returns:\n A dictionary with the following keys:\n - `documents`: Documents with embeddings\n - `meta`: Information about the usage of the model.\n \"\"\"\n if not (isinstance(documents, list) and all(isinstance(doc, Document) for doc in documents)):\n raise TypeError(\"Input must be a list of Document instances. For strings, use AzureOpenAITextEmbedder.\")\n\n texts_to_embed = self._prepare_texts_to_embed(documents=documents)\n embeddings, meta = self._embed_batch(texts_to_embed=texts_to_embed, batch_size=self.batch_size)\n\n # Assign the corresponding embeddings to each document\n for doc, emb in zip(documents, embeddings):\n doc.embedding = emb\n\n return {\"documents\": documents, \"meta\": meta}\n", "path": "haystack/components/embedders/azure_document_embedder.py"}]} | 3,003 | 690 |
gh_patches_debug_16984 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-1872 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider sallybeauty is broken
During the global build at 2021-05-26-14-42-23, spider **sallybeauty** failed with **2712 features** and **5 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/sallybeauty.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/sallybeauty.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/sallybeauty.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/sallybeauty.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 from locations.items import GeojsonPointItem
4 from urllib.parse import urlencode
5 import json
6 import csv
7 from locations.hours import OpeningHours
8 from scrapy.selector import Selector
9
10
11 class SallySpider(scrapy.Spider):
12 name = "sallybeauty"
13 item_attributes = { 'brand': "Sally Beauty" }
14 allowed_domains = ["sallybeauty.com"]
15
16 def start_requests(self):
17 base_url = "https://www.sallybeauty.com/on/demandware.store/Sites-SA-Site/default/Stores-FindStores?"
18
19 point_files = [
20 './locations/searchable_points/us_centroids_100mile_radius.csv',
21 './locations/searchable_points/ca_centroids_100mile_radius.csv'
22 ]
23
24 params = {
25 "showmap": "true",
26 "radius": "100",
27 }
28
29 for point_file in point_files:
30 with open(point_file) as points:
31 next(points)
32 for point in points:
33 _, lat, lon = point.strip().split(',')
34 params.update({"lat": lat, "long": lon})
35 yield scrapy.Request(url=base_url + urlencode(params))
36
37 def parse_hours(self, hours):
38 hrs = Selector(text=hours)
39 days = hrs.xpath('//div[@class="store-hours-day"]/text()').extract()
40 hours = hrs.xpath('//div[@class="store-hours-day"]/span/text()').extract()
41
42 opening_hours = OpeningHours()
43
44 for d, h in zip(days, hours):
45 try:
46 day = d.strip(': ')
47 open_time, close_time = h.split(' - ')
48 open_time = open_time.lstrip('0')
49 opening_hours.add_range(day=day[:2],
50 open_time=open_time,
51 close_time=close_time,
52 time_format="%I:%M %p")
53 except:
54 continue
55
56 return opening_hours.as_opening_hours()
57
58 def parse(self, response):
59 jdata = json.loads(response.body_as_unicode())
60
61 for row in jdata.get('stores',[]):
62
63 properties = {
64 'ref': row["ID"],
65 'name': row["name"],
66 'addr_full': " ".join([row["address1"], row.get("address2", "") or ""]).strip(),
67 'city': row["city"],
68 'postcode': row["postalCode"],
69 'lat': row["latitude"],
70 'lon': row["longitude"],
71 'phone': row["phone"],
72 'state': row["stateCode"],
73 }
74
75 hours = self.parse_hours(row["storeHours"])
76 if hours:
77 properties['opening_hours'] = hours
78
79 yield GeojsonPointItem(**properties)
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/sallybeauty.py b/locations/spiders/sallybeauty.py
--- a/locations/spiders/sallybeauty.py
+++ b/locations/spiders/sallybeauty.py
@@ -58,7 +58,7 @@
def parse(self, response):
jdata = json.loads(response.body_as_unicode())
- for row in jdata.get('stores',[]):
+ for row in jdata.get('stores', []):
properties = {
'ref': row["ID"],
@@ -72,8 +72,11 @@
'state': row["stateCode"],
}
- hours = self.parse_hours(row["storeHours"])
- if hours:
- properties['opening_hours'] = hours
+ store_hours = row.get("storeHours")
+ if store_hours:
+ hours = self.parse_hours(store_hours)
+
+ if hours:
+ properties['opening_hours'] = hours
yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/sallybeauty.py b/locations/spiders/sallybeauty.py\n--- a/locations/spiders/sallybeauty.py\n+++ b/locations/spiders/sallybeauty.py\n@@ -58,7 +58,7 @@\n def parse(self, response):\n jdata = json.loads(response.body_as_unicode())\n \n- for row in jdata.get('stores',[]):\n+ for row in jdata.get('stores', []):\n \n properties = {\n 'ref': row[\"ID\"],\n@@ -72,8 +72,11 @@\n 'state': row[\"stateCode\"],\n }\n \n- hours = self.parse_hours(row[\"storeHours\"])\n- if hours:\n- properties['opening_hours'] = hours\n+ store_hours = row.get(\"storeHours\")\n+ if store_hours:\n+ hours = self.parse_hours(store_hours)\n+\n+ if hours:\n+ properties['opening_hours'] = hours\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider sallybeauty is broken\nDuring the global build at 2021-05-26-14-42-23, spider **sallybeauty** failed with **2712 features** and **5 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/sallybeauty.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/sallybeauty.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/sallybeauty.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom urllib.parse import urlencode\nimport json\nimport csv\nfrom locations.hours import OpeningHours\nfrom scrapy.selector import Selector\n\n\nclass SallySpider(scrapy.Spider):\n name = \"sallybeauty\"\n item_attributes = { 'brand': \"Sally Beauty\" }\n allowed_domains = [\"sallybeauty.com\"]\n\n def start_requests(self):\n base_url = \"https://www.sallybeauty.com/on/demandware.store/Sites-SA-Site/default/Stores-FindStores?\"\n\n point_files = [\n './locations/searchable_points/us_centroids_100mile_radius.csv',\n './locations/searchable_points/ca_centroids_100mile_radius.csv'\n ]\n\n params = {\n \"showmap\": \"true\",\n \"radius\": \"100\",\n }\n\n for point_file in point_files:\n with open(point_file) as points:\n next(points)\n for point in points:\n _, lat, lon = point.strip().split(',')\n params.update({\"lat\": lat, \"long\": lon})\n yield scrapy.Request(url=base_url + urlencode(params))\n\n def parse_hours(self, hours):\n hrs = Selector(text=hours)\n days = hrs.xpath('//div[@class=\"store-hours-day\"]/text()').extract()\n hours = hrs.xpath('//div[@class=\"store-hours-day\"]/span/text()').extract()\n\n opening_hours = OpeningHours()\n\n for d, h in zip(days, hours):\n try:\n day = d.strip(': ')\n open_time, close_time = h.split(' - ')\n open_time = open_time.lstrip('0')\n opening_hours.add_range(day=day[:2],\n open_time=open_time,\n close_time=close_time,\n time_format=\"%I:%M %p\")\n except:\n continue\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n jdata = json.loads(response.body_as_unicode())\n\n for row in jdata.get('stores',[]):\n\n properties = {\n 'ref': row[\"ID\"],\n 'name': row[\"name\"],\n 'addr_full': \" \".join([row[\"address1\"], row.get(\"address2\", \"\") or \"\"]).strip(),\n 'city': row[\"city\"],\n 'postcode': row[\"postalCode\"],\n 'lat': row[\"latitude\"],\n 'lon': row[\"longitude\"],\n 'phone': row[\"phone\"],\n 'state': row[\"stateCode\"],\n }\n\n hours = self.parse_hours(row[\"storeHours\"])\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/sallybeauty.py"}], "after_files": [{"content": "# -*- coding: utf-8 
-*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom urllib.parse import urlencode\nimport json\nimport csv\nfrom locations.hours import OpeningHours\nfrom scrapy.selector import Selector\n\n\nclass SallySpider(scrapy.Spider):\n name = \"sallybeauty\"\n item_attributes = { 'brand': \"Sally Beauty\" }\n allowed_domains = [\"sallybeauty.com\"]\n\n def start_requests(self):\n base_url = \"https://www.sallybeauty.com/on/demandware.store/Sites-SA-Site/default/Stores-FindStores?\"\n\n point_files = [\n './locations/searchable_points/us_centroids_100mile_radius.csv',\n './locations/searchable_points/ca_centroids_100mile_radius.csv'\n ]\n\n params = {\n \"showmap\": \"true\",\n \"radius\": \"100\",\n }\n\n for point_file in point_files:\n with open(point_file) as points:\n next(points)\n for point in points:\n _, lat, lon = point.strip().split(',')\n params.update({\"lat\": lat, \"long\": lon})\n yield scrapy.Request(url=base_url + urlencode(params))\n\n def parse_hours(self, hours):\n hrs = Selector(text=hours)\n days = hrs.xpath('//div[@class=\"store-hours-day\"]/text()').extract()\n hours = hrs.xpath('//div[@class=\"store-hours-day\"]/span/text()').extract()\n\n opening_hours = OpeningHours()\n\n for d, h in zip(days, hours):\n try:\n day = d.strip(': ')\n open_time, close_time = h.split(' - ')\n open_time = open_time.lstrip('0')\n opening_hours.add_range(day=day[:2],\n open_time=open_time,\n close_time=close_time,\n time_format=\"%I:%M %p\")\n except:\n continue\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n jdata = json.loads(response.body_as_unicode())\n\n for row in jdata.get('stores', []):\n\n properties = {\n 'ref': row[\"ID\"],\n 'name': row[\"name\"],\n 'addr_full': \" \".join([row[\"address1\"], row.get(\"address2\", \"\") or \"\"]).strip(),\n 'city': row[\"city\"],\n 'postcode': row[\"postalCode\"],\n 'lat': row[\"latitude\"],\n 'lon': row[\"longitude\"],\n 'phone': row[\"phone\"],\n 'state': row[\"stateCode\"],\n }\n\n store_hours = row.get(\"storeHours\")\n if store_hours:\n hours = self.parse_hours(store_hours)\n\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/sallybeauty.py"}]} | 1,187 | 222 |
gh_patches_debug_55584 | rasdani/github-patches | git_diff | wagtail__wagtail-1873 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Migrating to 1.1 Migration File Errors
I am attempting to migrate to 1.1 and I am getting an error involving the migration files.
```
Migration wagtailcore.0017_change_edit_page_permission_description dependencies reference nonexistent parent node (u'wagtailcore', u'0001_squashed_0016_change_page_url_path_to_text_field')
```
The last migration for wagtail core in my migrations table is 0015. Since 0017 refers to 0001_squashed_0016 as a dependency and since I have not applied that migration, it's turning up as an error.
I tried manually applying 0016, but the error is preventing that from happening.
I know the issue queue is not intended for support questions, but I was speaking in the #django irc channel and they told me to check and see if the migrations were autogenerated. They said that normally migrations refer to the one before it and not the squashed ones.
Migrating to 1.1 Migration File Errors
I am attempting to migrate to 1.1 and I am getting an error involving the migration files.
```
Migration wagtailcore.0017_change_edit_page_permission_description dependencies reference nonexistent parent node (u'wagtailcore', u'0001_squashed_0016_change_page_url_path_to_text_field')
```
The last migration for wagtail core in my migrations table is 0015. Since 0017 refers to 0001_squashed_0016 as a dependency and since I have not applied that migration, it's turning up as an error.
I tried manually applying 0016, but the error is preventing that from happening.
I know the issue queue is not intended for support questions, but I was speaking in the #django irc channel and they told me to check and see if the migrations were autogenerated. They said that normally migrations refer to the one before it and not the squashed ones.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from django.db import models, migrations
5
6
7 class Migration(migrations.Migration):
8
9 dependencies = [
10 ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),
11 ]
12
13 operations = [
14 migrations.AlterField(
15 model_name='grouppagepermission',
16 name='permission_type',
17 field=models.CharField(choices=[('add', 'Add/edit pages you own'), ('edit', 'Edit any page'), ('publish', 'Publish any page'), ('lock', 'Lock/unlock any page')], max_length=20, verbose_name='Permission type'),
18 preserve_default=True,
19 ),
20 ]
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py b/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py
--- a/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py
+++ b/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py
@@ -7,7 +7,7 @@
class Migration(migrations.Migration):
dependencies = [
- ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),
+ ('wagtailcore', '0016_change_page_url_path_to_text_field'),
]
operations = [
| {"golden_diff": "diff --git a/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py b/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py\n--- a/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py\n+++ b/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py\n@@ -7,7 +7,7 @@\n class Migration(migrations.Migration):\n \n dependencies = [\n- ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),\n+ ('wagtailcore', '0016_change_page_url_path_to_text_field'),\n ]\n \n operations = [\n", "issue": "Migrating to 1.1 Migration File Errors\nI am attempting to migrate to 1.1 and I am getting an error involving the migration files.\n\n```\nMigration wagtailcore.0017_change_edit_page_permission_description dependencies reference nonexistent parent node (u'wagtailcore', u'0001_squashed_0016_change_page_url_path_to_text_field')\n```\n\nThe last migration for wagtail core in my migrations table is 0015. Since 0017 refers to 0001_squashed_0016 as a dependency and since I have not applied that migration, it's turn up as an error.\n\nI tried manually applying 0016, but the error is preventing that from happening.\n\nI know the issue queue is not intended for support questions, but I was speaking in the #django irc channel and they told me to check and see if the migrations were autogenerated. They said that normally migrations refer to the one before it and not the squashed ones.\n\nMigrating to 1.1 Migration File Errors\nI am attempting to migrate to 1.1 and I am getting an error involving the migration files.\n\n```\nMigration wagtailcore.0017_change_edit_page_permission_description dependencies reference nonexistent parent node (u'wagtailcore', u'0001_squashed_0016_change_page_url_path_to_text_field')\n```\n\nThe last migration for wagtail core in my migrations table is 0015. Since 0017 refers to 0001_squashed_0016 as a dependency and since I have not applied that migration, it's turn up as an error.\n\nI tried manually applying 0016, but the error is preventing that from happening.\n\nI know the issue queue is not intended for support questions, but I was speaking in the #django irc channel and they told me to check and see if the migrations were autogenerated. 
They said that normally migrations refer to the one before it and not the squashed ones.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='grouppagepermission',\n name='permission_type',\n field=models.CharField(choices=[('add', 'Add/edit pages you own'), ('edit', 'Edit any page'), ('publish', 'Publish any page'), ('lock', 'Lock/unlock any page')], max_length=20, verbose_name='Permission type'),\n preserve_default=True,\n ),\n ]\n", "path": "wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('wagtailcore', '0016_change_page_url_path_to_text_field'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='grouppagepermission',\n name='permission_type',\n field=models.CharField(choices=[('add', 'Add/edit pages you own'), ('edit', 'Edit any page'), ('publish', 'Publish any page'), ('lock', 'Lock/unlock any page')], max_length=20, verbose_name='Permission type'),\n preserve_default=True,\n ),\n ]\n", "path": "wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py"}]} | 902 | 171 |
gh_patches_debug_36962 | rasdani/github-patches | git_diff | medtagger__MedTagger-188 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Disable (temporarily) conversion in X & Y axes
## Expected Behavior
Frontend won't be enabled soon, so backend should disable such conversion (for now).
## Actual Behavior
Backend always creates views from X & Y axes, but they are not used anywhere.
## Additional comment
Let's make it optional and controllable somehow by users (ideally from the UI & API side?).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/medtagger/workers/conversion.py`
Content:
```
1 """Module responsible for asynchronous data conversion."""
2 import io
3 import os
4 import tempfile
5 from subprocess import call
6 from typing import List, Optional
7
8 import numpy as np
9 import pydicom
10 from pydicom.dataset import FileDataset
11 from PIL import Image
12 from celery.utils.log import get_task_logger
13
14 from medtagger.types import ScanID
15 from medtagger.workers import celery_app
16 from medtagger.conversion import convert_slice_to_normalized_8bit_array, convert_scan_to_normalized_8bit_array
17 from medtagger.database.models import SliceOrientation, Slice, Scan
18 from medtagger.repositories.scans import ScansRepository
19 from medtagger.repositories.slices import SlicesRepository
20
21 logger = get_task_logger(__name__)
22
23 MAX_PREVIEW_X_SIZE = 256
24
25
26 @celery_app.task
27 def convert_scan_to_png(scan_id: ScanID) -> None:
28 """Store Scan in HBase database.
29
30 :param scan_id: ID of a Scan
31 """
32 logger.info('Starting Scan (%s) conversion.', scan_id)
33 temp_files_to_remove: List[str] = []
34 scan = ScansRepository.get_scan_by_id(scan_id)
35 slices = SlicesRepository.get_slices_by_scan_id(scan_id)
36 if scan.declared_number_of_slices == 0:
37 logger.error('This Scan is empty! Removing from database...')
38 ScansRepository.delete_scan_by_id(scan_id)
39 return
40
41 # At first, collect all Dicom images for given Scan
42 logger.info('Reading all Slices for this Scan... This may take a while...')
43 dicom_images = []
44 for _slice in slices:
45 image = SlicesRepository.get_slice_original_image(_slice.id)
46 dicom_image, files_to_remove = _get_dicom_image(image)
47 dicom_images.append(dicom_image)
48 temp_files_to_remove.extend(files_to_remove)
49
50 # Correlate Dicom files with Slices and convert all Slices in the Z axis orientation
51 logger.info('Converting each Slice in Z axis.')
52 for dicom_image, _slice in zip(dicom_images, slices):
53 slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image)
54 _convert_to_png_and_store(_slice, slice_pixels)
55
56 # Prepare a preview size and convert 3D scan to fit its max X's axis shape
57 logger.info('Normalizing Scan in 3D. This may take a while...')
58 normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE)
59
60 # Prepare Slices in other orientations
61 logger.info('Preparing Slices in other axis.')
62 _prepare_slices_in_y_orientation(normalized_scan, scan)
63 _prepare_slices_in_x_orientation(normalized_scan, scan)
64
65 logger.info('Marking whole Scan as converted.')
66 scan.mark_as_converted()
67
68 # Remove all temporarily created files for applying workaround
69 for file_name in temp_files_to_remove:
70 os.remove(file_name)
71
72
73 def _get_dicom_image(image: bytes) -> FileDataset:
74 """Return PyDICOM image based on image from HBase.
75
76 This workaround enables support for compressed DICOMs as GDCM wrapper does not support Python3 well.
77
78 :param image: bytes with DICOM image (eg. from HBase)
79 :return: PyDICOM Image
80 """
81 # UGLY WORKAROUND FOR COMPRESSED DICOMs - Start
82 temp_file_name = _create_temporary_file(image)
83 try:
84 dicom_image = pydicom.read_file(temp_file_name, force=True)
85 dicom_image.pixel_array # pylint: disable=pointless-statement; Try to read pixel array from DICOM...
86 return dicom_image, [temp_file_name]
87 except Exception: # pylint: disable=broad-except; Intended - too much cases to cover...
88 # In case of any Exception - try to uncompress data from DICOM first
89 temp_file_uncompressed = _create_temporary_file()
90 call(["gdcmconv", "--raw", "-i", temp_file_name, "-o", temp_file_uncompressed]) # Convert to RAW DICOMs
91 dicom_image = pydicom.read_file(temp_file_uncompressed, force=True)
92 return dicom_image, [temp_file_name, temp_file_uncompressed]
93 # UGLY WORKAROUND - Stop
94
95
96 def _create_temporary_file(image: Optional[bytes] = None) -> str:
97 """Create new temporary file based on given DICOM image.
98
99 This workaround enable support for compressed DICOMs that will be read by the GDCM
100 low-level library. Please remove this workaround as soon as this FIX ME notice
101 will be removed:
102 https://github.com/pydicom/pydicom/blob/master/pydicom/pixel_data_handlers/gdcm_handler.py#L77
103 and this Issue will be closed:
104 https://github.com/pydicom/pydicom/issues/233
105
106 :param image: (optional) bytes with DICOM image
107 :return: path to temporary file
108 """
109 with tempfile.NamedTemporaryFile(delete=False) as temp_file:
110 temp_file_name = temp_file.name
111 if image:
112 temp_file.write(image)
113 return temp_file_name
114
115
116 def _prepare_slices_in_y_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:
117 """Prepare and save Slices in Y orientation.
118
119 :param normalized_scan: Numpy array with 3D normalized Scan
120 :param scan: Scan object to which new Slices should be added
121 """
122 for y in range(normalized_scan.shape[1]):
123 location = 100.0 * y / normalized_scan.shape[1]
124 slice_pixels = normalized_scan[:, y, :]
125 _slice = scan.add_slice(SliceOrientation.Y)
126 _slice.update_location(location)
127 _convert_to_png_and_store(_slice, slice_pixels)
128
129
130 def _prepare_slices_in_x_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:
131 """Prepare and save Slices in Y orientation.
132
133 :param normalized_scan: Numpy array with 3D normalized Scan
134 :param scan: Scan object to which new Slices should be added
135 """
136 for x in range(normalized_scan.shape[2]):
137 location = 100.0 * x / normalized_scan.shape[2]
138 slice_pixels = normalized_scan[:, :, x]
139 _slice = scan.add_slice(SliceOrientation.X)
140 _slice.update_location(location)
141 _convert_to_png_and_store(_slice, slice_pixels)
142
143
144 def _convert_to_png_and_store(_slice: Slice, slice_pixels: np.ndarray) -> None:
145 """Convert given Slice's pixel array and store in databases.
146
147 :param _slice: Slice database object
148 :param slice_pixels: numpy array with Slice data
149 """
150 converted_image = _convert_slice_pixels_to_png(slice_pixels)
151 SlicesRepository.store_converted_image(_slice.id, converted_image)
152 _slice.mark_as_converted()
153 logger.info('%s converted and stored.', _slice)
154
155
156 def _convert_slice_pixels_to_png(slice_pixels: np.ndarray) -> bytes:
157 """Convert given Slice's pixel array to the PNG format in bytes.
158
159 :param slice_pixels: Slice's pixel array
160 :return: bytes with Slice formatted in PNG
161 """
162 png_image = io.BytesIO()
163 Image.fromarray(slice_pixels, 'L').save(png_image, 'PNG')
164 png_image.seek(0)
165 return png_image.getvalue()
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/medtagger/workers/conversion.py b/backend/medtagger/workers/conversion.py
--- a/backend/medtagger/workers/conversion.py
+++ b/backend/medtagger/workers/conversion.py
@@ -20,6 +20,7 @@
logger = get_task_logger(__name__)
+CONVERT_IN_OTHER_AXES = False # Disabled until Frontend will enable support for such Slices
MAX_PREVIEW_X_SIZE = 256
@@ -47,20 +48,8 @@
dicom_images.append(dicom_image)
temp_files_to_remove.extend(files_to_remove)
- # Correlate Dicom files with Slices and convert all Slices in the Z axis orientation
- logger.info('Converting each Slice in Z axis.')
- for dicom_image, _slice in zip(dicom_images, slices):
- slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image)
- _convert_to_png_and_store(_slice, slice_pixels)
-
- # Prepare a preview size and convert 3D scan to fit its max X's axis shape
- logger.info('Normalizing Scan in 3D. This may take a while...')
- normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE)
-
- # Prepare Slices in other orientations
- logger.info('Preparing Slices in other axis.')
- _prepare_slices_in_y_orientation(normalized_scan, scan)
- _prepare_slices_in_x_orientation(normalized_scan, scan)
+ # Correlate Dicom files with Slices and convert all Slices
+ _convert_scan_in_all_axes(dicom_images, slices, scan)
logger.info('Marking whole Scan as converted.')
scan.mark_as_converted()
@@ -113,6 +102,32 @@
return temp_file_name
+def _convert_scan_in_all_axes(dicom_images: List[FileDataset], slices: List[Slice], scan: Scan) -> None:
+ """Convert Scan in X, Y and Z axes.
+
+ NOTE: X & Y axes are now disabled (until Frontend will support it).
+
+ :param dicom_images: list of all Dicom images
+ :param slices: list of all Slices in given Scan
+ :param scan: Scan object to which new Slices should be added
+ """
+ logger.info('Converting each Slice in Z axis.')
+ for dicom_image, _slice in zip(dicom_images, slices):
+ slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image)
+ _convert_to_png_and_store(_slice, slice_pixels)
+
+ # Convert only if it's enabled
+ if CONVERT_IN_OTHER_AXES:
+ # Prepare a preview size and convert 3D scan to fit its max X's axis shape
+ logger.info('Normalizing Scan in 3D. This may take a while...')
+ normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE)
+
+ # Prepare Slices in other orientations
+ logger.info('Preparing Slices in other axis.')
+ _prepare_slices_in_y_orientation(normalized_scan, scan)
+ _prepare_slices_in_x_orientation(normalized_scan, scan)
+
+
def _prepare_slices_in_y_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:
"""Prepare and save Slices in Y orientation.
| {"golden_diff": "diff --git a/backend/medtagger/workers/conversion.py b/backend/medtagger/workers/conversion.py\n--- a/backend/medtagger/workers/conversion.py\n+++ b/backend/medtagger/workers/conversion.py\n@@ -20,6 +20,7 @@\n \n logger = get_task_logger(__name__)\n \n+CONVERT_IN_OTHER_AXES = False # Disabled until Frontend will enable support for such Slices\n MAX_PREVIEW_X_SIZE = 256\n \n \n@@ -47,20 +48,8 @@\n dicom_images.append(dicom_image)\n temp_files_to_remove.extend(files_to_remove)\n \n- # Correlate Dicom files with Slices and convert all Slices in the Z axis orientation\n- logger.info('Converting each Slice in Z axis.')\n- for dicom_image, _slice in zip(dicom_images, slices):\n- slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image)\n- _convert_to_png_and_store(_slice, slice_pixels)\n-\n- # Prepare a preview size and convert 3D scan to fit its max X's axis shape\n- logger.info('Normalizing Scan in 3D. This may take a while...')\n- normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE)\n-\n- # Prepare Slices in other orientations\n- logger.info('Preparing Slices in other axis.')\n- _prepare_slices_in_y_orientation(normalized_scan, scan)\n- _prepare_slices_in_x_orientation(normalized_scan, scan)\n+ # Correlate Dicom files with Slices and convert all Slices\n+ _convert_scan_in_all_axes(dicom_images, slices, scan)\n \n logger.info('Marking whole Scan as converted.')\n scan.mark_as_converted()\n@@ -113,6 +102,32 @@\n return temp_file_name\n \n \n+def _convert_scan_in_all_axes(dicom_images: List[FileDataset], slices: List[Slice], scan: Scan) -> None:\n+ \"\"\"Convert Scan in X, Y and Z axes.\n+\n+ NOTE: X & Y axes are now disabled (until Frontend will support it).\n+\n+ :param dicom_images: list of all Dicom images\n+ :param slices: list of all Slices in given Scan\n+ :param scan: Scan object to which new Slices should be added\n+ \"\"\"\n+ logger.info('Converting each Slice in Z axis.')\n+ for dicom_image, _slice in zip(dicom_images, slices):\n+ slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image)\n+ _convert_to_png_and_store(_slice, slice_pixels)\n+\n+ # Convert only if it's enabled\n+ if CONVERT_IN_OTHER_AXES:\n+ # Prepare a preview size and convert 3D scan to fit its max X's axis shape\n+ logger.info('Normalizing Scan in 3D. 
This may take a while...')\n+ normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE)\n+\n+ # Prepare Slices in other orientations\n+ logger.info('Preparing Slices in other axis.')\n+ _prepare_slices_in_y_orientation(normalized_scan, scan)\n+ _prepare_slices_in_x_orientation(normalized_scan, scan)\n+\n+\n def _prepare_slices_in_y_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:\n \"\"\"Prepare and save Slices in Y orientation.\n", "issue": "Disable (temporarily) conversion in X & Y axes\n## Expected Behavior\r\n\r\nFrontend won't be enabled soon, so backend should disable such conversion (for now).\r\n\r\n## Actual Behavior\r\n\r\nBackend always create views from X & Y axes but are not used anywhere.\r\n\r\n## Additional comment\r\n\r\nLet's make it optional and controllable somehow by users (ideally from the UI & API side?).\r\n\n", "before_files": [{"content": "\"\"\"Module responsible for asynchronous data conversion.\"\"\"\nimport io\nimport os\nimport tempfile\nfrom subprocess import call\nfrom typing import List, Optional\n\nimport numpy as np\nimport pydicom\nfrom pydicom.dataset import FileDataset\nfrom PIL import Image\nfrom celery.utils.log import get_task_logger\n\nfrom medtagger.types import ScanID\nfrom medtagger.workers import celery_app\nfrom medtagger.conversion import convert_slice_to_normalized_8bit_array, convert_scan_to_normalized_8bit_array\nfrom medtagger.database.models import SliceOrientation, Slice, Scan\nfrom medtagger.repositories.scans import ScansRepository\nfrom medtagger.repositories.slices import SlicesRepository\n\nlogger = get_task_logger(__name__)\n\nMAX_PREVIEW_X_SIZE = 256\n\n\n@celery_app.task\ndef convert_scan_to_png(scan_id: ScanID) -> None:\n \"\"\"Store Scan in HBase database.\n\n :param scan_id: ID of a Scan\n \"\"\"\n logger.info('Starting Scan (%s) conversion.', scan_id)\n temp_files_to_remove: List[str] = []\n scan = ScansRepository.get_scan_by_id(scan_id)\n slices = SlicesRepository.get_slices_by_scan_id(scan_id)\n if scan.declared_number_of_slices == 0:\n logger.error('This Scan is empty! Removing from database...')\n ScansRepository.delete_scan_by_id(scan_id)\n return\n\n # At first, collect all Dicom images for given Scan\n logger.info('Reading all Slices for this Scan... This may take a while...')\n dicom_images = []\n for _slice in slices:\n image = SlicesRepository.get_slice_original_image(_slice.id)\n dicom_image, files_to_remove = _get_dicom_image(image)\n dicom_images.append(dicom_image)\n temp_files_to_remove.extend(files_to_remove)\n\n # Correlate Dicom files with Slices and convert all Slices in the Z axis orientation\n logger.info('Converting each Slice in Z axis.')\n for dicom_image, _slice in zip(dicom_images, slices):\n slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image)\n _convert_to_png_and_store(_slice, slice_pixels)\n\n # Prepare a preview size and convert 3D scan to fit its max X's axis shape\n logger.info('Normalizing Scan in 3D. 
This may take a while...')\n normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE)\n\n # Prepare Slices in other orientations\n logger.info('Preparing Slices in other axis.')\n _prepare_slices_in_y_orientation(normalized_scan, scan)\n _prepare_slices_in_x_orientation(normalized_scan, scan)\n\n logger.info('Marking whole Scan as converted.')\n scan.mark_as_converted()\n\n # Remove all temporarily created files for applying workaround\n for file_name in temp_files_to_remove:\n os.remove(file_name)\n\n\ndef _get_dicom_image(image: bytes) -> FileDataset:\n \"\"\"Return PyDICOM image based on image from HBase.\n\n This workaround enables support for compressed DICOMs as GDCM wrapper does not support Python3 well.\n\n :param image: bytes with DICOM image (eg. from HBase)\n :return: PyDICOM Image\n \"\"\"\n # UGLY WORKAROUND FOR COMPRESSED DICOMs - Start\n temp_file_name = _create_temporary_file(image)\n try:\n dicom_image = pydicom.read_file(temp_file_name, force=True)\n dicom_image.pixel_array # pylint: disable=pointless-statement; Try to read pixel array from DICOM...\n return dicom_image, [temp_file_name]\n except Exception: # pylint: disable=broad-except; Intended - too much cases to cover...\n # In case of any Exception - try to uncompress data from DICOM first\n temp_file_uncompressed = _create_temporary_file()\n call([\"gdcmconv\", \"--raw\", \"-i\", temp_file_name, \"-o\", temp_file_uncompressed]) # Convert to RAW DICOMs\n dicom_image = pydicom.read_file(temp_file_uncompressed, force=True)\n return dicom_image, [temp_file_name, temp_file_uncompressed]\n # UGLY WORKAROUND - Stop\n\n\ndef _create_temporary_file(image: Optional[bytes] = None) -> str:\n \"\"\"Create new temporary file based on given DICOM image.\n\n This workaround enable support for compressed DICOMs that will be read by the GDCM\n low-level library. 
Please remove this workaround as soon as this FIX ME notice\n will be removed:\n https://github.com/pydicom/pydicom/blob/master/pydicom/pixel_data_handlers/gdcm_handler.py#L77\n and this Issue will be closed:\n https://github.com/pydicom/pydicom/issues/233\n\n :param image: (optional) bytes with DICOM image\n :return: path to temporary file\n \"\"\"\n with tempfile.NamedTemporaryFile(delete=False) as temp_file:\n temp_file_name = temp_file.name\n if image:\n temp_file.write(image)\n return temp_file_name\n\n\ndef _prepare_slices_in_y_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:\n \"\"\"Prepare and save Slices in Y orientation.\n\n :param normalized_scan: Numpy array with 3D normalized Scan\n :param scan: Scan object to which new Slices should be added\n \"\"\"\n for y in range(normalized_scan.shape[1]):\n location = 100.0 * y / normalized_scan.shape[1]\n slice_pixels = normalized_scan[:, y, :]\n _slice = scan.add_slice(SliceOrientation.Y)\n _slice.update_location(location)\n _convert_to_png_and_store(_slice, slice_pixels)\n\n\ndef _prepare_slices_in_x_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:\n \"\"\"Prepare and save Slices in Y orientation.\n\n :param normalized_scan: Numpy array with 3D normalized Scan\n :param scan: Scan object to which new Slices should be added\n \"\"\"\n for x in range(normalized_scan.shape[2]):\n location = 100.0 * x / normalized_scan.shape[2]\n slice_pixels = normalized_scan[:, :, x]\n _slice = scan.add_slice(SliceOrientation.X)\n _slice.update_location(location)\n _convert_to_png_and_store(_slice, slice_pixels)\n\n\ndef _convert_to_png_and_store(_slice: Slice, slice_pixels: np.ndarray) -> None:\n \"\"\"Convert given Slice's pixel array and store in databases.\n\n :param _slice: Slice database object\n :param slice_pixels: numpy array with Slice data\n \"\"\"\n converted_image = _convert_slice_pixels_to_png(slice_pixels)\n SlicesRepository.store_converted_image(_slice.id, converted_image)\n _slice.mark_as_converted()\n logger.info('%s converted and stored.', _slice)\n\n\ndef _convert_slice_pixels_to_png(slice_pixels: np.ndarray) -> bytes:\n \"\"\"Convert given Slice's pixel array to the PNG format in bytes.\n\n :param slice_pixels: Slice's pixel array\n :return: bytes with Slice formatted in PNG\n \"\"\"\n png_image = io.BytesIO()\n Image.fromarray(slice_pixels, 'L').save(png_image, 'PNG')\n png_image.seek(0)\n return png_image.getvalue()\n", "path": "backend/medtagger/workers/conversion.py"}], "after_files": [{"content": "\"\"\"Module responsible for asynchronous data conversion.\"\"\"\nimport io\nimport os\nimport tempfile\nfrom subprocess import call\nfrom typing import List, Optional\n\nimport numpy as np\nimport pydicom\nfrom pydicom.dataset import FileDataset\nfrom PIL import Image\nfrom celery.utils.log import get_task_logger\n\nfrom medtagger.types import ScanID\nfrom medtagger.workers import celery_app\nfrom medtagger.conversion import convert_slice_to_normalized_8bit_array, convert_scan_to_normalized_8bit_array\nfrom medtagger.database.models import SliceOrientation, Slice, Scan\nfrom medtagger.repositories.scans import ScansRepository\nfrom medtagger.repositories.slices import SlicesRepository\n\nlogger = get_task_logger(__name__)\n\nCONVERT_IN_OTHER_AXES = False # Disabled until Frontend will enable support for such Slices\nMAX_PREVIEW_X_SIZE = 256\n\n\n@celery_app.task\ndef convert_scan_to_png(scan_id: ScanID) -> None:\n \"\"\"Store Scan in HBase database.\n\n :param scan_id: ID of a Scan\n \"\"\"\n 
logger.info('Starting Scan (%s) conversion.', scan_id)\n temp_files_to_remove: List[str] = []\n scan = ScansRepository.get_scan_by_id(scan_id)\n slices = SlicesRepository.get_slices_by_scan_id(scan_id)\n if scan.declared_number_of_slices == 0:\n logger.error('This Scan is empty! Removing from database...')\n ScansRepository.delete_scan_by_id(scan_id)\n return\n\n # At first, collect all Dicom images for given Scan\n logger.info('Reading all Slices for this Scan... This may take a while...')\n dicom_images = []\n for _slice in slices:\n image = SlicesRepository.get_slice_original_image(_slice.id)\n dicom_image, files_to_remove = _get_dicom_image(image)\n dicom_images.append(dicom_image)\n temp_files_to_remove.extend(files_to_remove)\n\n # Correlate Dicom files with Slices and convert all Slices\n _convert_scan_in_all_axes(dicom_images, slices, scan)\n\n logger.info('Marking whole Scan as converted.')\n scan.mark_as_converted()\n\n # Remove all temporarily created files for applying workaround\n for file_name in temp_files_to_remove:\n os.remove(file_name)\n\n\ndef _get_dicom_image(image: bytes) -> FileDataset:\n \"\"\"Return PyDICOM image based on image from HBase.\n\n This workaround enables support for compressed DICOMs as GDCM wrapper does not support Python3 well.\n\n :param image: bytes with DICOM image (eg. from HBase)\n :return: PyDICOM Image\n \"\"\"\n # UGLY WORKAROUND FOR COMPRESSED DICOMs - Start\n temp_file_name = _create_temporary_file(image)\n try:\n dicom_image = pydicom.read_file(temp_file_name, force=True)\n dicom_image.pixel_array # pylint: disable=pointless-statement; Try to read pixel array from DICOM...\n return dicom_image, [temp_file_name]\n except Exception: # pylint: disable=broad-except; Intended - too much cases to cover...\n # In case of any Exception - try to uncompress data from DICOM first\n temp_file_uncompressed = _create_temporary_file()\n call([\"gdcmconv\", \"--raw\", \"-i\", temp_file_name, \"-o\", temp_file_uncompressed]) # Convert to RAW DICOMs\n dicom_image = pydicom.read_file(temp_file_uncompressed, force=True)\n return dicom_image, [temp_file_name, temp_file_uncompressed]\n # UGLY WORKAROUND - Stop\n\n\ndef _create_temporary_file(image: Optional[bytes] = None) -> str:\n \"\"\"Create new temporary file based on given DICOM image.\n\n This workaround enable support for compressed DICOMs that will be read by the GDCM\n low-level library. 
Please remove this workaround as soon as this FIX ME notice\n will be removed:\n https://github.com/pydicom/pydicom/blob/master/pydicom/pixel_data_handlers/gdcm_handler.py#L77\n and this Issue will be closed:\n https://github.com/pydicom/pydicom/issues/233\n\n :param image: (optional) bytes with DICOM image\n :return: path to temporary file\n \"\"\"\n with tempfile.NamedTemporaryFile(delete=False) as temp_file:\n temp_file_name = temp_file.name\n if image:\n temp_file.write(image)\n return temp_file_name\n\n\ndef _convert_scan_in_all_axes(dicom_images: List[FileDataset], slices: List[Slice], scan: Scan) -> None:\n \"\"\"Convert Scan in X, Y and Z axes.\n\n NOTE: X & Y axes are now disabled (until Frontend will support it).\n\n :param dicom_images: list of all Dicom images\n :param slices: list of all Slices in given Scan\n :param scan: Scan object to which new Slices should be added\n \"\"\"\n logger.info('Converting each Slice in Z axis.')\n for dicom_image, _slice in zip(dicom_images, slices):\n slice_pixels = convert_slice_to_normalized_8bit_array(dicom_image)\n _convert_to_png_and_store(_slice, slice_pixels)\n\n # Convert only if it's enabled\n if CONVERT_IN_OTHER_AXES:\n # Prepare a preview size and convert 3D scan to fit its max X's axis shape\n logger.info('Normalizing Scan in 3D. This may take a while...')\n normalized_scan = convert_scan_to_normalized_8bit_array(dicom_images, output_x_size=MAX_PREVIEW_X_SIZE)\n\n # Prepare Slices in other orientations\n logger.info('Preparing Slices in other axis.')\n _prepare_slices_in_y_orientation(normalized_scan, scan)\n _prepare_slices_in_x_orientation(normalized_scan, scan)\n\n\ndef _prepare_slices_in_y_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:\n \"\"\"Prepare and save Slices in Y orientation.\n\n :param normalized_scan: Numpy array with 3D normalized Scan\n :param scan: Scan object to which new Slices should be added\n \"\"\"\n for y in range(normalized_scan.shape[1]):\n location = 100.0 * y / normalized_scan.shape[1]\n slice_pixels = normalized_scan[:, y, :]\n _slice = scan.add_slice(SliceOrientation.Y)\n _slice.update_location(location)\n _convert_to_png_and_store(_slice, slice_pixels)\n\n\ndef _prepare_slices_in_x_orientation(normalized_scan: np.ndarray, scan: Scan) -> None:\n \"\"\"Prepare and save Slices in Y orientation.\n\n :param normalized_scan: Numpy array with 3D normalized Scan\n :param scan: Scan object to which new Slices should be added\n \"\"\"\n for x in range(normalized_scan.shape[2]):\n location = 100.0 * x / normalized_scan.shape[2]\n slice_pixels = normalized_scan[:, :, x]\n _slice = scan.add_slice(SliceOrientation.X)\n _slice.update_location(location)\n _convert_to_png_and_store(_slice, slice_pixels)\n\n\ndef _convert_to_png_and_store(_slice: Slice, slice_pixels: np.ndarray) -> None:\n \"\"\"Convert given Slice's pixel array and store in databases.\n\n :param _slice: Slice database object\n :param slice_pixels: numpy array with Slice data\n \"\"\"\n converted_image = _convert_slice_pixels_to_png(slice_pixels)\n SlicesRepository.store_converted_image(_slice.id, converted_image)\n _slice.mark_as_converted()\n logger.info('%s converted and stored.', _slice)\n\n\ndef _convert_slice_pixels_to_png(slice_pixels: np.ndarray) -> bytes:\n \"\"\"Convert given Slice's pixel array to the PNG format in bytes.\n\n :param slice_pixels: Slice's pixel array\n :return: bytes with Slice formatted in PNG\n \"\"\"\n png_image = io.BytesIO()\n Image.fromarray(slice_pixels, 'L').save(png_image, 'PNG')\n png_image.seek(0)\n 
return png_image.getvalue()\n", "path": "backend/medtagger/workers/conversion.py"}]} | 2,354 | 766 |
gh_patches_debug_39711 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-824 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
VideoClassificationData.from_files() does not work
## 🐛 Bug
`VideoClassificationData.from_files()` does not work. `VideoClassificationData` objects can only be constructed using the `from_folders()` classmethod - which unfortunately doesn't work for multilabel tasks :)
### To Reproduce
I wrote a Colab notebook to reproduce this in a self-contained environment: https://colab.research.google.com/drive/1X7UvZDndCc0dzcUZ_fGdmQz0ZMTTsj_U?usp=sharing
#### Code sample
See link to colab above
### Expected behavior
A `VideoClassificationData` object should be constructed from a list of paths and their labels
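A minimal sketch of the kind of call this is expected to support, assuming the generic Flash `from_files` interface (the `train_files`/`train_targets` argument names and the preprocess kwargs are assumptions here, not verified against this exact release):

```python
from flash.video import VideoClassificationData

# Placeholder paths and labels; nested label lists illustrate the multilabel case.
datamodule = VideoClassificationData.from_files(
    train_files=["videos/clip_a.mp4", "videos/clip_b.mp4"],
    train_targets=[["walking"], ["running", "outdoor"]],
    clip_sampler="uniform",
    clip_duration=1,
)
```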
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flash/video/classification/data.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import pathlib
15 from typing import Any, Callable, Dict, List, Optional, Type, TYPE_CHECKING, Union
16
17 import numpy as np
18 import torch
19 from pytorch_lightning.utilities.exceptions import MisconfigurationException
20 from torch.utils.data import Sampler
21
22 from flash.core.data.data_module import DataModule
23 from flash.core.data.data_source import (
24 DefaultDataKeys,
25 DefaultDataSources,
26 FiftyOneDataSource,
27 LabelsState,
28 PathsDataSource,
29 )
30 from flash.core.data.process import Preprocess
31 from flash.core.integrations.labelstudio.data_source import LabelStudioVideoClassificationDataSource
32 from flash.core.utilities.imports import _FIFTYONE_AVAILABLE, _KORNIA_AVAILABLE, _PYTORCHVIDEO_AVAILABLE, lazy_import
33
34 SampleCollection = None
35 if _FIFTYONE_AVAILABLE:
36 fol = lazy_import("fiftyone.core.labels")
37 if TYPE_CHECKING:
38 from fiftyone.core.collections import SampleCollection
39 else:
40 fol = None
41
42 if _KORNIA_AVAILABLE:
43 import kornia.augmentation as K
44
45 if _PYTORCHVIDEO_AVAILABLE:
46 from pytorchvideo.data.clip_sampling import ClipSampler, make_clip_sampler
47 from pytorchvideo.data.encoded_video import EncodedVideo
48 from pytorchvideo.data.labeled_video_dataset import labeled_video_dataset, LabeledVideoDataset
49 from pytorchvideo.data.labeled_video_paths import LabeledVideoPaths
50 from pytorchvideo.transforms import ApplyTransformToKey, UniformTemporalSubsample
51 from torchvision.transforms import CenterCrop, Compose, RandomCrop, RandomHorizontalFlip
52 else:
53 ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None
54
55 _PYTORCHVIDEO_DATA = Dict[str, Union[str, torch.Tensor, int, float, List]]
56
57
58 class BaseVideoClassification:
59 def __init__(
60 self,
61 clip_sampler: "ClipSampler",
62 video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
63 decode_audio: bool = True,
64 decoder: str = "pyav",
65 ):
66 self.clip_sampler = clip_sampler
67 self.video_sampler = video_sampler
68 self.decode_audio = decode_audio
69 self.decoder = decoder
70
71 def load_data(self, data: str, dataset: Optional[Any] = None) -> "LabeledVideoDataset":
72 ds = self._make_encoded_video_dataset(data)
73 if self.training:
74 label_to_class_mapping = {p[1]: p[0].split("/")[-2] for p in ds._labeled_videos._paths_and_labels}
75 self.set_state(LabelsState(label_to_class_mapping))
76 dataset.num_classes = len(np.unique([s[1]["label"] for s in ds._labeled_videos]))
77 return ds
78
79 def load_sample(self, sample):
80 return sample
81
82 def predict_load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
83 video_path = sample[DefaultDataKeys.INPUT]
84 sample.update(self._encoded_video_to_dict(EncodedVideo.from_path(video_path)))
85 sample[DefaultDataKeys.METADATA] = {"filepath": video_path}
86 return sample
87
88 def _encoded_video_to_dict(self, video, annotation: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
89 (
90 clip_start,
91 clip_end,
92 clip_index,
93 aug_index,
94 is_last_clip,
95 ) = self.clip_sampler(0.0, video.duration, annotation)
96
97 loaded_clip = video.get_clip(clip_start, clip_end)
98
99 clip_is_null = (
100 loaded_clip is None or loaded_clip["video"] is None or (loaded_clip["audio"] is None and self.decode_audio)
101 )
102
103 if clip_is_null:
104 raise MisconfigurationException(
105 f"The provided video is too short {video.duration} to be clipped at {self.clip_sampler._clip_duration}"
106 )
107
108 frames = loaded_clip["video"]
109 audio_samples = loaded_clip["audio"]
110 return {
111 "video": frames,
112 "video_name": video.name,
113 "video_index": 0,
114 "clip_index": clip_index,
115 "aug_index": aug_index,
116 **({"audio": audio_samples} if audio_samples is not None else {}),
117 }
118
119 def _make_encoded_video_dataset(self, data) -> "LabeledVideoDataset":
120 raise NotImplementedError("Subclass must implement _make_encoded_video_dataset()")
121
122
123 class VideoClassificationPathsDataSource(BaseVideoClassification, PathsDataSource):
124 def __init__(
125 self,
126 clip_sampler: "ClipSampler",
127 video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
128 decode_audio: bool = True,
129 decoder: str = "pyav",
130 ):
131 super().__init__(
132 clip_sampler,
133 video_sampler=video_sampler,
134 decode_audio=decode_audio,
135 decoder=decoder,
136 )
137 PathsDataSource.__init__(
138 self,
139 extensions=("mp4", "avi"),
140 )
141
142 def _make_encoded_video_dataset(self, data) -> "LabeledVideoDataset":
143 ds: LabeledVideoDataset = labeled_video_dataset(
144 pathlib.Path(data),
145 self.clip_sampler,
146 video_sampler=self.video_sampler,
147 decode_audio=self.decode_audio,
148 decoder=self.decoder,
149 )
150 return ds
151
152
153 class VideoClassificationFiftyOneDataSource(
154 BaseVideoClassification,
155 FiftyOneDataSource,
156 ):
157 def __init__(
158 self,
159 clip_sampler: "ClipSampler",
160 video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
161 decode_audio: bool = True,
162 decoder: str = "pyav",
163 label_field: str = "ground_truth",
164 ):
165 super().__init__(
166 clip_sampler=clip_sampler,
167 video_sampler=video_sampler,
168 decode_audio=decode_audio,
169 decoder=decoder,
170 )
171 FiftyOneDataSource.__init__(
172 self,
173 label_field=label_field,
174 )
175
176 @property
177 def label_cls(self):
178 return fol.Classification
179
180 def _make_encoded_video_dataset(self, data: SampleCollection) -> "LabeledVideoDataset":
181 classes = self._get_classes(data)
182 label_to_class_mapping = dict(enumerate(classes))
183 class_to_label_mapping = {c: lab for lab, c in label_to_class_mapping.items()}
184
185 filepaths = data.values("filepath")
186 labels = data.values(self.label_field + ".label")
187 targets = [class_to_label_mapping[lab] for lab in labels]
188 labeled_video_paths = LabeledVideoPaths(list(zip(filepaths, targets)))
189
190 ds: LabeledVideoDataset = LabeledVideoDataset(
191 labeled_video_paths,
192 self.clip_sampler,
193 video_sampler=self.video_sampler,
194 decode_audio=self.decode_audio,
195 decoder=self.decoder,
196 )
197 return ds
198
199
200 class VideoClassificationPreprocess(Preprocess):
201 def __init__(
202 self,
203 train_transform: Optional[Dict[str, Callable]] = None,
204 val_transform: Optional[Dict[str, Callable]] = None,
205 test_transform: Optional[Dict[str, Callable]] = None,
206 predict_transform: Optional[Dict[str, Callable]] = None,
207 clip_sampler: Union[str, "ClipSampler"] = "random",
208 clip_duration: float = 2,
209 clip_sampler_kwargs: Dict[str, Any] = None,
210 video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
211 decode_audio: bool = True,
212 decoder: str = "pyav",
213 **data_source_kwargs: Any,
214 ):
215 self.clip_sampler = clip_sampler
216 self.clip_duration = clip_duration
217 self.clip_sampler_kwargs = clip_sampler_kwargs
218 self.video_sampler = video_sampler
219 self.decode_audio = decode_audio
220 self.decoder = decoder
221
222 if not _PYTORCHVIDEO_AVAILABLE:
223 raise ModuleNotFoundError("Please, run `pip install pytorchvideo`.")
224
225 if not clip_sampler_kwargs:
226 clip_sampler_kwargs = {}
227
228 if not clip_sampler:
229 raise MisconfigurationException(
230 "clip_sampler should be provided as a string or ``pytorchvideo.data.clip_sampling.ClipSampler``"
231 )
232
233 clip_sampler = make_clip_sampler(clip_sampler, clip_duration, **clip_sampler_kwargs)
234
235 super().__init__(
236 train_transform=train_transform,
237 val_transform=val_transform,
238 test_transform=test_transform,
239 predict_transform=predict_transform,
240 data_sources={
241 DefaultDataSources.FILES: VideoClassificationPathsDataSource(
242 clip_sampler,
243 video_sampler=video_sampler,
244 decode_audio=decode_audio,
245 decoder=decoder,
246 ),
247 DefaultDataSources.FOLDERS: VideoClassificationPathsDataSource(
248 clip_sampler,
249 video_sampler=video_sampler,
250 decode_audio=decode_audio,
251 decoder=decoder,
252 ),
253 DefaultDataSources.FIFTYONE: VideoClassificationFiftyOneDataSource(
254 clip_sampler,
255 video_sampler=video_sampler,
256 decode_audio=decode_audio,
257 decoder=decoder,
258 **data_source_kwargs,
259 ),
260 DefaultDataSources.LABELSTUDIO: LabelStudioVideoClassificationDataSource(
261 clip_sampler=clip_sampler,
262 video_sampler=video_sampler,
263 decode_audio=decode_audio,
264 decoder=decoder,
265 **data_source_kwargs,
266 ),
267 },
268 default_data_source=DefaultDataSources.FILES,
269 )
270
271 def get_state_dict(self) -> Dict[str, Any]:
272 return {
273 **self.transforms,
274 "clip_sampler": self.clip_sampler,
275 "clip_duration": self.clip_duration,
276 "clip_sampler_kwargs": self.clip_sampler_kwargs,
277 "video_sampler": self.video_sampler,
278 "decode_audio": self.decode_audio,
279 "decoder": self.decoder,
280 }
281
282 @classmethod
283 def load_state_dict(cls, state_dict: Dict[str, Any], strict: bool) -> "VideoClassificationPreprocess":
284 return cls(**state_dict)
285
286 def default_transforms(self) -> Dict[str, Callable]:
287 if self.training:
288 post_tensor_transform = [
289 RandomCrop(244, pad_if_needed=True),
290 RandomHorizontalFlip(p=0.5),
291 ]
292 else:
293 post_tensor_transform = [
294 CenterCrop(244),
295 ]
296
297 return {
298 "post_tensor_transform": Compose(
299 [
300 ApplyTransformToKey(
301 key="video",
302 transform=Compose([UniformTemporalSubsample(8)] + post_tensor_transform),
303 ),
304 ]
305 ),
306 "per_batch_transform_on_device": Compose(
307 [
308 ApplyTransformToKey(
309 key="video",
310 transform=K.VideoSequential(
311 K.Normalize(torch.tensor([0.45, 0.45, 0.45]), torch.tensor([0.225, 0.225, 0.225])),
312 data_format="BCTHW",
313 same_on_frame=False,
314 ),
315 ),
316 ]
317 ),
318 }
319
320
321 class VideoClassificationData(DataModule):
322 """Data module for Video classification tasks."""
323
324 preprocess_cls = VideoClassificationPreprocess
325
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flash/video/classification/data.py b/flash/video/classification/data.py
--- a/flash/video/classification/data.py
+++ b/flash/video/classification/data.py
@@ -54,6 +54,8 @@
_PYTORCHVIDEO_DATA = Dict[str, Union[str, torch.Tensor, int, float, List]]
+Label = Union[int, List[int]]
+
class BaseVideoClassification:
def __init__(
@@ -150,6 +152,76 @@
return ds
+class VideoClassificationListDataSource(BaseVideoClassification, PathsDataSource):
+ def __init__(
+ self,
+ clip_sampler: "ClipSampler",
+ video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,
+ decode_audio: bool = True,
+ decoder: str = "pyav",
+ ):
+ super().__init__(
+ clip_sampler,
+ video_sampler=video_sampler,
+ decode_audio=decode_audio,
+ decoder=decoder,
+ )
+ PathsDataSource.__init__(
+ self,
+ extensions=("mp4", "avi"),
+ )
+
+ def _to_multi_hot(self, label_list: List[int]) -> torch.Tensor:
+ v = torch.zeros(len(self.labels_set))
+ for label in label_list:
+ v[label] = 1
+ return v
+
+ def _make_encoded_video_dataset(self, data) -> "LabeledVideoDataset":
+ [paths, str_labels] = data
+ self.is_multilabel = any(isinstance(label, list) for label in str_labels)
+ if self.is_multilabel:
+ self.labels_set = {label for label_list in str_labels for label in label_list}
+ self.label_to_id = {label: i for i, label in enumerate(sorted(self.labels_set))}
+ self.id_to_label = {i: label for label, i in self.label_to_id.items()}
+
+ encoded_labels = [
+ self._to_multi_hot([self.label_to_id[classname] for classname in label_list])
+ for label_list in str_labels
+ ]
+
+ data = list(
+ zip(
+ paths,
+ encoded_labels,
+ )
+ )
+ else:
+ self.labels_set = set(str_labels)
+ self.label_to_id = {label: i for i, label in enumerate(sorted(self.labels_set))}
+ self.id_to_label = {i: label for label, i in self.label_to_id.items()}
+ data = list(
+ zip(paths, [self.label_to_id[classname] for classname in str_labels])
+ ) # List[Lists] -> List[Tuples]
+ labeled_video_paths = LabeledVideoPaths(data)
+ ds = LabeledVideoDataset(
+ labeled_video_paths,
+ self.clip_sampler,
+ video_sampler=self.video_sampler,
+ decode_audio=self.decode_audio,
+ decoder=self.decoder,
+ )
+ return ds
+
+ def load_data(self, data: str, dataset: Optional[Any] = None) -> "LabeledVideoDataset":
+ ds = self._make_encoded_video_dataset(data)
+
+ if self.training:
+ self.set_state(LabelsState(self.id_to_label))
+ dataset.num_classes = len(self.labels_set)
+ return ds
+
+
class VideoClassificationFiftyOneDataSource(
BaseVideoClassification,
FiftyOneDataSource,
@@ -238,7 +310,7 @@
test_transform=test_transform,
predict_transform=predict_transform,
data_sources={
- DefaultDataSources.FILES: VideoClassificationPathsDataSource(
+ DefaultDataSources.FILES: VideoClassificationListDataSource(
clip_sampler,
video_sampler=video_sampler,
decode_audio=decode_audio,
| {"golden_diff": "diff --git a/flash/video/classification/data.py b/flash/video/classification/data.py\n--- a/flash/video/classification/data.py\n+++ b/flash/video/classification/data.py\n@@ -54,6 +54,8 @@\n \n _PYTORCHVIDEO_DATA = Dict[str, Union[str, torch.Tensor, int, float, List]]\n \n+Label = Union[int, List[int]]\n+\n \n class BaseVideoClassification:\n def __init__(\n@@ -150,6 +152,76 @@\n return ds\n \n \n+class VideoClassificationListDataSource(BaseVideoClassification, PathsDataSource):\n+ def __init__(\n+ self,\n+ clip_sampler: \"ClipSampler\",\n+ video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n+ decode_audio: bool = True,\n+ decoder: str = \"pyav\",\n+ ):\n+ super().__init__(\n+ clip_sampler,\n+ video_sampler=video_sampler,\n+ decode_audio=decode_audio,\n+ decoder=decoder,\n+ )\n+ PathsDataSource.__init__(\n+ self,\n+ extensions=(\"mp4\", \"avi\"),\n+ )\n+\n+ def _to_multi_hot(self, label_list: List[int]) -> torch.Tensor:\n+ v = torch.zeros(len(self.labels_set))\n+ for label in label_list:\n+ v[label] = 1\n+ return v\n+\n+ def _make_encoded_video_dataset(self, data) -> \"LabeledVideoDataset\":\n+ [paths, str_labels] = data\n+ self.is_multilabel = any(isinstance(label, list) for label in str_labels)\n+ if self.is_multilabel:\n+ self.labels_set = {label for label_list in str_labels for label in label_list}\n+ self.label_to_id = {label: i for i, label in enumerate(sorted(self.labels_set))}\n+ self.id_to_label = {i: label for label, i in self.label_to_id.items()}\n+\n+ encoded_labels = [\n+ self._to_multi_hot([self.label_to_id[classname] for classname in label_list])\n+ for label_list in str_labels\n+ ]\n+\n+ data = list(\n+ zip(\n+ paths,\n+ encoded_labels,\n+ )\n+ )\n+ else:\n+ self.labels_set = set(str_labels)\n+ self.label_to_id = {label: i for i, label in enumerate(sorted(self.labels_set))}\n+ self.id_to_label = {i: label for label, i in self.label_to_id.items()}\n+ data = list(\n+ zip(paths, [self.label_to_id[classname] for classname in str_labels])\n+ ) # List[Lists] -> List[Tuples]\n+ labeled_video_paths = LabeledVideoPaths(data)\n+ ds = LabeledVideoDataset(\n+ labeled_video_paths,\n+ self.clip_sampler,\n+ video_sampler=self.video_sampler,\n+ decode_audio=self.decode_audio,\n+ decoder=self.decoder,\n+ )\n+ return ds\n+\n+ def load_data(self, data: str, dataset: Optional[Any] = None) -> \"LabeledVideoDataset\":\n+ ds = self._make_encoded_video_dataset(data)\n+\n+ if self.training:\n+ self.set_state(LabelsState(self.id_to_label))\n+ dataset.num_classes = len(self.labels_set)\n+ return ds\n+\n+\n class VideoClassificationFiftyOneDataSource(\n BaseVideoClassification,\n FiftyOneDataSource,\n@@ -238,7 +310,7 @@\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_sources={\n- DefaultDataSources.FILES: VideoClassificationPathsDataSource(\n+ DefaultDataSources.FILES: VideoClassificationListDataSource(\n clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n", "issue": "VideoClassificationData.from_files() does not work\n## \ud83d\udc1b Bug\r\n`VideoClassificationData.from_files()` does not work. 
`VideoClassificationData` objects can only be constructed using the `from_folders()` classmethod - which unfortunately doesn't work for multilabel tasks :)\r\n\r\n### To Reproduce\r\nI wrote a Colab notebook to reproduce this in a self-contained environment: https://colab.research.google.com/drive/1X7UvZDndCc0dzcUZ_fGdmQz0ZMTTsj_U?usp=sharing\r\n\r\n#### Code sample\r\nSee link to colab above\r\n\r\n### Expected behavior\r\nA `VideoClassificationData` object should be constructed from a list of paths and their labels\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pathlib\nfrom typing import Any, Callable, Dict, List, Optional, Type, TYPE_CHECKING, Union\n\nimport numpy as np\nimport torch\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch.utils.data import Sampler\n\nfrom flash.core.data.data_module import DataModule\nfrom flash.core.data.data_source import (\n DefaultDataKeys,\n DefaultDataSources,\n FiftyOneDataSource,\n LabelsState,\n PathsDataSource,\n)\nfrom flash.core.data.process import Preprocess\nfrom flash.core.integrations.labelstudio.data_source import LabelStudioVideoClassificationDataSource\nfrom flash.core.utilities.imports import _FIFTYONE_AVAILABLE, _KORNIA_AVAILABLE, _PYTORCHVIDEO_AVAILABLE, lazy_import\n\nSampleCollection = None\nif _FIFTYONE_AVAILABLE:\n fol = lazy_import(\"fiftyone.core.labels\")\n if TYPE_CHECKING:\n from fiftyone.core.collections import SampleCollection\nelse:\n fol = None\n\nif _KORNIA_AVAILABLE:\n import kornia.augmentation as K\n\nif _PYTORCHVIDEO_AVAILABLE:\n from pytorchvideo.data.clip_sampling import ClipSampler, make_clip_sampler\n from pytorchvideo.data.encoded_video import EncodedVideo\n from pytorchvideo.data.labeled_video_dataset import labeled_video_dataset, LabeledVideoDataset\n from pytorchvideo.data.labeled_video_paths import LabeledVideoPaths\n from pytorchvideo.transforms import ApplyTransformToKey, UniformTemporalSubsample\n from torchvision.transforms import CenterCrop, Compose, RandomCrop, RandomHorizontalFlip\nelse:\n ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None\n\n_PYTORCHVIDEO_DATA = Dict[str, Union[str, torch.Tensor, int, float, List]]\n\n\nclass BaseVideoClassification:\n def __init__(\n self,\n clip_sampler: \"ClipSampler\",\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n decode_audio: bool = True,\n decoder: str = \"pyav\",\n ):\n self.clip_sampler = clip_sampler\n self.video_sampler = video_sampler\n self.decode_audio = decode_audio\n self.decoder = decoder\n\n def load_data(self, data: str, dataset: Optional[Any] = None) -> \"LabeledVideoDataset\":\n ds = self._make_encoded_video_dataset(data)\n if self.training:\n label_to_class_mapping = {p[1]: p[0].split(\"/\")[-2] for p in ds._labeled_videos._paths_and_labels}\n self.set_state(LabelsState(label_to_class_mapping))\n dataset.num_classes = len(np.unique([s[1][\"label\"] for s in 
ds._labeled_videos]))\n return ds\n\n def load_sample(self, sample):\n return sample\n\n def predict_load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n video_path = sample[DefaultDataKeys.INPUT]\n sample.update(self._encoded_video_to_dict(EncodedVideo.from_path(video_path)))\n sample[DefaultDataKeys.METADATA] = {\"filepath\": video_path}\n return sample\n\n def _encoded_video_to_dict(self, video, annotation: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:\n (\n clip_start,\n clip_end,\n clip_index,\n aug_index,\n is_last_clip,\n ) = self.clip_sampler(0.0, video.duration, annotation)\n\n loaded_clip = video.get_clip(clip_start, clip_end)\n\n clip_is_null = (\n loaded_clip is None or loaded_clip[\"video\"] is None or (loaded_clip[\"audio\"] is None and self.decode_audio)\n )\n\n if clip_is_null:\n raise MisconfigurationException(\n f\"The provided video is too short {video.duration} to be clipped at {self.clip_sampler._clip_duration}\"\n )\n\n frames = loaded_clip[\"video\"]\n audio_samples = loaded_clip[\"audio\"]\n return {\n \"video\": frames,\n \"video_name\": video.name,\n \"video_index\": 0,\n \"clip_index\": clip_index,\n \"aug_index\": aug_index,\n **({\"audio\": audio_samples} if audio_samples is not None else {}),\n }\n\n def _make_encoded_video_dataset(self, data) -> \"LabeledVideoDataset\":\n raise NotImplementedError(\"Subclass must implement _make_encoded_video_dataset()\")\n\n\nclass VideoClassificationPathsDataSource(BaseVideoClassification, PathsDataSource):\n def __init__(\n self,\n clip_sampler: \"ClipSampler\",\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n decode_audio: bool = True,\n decoder: str = \"pyav\",\n ):\n super().__init__(\n clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n )\n PathsDataSource.__init__(\n self,\n extensions=(\"mp4\", \"avi\"),\n )\n\n def _make_encoded_video_dataset(self, data) -> \"LabeledVideoDataset\":\n ds: LabeledVideoDataset = labeled_video_dataset(\n pathlib.Path(data),\n self.clip_sampler,\n video_sampler=self.video_sampler,\n decode_audio=self.decode_audio,\n decoder=self.decoder,\n )\n return ds\n\n\nclass VideoClassificationFiftyOneDataSource(\n BaseVideoClassification,\n FiftyOneDataSource,\n):\n def __init__(\n self,\n clip_sampler: \"ClipSampler\",\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n decode_audio: bool = True,\n decoder: str = \"pyav\",\n label_field: str = \"ground_truth\",\n ):\n super().__init__(\n clip_sampler=clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n )\n FiftyOneDataSource.__init__(\n self,\n label_field=label_field,\n )\n\n @property\n def label_cls(self):\n return fol.Classification\n\n def _make_encoded_video_dataset(self, data: SampleCollection) -> \"LabeledVideoDataset\":\n classes = self._get_classes(data)\n label_to_class_mapping = dict(enumerate(classes))\n class_to_label_mapping = {c: lab for lab, c in label_to_class_mapping.items()}\n\n filepaths = data.values(\"filepath\")\n labels = data.values(self.label_field + \".label\")\n targets = [class_to_label_mapping[lab] for lab in labels]\n labeled_video_paths = LabeledVideoPaths(list(zip(filepaths, targets)))\n\n ds: LabeledVideoDataset = LabeledVideoDataset(\n labeled_video_paths,\n self.clip_sampler,\n video_sampler=self.video_sampler,\n decode_audio=self.decode_audio,\n decoder=self.decoder,\n )\n return ds\n\n\nclass VideoClassificationPreprocess(Preprocess):\n def __init__(\n self,\n train_transform: 
Optional[Dict[str, Callable]] = None,\n val_transform: Optional[Dict[str, Callable]] = None,\n test_transform: Optional[Dict[str, Callable]] = None,\n predict_transform: Optional[Dict[str, Callable]] = None,\n clip_sampler: Union[str, \"ClipSampler\"] = \"random\",\n clip_duration: float = 2,\n clip_sampler_kwargs: Dict[str, Any] = None,\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n decode_audio: bool = True,\n decoder: str = \"pyav\",\n **data_source_kwargs: Any,\n ):\n self.clip_sampler = clip_sampler\n self.clip_duration = clip_duration\n self.clip_sampler_kwargs = clip_sampler_kwargs\n self.video_sampler = video_sampler\n self.decode_audio = decode_audio\n self.decoder = decoder\n\n if not _PYTORCHVIDEO_AVAILABLE:\n raise ModuleNotFoundError(\"Please, run `pip install pytorchvideo`.\")\n\n if not clip_sampler_kwargs:\n clip_sampler_kwargs = {}\n\n if not clip_sampler:\n raise MisconfigurationException(\n \"clip_sampler should be provided as a string or ``pytorchvideo.data.clip_sampling.ClipSampler``\"\n )\n\n clip_sampler = make_clip_sampler(clip_sampler, clip_duration, **clip_sampler_kwargs)\n\n super().__init__(\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_sources={\n DefaultDataSources.FILES: VideoClassificationPathsDataSource(\n clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n ),\n DefaultDataSources.FOLDERS: VideoClassificationPathsDataSource(\n clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n ),\n DefaultDataSources.FIFTYONE: VideoClassificationFiftyOneDataSource(\n clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n **data_source_kwargs,\n ),\n DefaultDataSources.LABELSTUDIO: LabelStudioVideoClassificationDataSource(\n clip_sampler=clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n **data_source_kwargs,\n ),\n },\n default_data_source=DefaultDataSources.FILES,\n )\n\n def get_state_dict(self) -> Dict[str, Any]:\n return {\n **self.transforms,\n \"clip_sampler\": self.clip_sampler,\n \"clip_duration\": self.clip_duration,\n \"clip_sampler_kwargs\": self.clip_sampler_kwargs,\n \"video_sampler\": self.video_sampler,\n \"decode_audio\": self.decode_audio,\n \"decoder\": self.decoder,\n }\n\n @classmethod\n def load_state_dict(cls, state_dict: Dict[str, Any], strict: bool) -> \"VideoClassificationPreprocess\":\n return cls(**state_dict)\n\n def default_transforms(self) -> Dict[str, Callable]:\n if self.training:\n post_tensor_transform = [\n RandomCrop(244, pad_if_needed=True),\n RandomHorizontalFlip(p=0.5),\n ]\n else:\n post_tensor_transform = [\n CenterCrop(244),\n ]\n\n return {\n \"post_tensor_transform\": Compose(\n [\n ApplyTransformToKey(\n key=\"video\",\n transform=Compose([UniformTemporalSubsample(8)] + post_tensor_transform),\n ),\n ]\n ),\n \"per_batch_transform_on_device\": Compose(\n [\n ApplyTransformToKey(\n key=\"video\",\n transform=K.VideoSequential(\n K.Normalize(torch.tensor([0.45, 0.45, 0.45]), torch.tensor([0.225, 0.225, 0.225])),\n data_format=\"BCTHW\",\n same_on_frame=False,\n ),\n ),\n ]\n ),\n }\n\n\nclass VideoClassificationData(DataModule):\n \"\"\"Data module for Video classification tasks.\"\"\"\n\n preprocess_cls = VideoClassificationPreprocess\n", "path": "flash/video/classification/data.py"}], "after_files": [{"content": "# Copyright The PyTorch 
Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pathlib\nfrom typing import Any, Callable, Dict, List, Optional, Type, TYPE_CHECKING, Union\n\nimport numpy as np\nimport torch\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom torch.utils.data import Sampler\n\nfrom flash.core.data.data_module import DataModule\nfrom flash.core.data.data_source import (\n DefaultDataKeys,\n DefaultDataSources,\n FiftyOneDataSource,\n LabelsState,\n PathsDataSource,\n)\nfrom flash.core.data.process import Preprocess\nfrom flash.core.integrations.labelstudio.data_source import LabelStudioVideoClassificationDataSource\nfrom flash.core.utilities.imports import _FIFTYONE_AVAILABLE, _KORNIA_AVAILABLE, _PYTORCHVIDEO_AVAILABLE, lazy_import\n\nSampleCollection = None\nif _FIFTYONE_AVAILABLE:\n fol = lazy_import(\"fiftyone.core.labels\")\n if TYPE_CHECKING:\n from fiftyone.core.collections import SampleCollection\nelse:\n fol = None\n\nif _KORNIA_AVAILABLE:\n import kornia.augmentation as K\n\nif _PYTORCHVIDEO_AVAILABLE:\n from pytorchvideo.data.clip_sampling import ClipSampler, make_clip_sampler\n from pytorchvideo.data.encoded_video import EncodedVideo\n from pytorchvideo.data.labeled_video_dataset import labeled_video_dataset, LabeledVideoDataset\n from pytorchvideo.data.labeled_video_paths import LabeledVideoPaths\n from pytorchvideo.transforms import ApplyTransformToKey, UniformTemporalSubsample\n from torchvision.transforms import CenterCrop, Compose, RandomCrop, RandomHorizontalFlip\nelse:\n ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None\n\n_PYTORCHVIDEO_DATA = Dict[str, Union[str, torch.Tensor, int, float, List]]\n\nLabel = Union[int, List[int]]\n\n\nclass BaseVideoClassification:\n def __init__(\n self,\n clip_sampler: \"ClipSampler\",\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n decode_audio: bool = True,\n decoder: str = \"pyav\",\n ):\n self.clip_sampler = clip_sampler\n self.video_sampler = video_sampler\n self.decode_audio = decode_audio\n self.decoder = decoder\n\n def load_data(self, data: str, dataset: Optional[Any] = None) -> \"LabeledVideoDataset\":\n ds = self._make_encoded_video_dataset(data)\n if self.training:\n label_to_class_mapping = {p[1]: p[0].split(\"/\")[-2] for p in ds._labeled_videos._paths_and_labels}\n self.set_state(LabelsState(label_to_class_mapping))\n dataset.num_classes = len(np.unique([s[1][\"label\"] for s in ds._labeled_videos]))\n return ds\n\n def load_sample(self, sample):\n return sample\n\n def predict_load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n video_path = sample[DefaultDataKeys.INPUT]\n sample.update(self._encoded_video_to_dict(EncodedVideo.from_path(video_path)))\n sample[DefaultDataKeys.METADATA] = {\"filepath\": video_path}\n return sample\n\n def _encoded_video_to_dict(self, video, annotation: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:\n (\n clip_start,\n clip_end,\n clip_index,\n aug_index,\n is_last_clip,\n ) = 
self.clip_sampler(0.0, video.duration, annotation)\n\n loaded_clip = video.get_clip(clip_start, clip_end)\n\n clip_is_null = (\n loaded_clip is None or loaded_clip[\"video\"] is None or (loaded_clip[\"audio\"] is None and self.decode_audio)\n )\n\n if clip_is_null:\n raise MisconfigurationException(\n f\"The provided video is too short {video.duration} to be clipped at {self.clip_sampler._clip_duration}\"\n )\n\n frames = loaded_clip[\"video\"]\n audio_samples = loaded_clip[\"audio\"]\n return {\n \"video\": frames,\n \"video_name\": video.name,\n \"video_index\": 0,\n \"clip_index\": clip_index,\n \"aug_index\": aug_index,\n **({\"audio\": audio_samples} if audio_samples is not None else {}),\n }\n\n def _make_encoded_video_dataset(self, data) -> \"LabeledVideoDataset\":\n raise NotImplementedError(\"Subclass must implement _make_encoded_video_dataset()\")\n\n\nclass VideoClassificationPathsDataSource(BaseVideoClassification, PathsDataSource):\n def __init__(\n self,\n clip_sampler: \"ClipSampler\",\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n decode_audio: bool = True,\n decoder: str = \"pyav\",\n ):\n super().__init__(\n clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n )\n PathsDataSource.__init__(\n self,\n extensions=(\"mp4\", \"avi\"),\n )\n\n def _make_encoded_video_dataset(self, data) -> \"LabeledVideoDataset\":\n ds: LabeledVideoDataset = labeled_video_dataset(\n pathlib.Path(data),\n self.clip_sampler,\n video_sampler=self.video_sampler,\n decode_audio=self.decode_audio,\n decoder=self.decoder,\n )\n return ds\n\n\nclass VideoClassificationListDataSource(BaseVideoClassification, PathsDataSource):\n def __init__(\n self,\n clip_sampler: \"ClipSampler\",\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n decode_audio: bool = True,\n decoder: str = \"pyav\",\n ):\n super().__init__(\n clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n )\n PathsDataSource.__init__(\n self,\n extensions=(\"mp4\", \"avi\"),\n )\n\n def _to_multi_hot(self, label_list: List[int]) -> torch.Tensor:\n v = torch.zeros(len(self.labels_set))\n for label in label_list:\n v[label] = 1\n return v\n\n def _make_encoded_video_dataset(self, data) -> \"LabeledVideoDataset\":\n [paths, str_labels] = data\n self.is_multilabel = any(isinstance(label, list) for label in str_labels)\n if self.is_multilabel:\n self.labels_set = {label for label_list in str_labels for label in label_list}\n self.label_to_id = {label: i for i, label in enumerate(sorted(self.labels_set))}\n self.id_to_label = {i: label for label, i in self.label_to_id.items()}\n\n encoded_labels = [\n self._to_multi_hot([self.label_to_id[classname] for classname in label_list])\n for label_list in str_labels\n ]\n\n data = list(\n zip(\n paths,\n encoded_labels,\n )\n )\n else:\n self.labels_set = set(str_labels)\n self.label_to_id = {label: i for i, label in enumerate(sorted(self.labels_set))}\n self.id_to_label = {i: label for label, i in self.label_to_id.items()}\n data = list(\n zip(paths, [self.label_to_id[classname] for classname in str_labels])\n ) # List[Lists] -> List[Tuples]\n labeled_video_paths = LabeledVideoPaths(data)\n ds = LabeledVideoDataset(\n labeled_video_paths,\n self.clip_sampler,\n video_sampler=self.video_sampler,\n decode_audio=self.decode_audio,\n decoder=self.decoder,\n )\n return ds\n\n def load_data(self, data: str, dataset: Optional[Any] = None) -> \"LabeledVideoDataset\":\n ds = 
self._make_encoded_video_dataset(data)\n\n if self.training:\n self.set_state(LabelsState(self.id_to_label))\n dataset.num_classes = len(self.labels_set)\n return ds\n\n\nclass VideoClassificationFiftyOneDataSource(\n BaseVideoClassification,\n FiftyOneDataSource,\n):\n def __init__(\n self,\n clip_sampler: \"ClipSampler\",\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n decode_audio: bool = True,\n decoder: str = \"pyav\",\n label_field: str = \"ground_truth\",\n ):\n super().__init__(\n clip_sampler=clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n )\n FiftyOneDataSource.__init__(\n self,\n label_field=label_field,\n )\n\n @property\n def label_cls(self):\n return fol.Classification\n\n def _make_encoded_video_dataset(self, data: SampleCollection) -> \"LabeledVideoDataset\":\n classes = self._get_classes(data)\n label_to_class_mapping = dict(enumerate(classes))\n class_to_label_mapping = {c: lab for lab, c in label_to_class_mapping.items()}\n\n filepaths = data.values(\"filepath\")\n labels = data.values(self.label_field + \".label\")\n targets = [class_to_label_mapping[lab] for lab in labels]\n labeled_video_paths = LabeledVideoPaths(list(zip(filepaths, targets)))\n\n ds: LabeledVideoDataset = LabeledVideoDataset(\n labeled_video_paths,\n self.clip_sampler,\n video_sampler=self.video_sampler,\n decode_audio=self.decode_audio,\n decoder=self.decoder,\n )\n return ds\n\n\nclass VideoClassificationPreprocess(Preprocess):\n def __init__(\n self,\n train_transform: Optional[Dict[str, Callable]] = None,\n val_transform: Optional[Dict[str, Callable]] = None,\n test_transform: Optional[Dict[str, Callable]] = None,\n predict_transform: Optional[Dict[str, Callable]] = None,\n clip_sampler: Union[str, \"ClipSampler\"] = \"random\",\n clip_duration: float = 2,\n clip_sampler_kwargs: Dict[str, Any] = None,\n video_sampler: Type[Sampler] = torch.utils.data.RandomSampler,\n decode_audio: bool = True,\n decoder: str = \"pyav\",\n **data_source_kwargs: Any,\n ):\n self.clip_sampler = clip_sampler\n self.clip_duration = clip_duration\n self.clip_sampler_kwargs = clip_sampler_kwargs\n self.video_sampler = video_sampler\n self.decode_audio = decode_audio\n self.decoder = decoder\n\n if not _PYTORCHVIDEO_AVAILABLE:\n raise ModuleNotFoundError(\"Please, run `pip install pytorchvideo`.\")\n\n if not clip_sampler_kwargs:\n clip_sampler_kwargs = {}\n\n if not clip_sampler:\n raise MisconfigurationException(\n \"clip_sampler should be provided as a string or ``pytorchvideo.data.clip_sampling.ClipSampler``\"\n )\n\n clip_sampler = make_clip_sampler(clip_sampler, clip_duration, **clip_sampler_kwargs)\n\n super().__init__(\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_sources={\n DefaultDataSources.FILES: VideoClassificationListDataSource(\n clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n ),\n DefaultDataSources.FOLDERS: VideoClassificationPathsDataSource(\n clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n ),\n DefaultDataSources.FIFTYONE: VideoClassificationFiftyOneDataSource(\n clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n decoder=decoder,\n **data_source_kwargs,\n ),\n DefaultDataSources.LABELSTUDIO: LabelStudioVideoClassificationDataSource(\n clip_sampler=clip_sampler,\n video_sampler=video_sampler,\n decode_audio=decode_audio,\n 
decoder=decoder,\n **data_source_kwargs,\n ),\n },\n default_data_source=DefaultDataSources.FILES,\n )\n\n def get_state_dict(self) -> Dict[str, Any]:\n return {\n **self.transforms,\n \"clip_sampler\": self.clip_sampler,\n \"clip_duration\": self.clip_duration,\n \"clip_sampler_kwargs\": self.clip_sampler_kwargs,\n \"video_sampler\": self.video_sampler,\n \"decode_audio\": self.decode_audio,\n \"decoder\": self.decoder,\n }\n\n @classmethod\n def load_state_dict(cls, state_dict: Dict[str, Any], strict: bool) -> \"VideoClassificationPreprocess\":\n return cls(**state_dict)\n\n def default_transforms(self) -> Dict[str, Callable]:\n if self.training:\n post_tensor_transform = [\n RandomCrop(244, pad_if_needed=True),\n RandomHorizontalFlip(p=0.5),\n ]\n else:\n post_tensor_transform = [\n CenterCrop(244),\n ]\n\n return {\n \"post_tensor_transform\": Compose(\n [\n ApplyTransformToKey(\n key=\"video\",\n transform=Compose([UniformTemporalSubsample(8)] + post_tensor_transform),\n ),\n ]\n ),\n \"per_batch_transform_on_device\": Compose(\n [\n ApplyTransformToKey(\n key=\"video\",\n transform=K.VideoSequential(\n K.Normalize(torch.tensor([0.45, 0.45, 0.45]), torch.tensor([0.225, 0.225, 0.225])),\n data_format=\"BCTHW\",\n same_on_frame=False,\n ),\n ),\n ]\n ),\n }\n\n\nclass VideoClassificationData(DataModule):\n \"\"\"Data module for Video classification tasks.\"\"\"\n\n preprocess_cls = VideoClassificationPreprocess\n", "path": "flash/video/classification/data.py"}]} | 3,721 | 818 |
gh_patches_debug_33817 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-530 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
opentelemetry-instrument command fails if incompatible instrumentation is found
If an instrumentation is installed for a library that is not found in the environment, the instrument command raises the following exception:
```
❯ opentelemetry-instrument python main.py
Instrumenting of flask failed
Traceback (most recent call last):
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py", line 71, in _load_instrumentors
conflict = get_dist_dependency_conflicts(entry_point.dist)
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py", line 33, in get_dist_dependency_conflicts
return get_dependency_conflicts(deps)
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py", line 41, in get_dependency_conflicts
get_distribution(str(dep))
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py", line 482, in get_distribution
dist = get_provider(dist)
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py", line 358, in get_provider
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
IndexError: list index out of range
Failed to auto initialize opentelemetry
Traceback (most recent call last):
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py", line 111, in initialize
_load_instrumentors(distro)
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py", line 85, in _load_instrumentors
raise exc
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py", line 71, in _load_instrumentors
conflict = get_dist_dependency_conflicts(entry_point.dist)
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py", line 33, in get_dist_dependency_conflicts
return get_dependency_conflicts(deps)
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py", line 41, in get_dependency_conflicts
get_distribution(str(dep))
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py", line 482, in get_distribution
dist = get_provider(dist)
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py", line 358, in get_provider
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
IndexError: list index out of range
```
The bootstrap command does not install any instrumentations for libraries that are not present in the environment, so this would only happen if someone manually installed an instrumentation package for a library they're not using. This is not a deal breaker and doesn't require an immediate hotfix. That said, this IS a bug, as the intended behavior of the instrument command is to silently ignore such instrumentations.
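For reference, a minimal sketch of the underlying `pkg_resources` behaviour (assuming the target library, e.g. `flask`, is not installed): a requirement that still carries its `extra == "instruments"` marker is skipped by `require()`, which then returns an empty list, so the `[0]` lookup in `get_provider()` raises `IndexError` instead of the `DistributionNotFound` that `get_dependency_conflicts()` is prepared to catch.

```python
from pkg_resources import Requirement, get_distribution

# Illustrative requirement in the shape produced by dist.requires(("instruments",)).
dep = Requirement.parse('flask >= 1.0; extra == "instruments"')

# With flask absent, this raises IndexError rather than DistributionNotFound.
get_distribution(str(dep))
```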
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py`
Content:
```
1 from typing import Collection, Optional
2
3 from pkg_resources import (
4 Distribution,
5 DistributionNotFound,
6 VersionConflict,
7 get_distribution,
8 )
9
10
11 class DependencyConflict:
12 required: str = None
13 found: Optional[str] = None
14
15 def __init__(self, required, found=None):
16 self.required = required
17 self.found = found
18
19 def __str__(self):
20 return 'DependencyConflict: requested: "{0}" but found: "{1}"'.format(
21 self.required, self.found
22 )
23
24
25 def get_dist_dependency_conflicts(
26 dist: Distribution,
27 ) -> Optional[DependencyConflict]:
28 deps = [
29 dep
30 for dep in dist.requires(("instruments",))
31 if dep not in dist.requires()
32 ]
33 return get_dependency_conflicts(deps)
34
35
36 def get_dependency_conflicts(
37 deps: Collection[str],
38 ) -> Optional[DependencyConflict]:
39 for dep in deps:
40 try:
41 get_distribution(str(dep))
42 except VersionConflict as exc:
43 return DependencyConflict(dep, exc.dist)
44 except DistributionNotFound:
45 return DependencyConflict(dep)
46 return None
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py
--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py
+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py
@@ -1,12 +1,16 @@
+from logging import getLogger
from typing import Collection, Optional
from pkg_resources import (
Distribution,
DistributionNotFound,
+ RequirementParseError,
VersionConflict,
get_distribution,
)
+logger = getLogger(__file__)
+
class DependencyConflict:
required: str = None
@@ -25,12 +29,19 @@
def get_dist_dependency_conflicts(
dist: Distribution,
) -> Optional[DependencyConflict]:
- deps = [
- dep
- for dep in dist.requires(("instruments",))
- if dep not in dist.requires()
- ]
- return get_dependency_conflicts(deps)
+ main_deps = dist.requires()
+ instrumentation_deps = []
+ for dep in dist.requires(("instruments",)):
+ if dep not in main_deps:
+ # we set marker to none so string representation of the dependency looks like
+ # requests ~= 1.0
+ # instead of
+ # requests ~= 1.0; extra = "instruments"
+ # which does not work with `get_distribution()`
+ dep.marker = None
+ instrumentation_deps.append(str(dep))
+
+ return get_dependency_conflicts(instrumentation_deps)
def get_dependency_conflicts(
@@ -38,9 +49,16 @@
) -> Optional[DependencyConflict]:
for dep in deps:
try:
- get_distribution(str(dep))
+ get_distribution(dep)
except VersionConflict as exc:
return DependencyConflict(dep, exc.dist)
except DistributionNotFound:
return DependencyConflict(dep)
+ except RequirementParseError as exc:
+ logger.warning(
+ 'error parsing dependency, reporting as a conflict: "%s" - %s',
+ dep,
+ exc,
+ )
+ return DependencyConflict(dep)
return None
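For context, this is roughly how the auto-instrumentation loader consumes the patched helper; the call to `get_dist_dependency_conflicts()` matches the traceback in the issue, while the loop structure, entry-point group name and logging here are an illustrative sketch rather than the exact `sitecustomize.py` code:

```python
from logging import getLogger

from pkg_resources import iter_entry_points

from opentelemetry.instrumentation.dependencies import get_dist_dependency_conflicts

logger = getLogger(__name__)

for entry_point in iter_entry_points("opentelemetry_instrumentor"):
    conflict = get_dist_dependency_conflicts(entry_point.dist)
    if conflict:
        # Absent or incompatible target library: skip quietly instead of crashing.
        logger.debug("Skipping instrumentation %s: %s", entry_point.name, conflict)
        continue
    entry_point.load()().instrument()
```

Clearing `dep.marker` before calling `get_distribution()` is what makes this skip path work: the string passed in looks like a plain `requests ~= 1.0` requirement again, so a missing library surfaces as `DistributionNotFound` (reported as a `DependencyConflict`) instead of the `IndexError` above.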
| {"golden_diff": "diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py\n@@ -1,12 +1,16 @@\n+from logging import getLogger\n from typing import Collection, Optional\n \n from pkg_resources import (\n Distribution,\n DistributionNotFound,\n+ RequirementParseError,\n VersionConflict,\n get_distribution,\n )\n \n+logger = getLogger(__file__)\n+\n \n class DependencyConflict:\n required: str = None\n@@ -25,12 +29,19 @@\n def get_dist_dependency_conflicts(\n dist: Distribution,\n ) -> Optional[DependencyConflict]:\n- deps = [\n- dep\n- for dep in dist.requires((\"instruments\",))\n- if dep not in dist.requires()\n- ]\n- return get_dependency_conflicts(deps)\n+ main_deps = dist.requires()\n+ instrumentation_deps = []\n+ for dep in dist.requires((\"instruments\",)):\n+ if dep not in main_deps:\n+ # we set marker to none so string representation of the dependency looks like\n+ # requests ~= 1.0\n+ # instead of\n+ # requests ~= 1.0; extra = \"instruments\"\n+ # which does not work with `get_distribution()`\n+ dep.marker = None\n+ instrumentation_deps.append(str(dep))\n+\n+ return get_dependency_conflicts(instrumentation_deps)\n \n \n def get_dependency_conflicts(\n@@ -38,9 +49,16 @@\n ) -> Optional[DependencyConflict]:\n for dep in deps:\n try:\n- get_distribution(str(dep))\n+ get_distribution(dep)\n except VersionConflict as exc:\n return DependencyConflict(dep, exc.dist)\n except DistributionNotFound:\n return DependencyConflict(dep)\n+ except RequirementParseError as exc:\n+ logger.warning(\n+ 'error parsing dependency, reporting as a conflict: \"%s\" - %s',\n+ dep,\n+ exc,\n+ )\n+ return DependencyConflict(dep)\n return None\n", "issue": "opentelemetry-instrument command fails if incompatible instrumentation is found\nIf an instrumentation is installed for a library that is not found in the environment, the instrument command raises the following exception:\r\n\r\n\r\n```\r\n\u276f opentelemetry-instrument python main.py\r\nInstrumenting of flask failed\r\nTraceback (most recent call last):\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 71, in _load_instrumentors\r\n conflict = get_dist_dependency_conflicts(entry_point.dist)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\", line 33, in get_dist_dependency_conflicts\r\n return get_dependency_conflicts(deps)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\", line 41, in get_dependency_conflicts\r\n get_distribution(str(dep))\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 482, in get_distribution\r\n dist = get_provider(dist)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 358, in get_provider\r\n return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]\r\nIndexError: list index out of range\r\nFailed to auto initialize opentelemetry\r\nTraceback (most recent call last):\r\n File 
\"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 111, in initialize\r\n _load_instrumentors(distro)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 85, in _load_instrumentors\r\n raise exc\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 71, in _load_instrumentors\r\n conflict = get_dist_dependency_conflicts(entry_point.dist)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\", line 33, in get_dist_dependency_conflicts\r\n return get_dependency_conflicts(deps)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\", line 41, in get_dependency_conflicts\r\n get_distribution(str(dep))\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 482, in get_distribution\r\n dist = get_provider(dist)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 358, in get_provider\r\n return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]\r\nIndexError: list index out of range\r\n```\r\n\r\nbootstrap command does not install any instrumentations for libraries that are not present in the environment so this would only happen if someone manually installed an instrumentation package for a library they're not using. So this is not a deal breaker and doesn't require an immediate hotfix. 
That said, this IS a bug as the intended behavior of instrument command is to silently ignore such instrumentations.\n", "before_files": [{"content": "from typing import Collection, Optional\n\nfrom pkg_resources import (\n Distribution,\n DistributionNotFound,\n VersionConflict,\n get_distribution,\n)\n\n\nclass DependencyConflict:\n required: str = None\n found: Optional[str] = None\n\n def __init__(self, required, found=None):\n self.required = required\n self.found = found\n\n def __str__(self):\n return 'DependencyConflict: requested: \"{0}\" but found: \"{1}\"'.format(\n self.required, self.found\n )\n\n\ndef get_dist_dependency_conflicts(\n dist: Distribution,\n) -> Optional[DependencyConflict]:\n deps = [\n dep\n for dep in dist.requires((\"instruments\",))\n if dep not in dist.requires()\n ]\n return get_dependency_conflicts(deps)\n\n\ndef get_dependency_conflicts(\n deps: Collection[str],\n) -> Optional[DependencyConflict]:\n for dep in deps:\n try:\n get_distribution(str(dep))\n except VersionConflict as exc:\n return DependencyConflict(dep, exc.dist)\n except DistributionNotFound:\n return DependencyConflict(dep)\n return None\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py"}], "after_files": [{"content": "from logging import getLogger\nfrom typing import Collection, Optional\n\nfrom pkg_resources import (\n Distribution,\n DistributionNotFound,\n RequirementParseError,\n VersionConflict,\n get_distribution,\n)\n\nlogger = getLogger(__file__)\n\n\nclass DependencyConflict:\n required: str = None\n found: Optional[str] = None\n\n def __init__(self, required, found=None):\n self.required = required\n self.found = found\n\n def __str__(self):\n return 'DependencyConflict: requested: \"{0}\" but found: \"{1}\"'.format(\n self.required, self.found\n )\n\n\ndef get_dist_dependency_conflicts(\n dist: Distribution,\n) -> Optional[DependencyConflict]:\n main_deps = dist.requires()\n instrumentation_deps = []\n for dep in dist.requires((\"instruments\",)):\n if dep not in main_deps:\n # we set marker to none so string representation of the dependency looks like\n # requests ~= 1.0\n # instead of\n # requests ~= 1.0; extra = \"instruments\"\n # which does not work with `get_distribution()`\n dep.marker = None\n instrumentation_deps.append(str(dep))\n\n return get_dependency_conflicts(instrumentation_deps)\n\n\ndef get_dependency_conflicts(\n deps: Collection[str],\n) -> Optional[DependencyConflict]:\n for dep in deps:\n try:\n get_distribution(dep)\n except VersionConflict as exc:\n return DependencyConflict(dep, exc.dist)\n except DistributionNotFound:\n return DependencyConflict(dep)\n except RequirementParseError as exc:\n logger.warning(\n 'error parsing dependency, reporting as a conflict: \"%s\" - %s',\n dep,\n exc,\n )\n return DependencyConflict(dep)\n return None\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py"}]} | 1,454 | 483 |
gh_patches_debug_8851 | rasdani/github-patches | git_diff | yt-dlp__yt-dlp-3789 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
StreamCZ extractor broken
### Checklist
- [X] I'm reporting a broken site
- [X] I've verified that I'm running yt-dlp version **2022.04.08** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- [X] I've checked that all provided URLs are alive and playable in a browser
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
world
### Description
Broken support for stream.cz
Example: https://www.stream.cz/autobazar/vecerni-auto-271357
### Verbose log
```shell
C:\Users\xxx>yt-dlp.lnk https://www.stream.cz/autobazar/vecerni-auto-271357 -U -v
[debug] Command-line config: ['https://www.stream.cz/autobazar/vecerni-auto-271357', '-U', '-v']
[debug] User config "C:\Users\xxx\yt-dlp.conf": ['--user-agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36', '--no-check-certificate', '-o', 'D:\\Downloads\\yt-dlp\\%(autonumber)03d_%(title)s.%(ext)s', '--no-mtime', '--format', 'bestvideo[ext=mp4][height<=1080][fps<=30]+bestaudio[ext=m4a]/best[ext=mp4][height<=1080][fps<=30]/best[height<=1080][fps<=30]/best', '--merge-output-format', 'mkv', '--ffmpeg-location', 'C:\\Apps\\ffmpeg\\bin\\ffmpeg.exe', '--all-subs', '-i', '--add-metadata', '--remux-video', 'mkv', '--add-header', 'accept-language: cs,fi;q=0.9,en-US;q=0.8,en;q=0.7,en-GB;q=0.6', '--add-header', 'sec-ch-ua: "Chromium";v="94", "Google Chrome";v="94", ";Not A Brand";v="99"', '--add-header', 'sec-ch-ua-arch: "x86"', '--add-header', 'sec-ch-ua-bitness: "64"', '--add-header', 'sec-ch-ua-full-version: "94.0.4606.71"', '--add-header', 'sec-ch-ua-mobile: ?0', '--add-header', 'sec-ch-ua-model: ""', '--add-header', 'sec-ch-ua-platform: "Windows"', '--add-header', 'sec-ch-ua-platform-version: "10.0.0"', '--add-header', 'sec-fetch-dest: document', '--add-header', 'sec-fetch-mode: navigate', '--add-header', 'sec-fetch-site: none', '--add-header', 'sec-fetch-user: ?1', '--add-header', 'service-worker-navigation-preload: true', '--add-header', 'upgrade-insecure-requests: 1', '--add-header', 'alt-svc: h3=":443"; ma=2592000,h3-29=":443"; ma=2592000,h3-T051=":443"; ma=2592000,h3-Q050=":443"; ma=2592000,h3-Q046=":443"; ma=2592000,h3-Q043=":443"; ma=2592000,quic=":443"; ma=2592000; v="46,43"', '--add-header', 'cache-control: no-cache, no-store, max-age=0, must-revalidate']
[debug] Encodings: locale cp1252, fs utf-8, out utf-8, err utf-8, pref cp1252
[debug] yt-dlp version 2022.04.08 [7884ade] (win_exe)
[debug] Python version 3.8.10 (CPython 64bit) - Windows-10-10.0.19044-SP0
[debug] Checking exe version: "C:\Apps\ffmpeg\bin\ffmpeg.exe" -bsfs
[debug] Checking exe version: "C:\Apps\ffmpeg\bin\ffprobe" -bsfs
[debug] exe versions: ffmpeg 4.4-full_build-www.gyan.dev (setts), ffprobe 4.4-full_build-www.gyan.dev
[debug] Optional libraries: brotli, certifi, Cryptodome, mutagen, sqlite, websockets
[debug] Proxy map: {}
Latest version: 2022.04.08, Current version: 2022.04.08
yt-dlp is up to date (2022.04.08)
[debug] [StreamCZ] Extracting URL: https://www.stream.cz/autobazar/vecerni-auto-271357
[StreamCZ] 271357: Downloading GraphQL result
[StreamCZ] 271357: Downloading playlist
ERROR: 'NoneType' object has no attribute 'items'
Traceback (most recent call last):
File "yt_dlp\YoutubeDL.py", line 1408, in wrapper
File "yt_dlp\YoutubeDL.py", line 1478, in __extract_info
File "yt_dlp\extractor\common.py", line 641, in extract
File "yt_dlp\extractor\streamcz.py", line 112, in _real_extract
File "yt_dlp\extractor\streamcz.py", line 58, in _extract_formats
AttributeError: 'NoneType' object has no attribute 'items'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt_dlp/extractor/streamcz.py`
Content:
```
1 import json
2
3 from .common import InfoExtractor
4 from ..utils import (
5 float_or_none,
6 int_or_none,
7 parse_codecs,
8 traverse_obj,
9 urljoin,
10 )
11
12
13 class StreamCZIE(InfoExtractor):
14 _VALID_URL = r'https?://(?:www\.)?(?:stream|televizeseznam)\.cz/[^?#]+/(?P<display_id>[^?#]+)-(?P<id>[0-9]+)'
15 _TESTS = [{
16 'url': 'https://www.televizeseznam.cz/video/lajna/buh-57953890',
17 'md5': '40c41ade1464a390a0b447e333df4239',
18 'info_dict': {
19 'id': '57953890',
20 'ext': 'mp4',
21 'title': 'Bůh',
22 'display_id': 'buh',
23 'description': 'md5:8f5f09b9b7bc67df910486cdd88f7165',
24 'duration': 1369.6,
25 'view_count': int,
26 }
27 }, {
28 'url': 'https://www.stream.cz/kdo-to-mluvi/kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna-64087937',
29 'md5': '41fd358000086a1ccdb068c77809b158',
30 'info_dict': {
31 'id': '64087937',
32 'ext': 'mp4',
33 'title': 'Kdo to mluví? Velké odhalení přináší nový pořad už od 25. srpna',
34 'display_id': 'kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna',
35 'description': 'md5:97a811000a6460266029d6c1c2ebcd59',
36 'duration': 50.2,
37 'view_count': int,
38 }
39 }, {
40 'url': 'https://www.stream.cz/tajemno/znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili-64147267',
41 'md5': '3ee4d0be040e8f4a543e67e509d55e3f',
42 'info_dict': {
43 'id': '64147267',
44 'ext': 'mp4',
45 'title': 'Zničehonic jim skrz střechu prolítnul záhadný předmět. Badatelé vše objasnili',
46 'display_id': 'znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili',
47 'description': 'md5:4b8ada6718d34bb011c4e04ca4bc19bf',
48 'duration': 442.84,
49 'view_count': int,
50 }
51 }]
52
53 def _extract_formats(self, spl_url, video):
54 for ext, pref, streams in (
55 ('ts', -1, traverse_obj(video, ('http_stream', 'qualities'))),
56 ('mp4', 1, video.get('mp4'))):
57 for format_id, stream in streams.items():
58 if not stream.get('url'):
59 continue
60 yield {
61 'format_id': f'{format_id}-{ext}',
62 'ext': ext,
63 'source_preference': pref,
64 'url': urljoin(spl_url, stream['url']),
65 'tbr': float_or_none(stream.get('bandwidth'), scale=1000),
66 'duration': float_or_none(stream.get('duration'), scale=1000),
67 'width': traverse_obj(stream, ('resolution', 0)),
68 'height': traverse_obj(stream, ('resolution', 1)) or int_or_none(format_id.replace('p', '')),
69 **parse_codecs(stream.get('codec')),
70 }
71
72 def _real_extract(self, url):
73 display_id, video_id = self._match_valid_url(url).groups()
74
75 data = self._download_json(
76 'https://www.televizeseznam.cz/api/graphql', video_id, 'Downloading GraphQL result',
77 data=json.dumps({
78 'variables': {'urlName': video_id},
79 'query': '''
80 query LoadEpisode($urlName : String){ episode(urlName: $urlName){ ...VideoDetailFragmentOnEpisode } }
81 fragment VideoDetailFragmentOnEpisode on Episode {
82 id
83 spl
84 urlName
85 name
86 perex
87 duration
88 views
89 }'''
90 }).encode('utf-8'),
91 headers={'Content-Type': 'application/json;charset=UTF-8'}
92 )['data']['episode']
93
94 spl_url = data['spl'] + 'spl2,3'
95 metadata = self._download_json(spl_url, video_id, 'Downloading playlist')
96 if 'Location' in metadata and 'data' not in metadata:
97 spl_url = metadata['Location']
98 metadata = self._download_json(spl_url, video_id, 'Downloading redirected playlist')
99 video = metadata['data']
100
101 subtitles = {}
102 for subs in video.get('subtitles', {}).values():
103 if not subs.get('language'):
104 continue
105 for ext, sub_url in subs.get('urls').items():
106 subtitles.setdefault(subs['language'], []).append({
107 'ext': ext,
108 'url': urljoin(spl_url, sub_url)
109 })
110
111 formats = list(self._extract_formats(spl_url, video))
112 self._sort_formats(formats)
113
114 return {
115 'id': video_id,
116 'display_id': display_id,
117 'title': data.get('name'),
118 'description': data.get('perex'),
119 'duration': float_or_none(data.get('duration')),
120 'view_count': int_or_none(data.get('views')),
121 'formats': formats,
122 'subtitles': subtitles,
123 }
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yt_dlp/extractor/streamcz.py b/yt_dlp/extractor/streamcz.py
--- a/yt_dlp/extractor/streamcz.py
+++ b/yt_dlp/extractor/streamcz.py
@@ -52,8 +52,8 @@
def _extract_formats(self, spl_url, video):
for ext, pref, streams in (
- ('ts', -1, traverse_obj(video, ('http_stream', 'qualities'))),
- ('mp4', 1, video.get('mp4'))):
+ ('ts', -1, traverse_obj(video, ('http_stream', 'qualities')) or {}),
+ ('mp4', 1, video.get('mp4') or {})):
for format_id, stream in streams.items():
if not stream.get('url'):
continue
| {"golden_diff": "diff --git a/yt_dlp/extractor/streamcz.py b/yt_dlp/extractor/streamcz.py\n--- a/yt_dlp/extractor/streamcz.py\n+++ b/yt_dlp/extractor/streamcz.py\n@@ -52,8 +52,8 @@\n \n def _extract_formats(self, spl_url, video):\n for ext, pref, streams in (\n- ('ts', -1, traverse_obj(video, ('http_stream', 'qualities'))),\n- ('mp4', 1, video.get('mp4'))):\n+ ('ts', -1, traverse_obj(video, ('http_stream', 'qualities')) or {}),\n+ ('mp4', 1, video.get('mp4') or {})):\n for format_id, stream in streams.items():\n if not stream.get('url'):\n continue\n", "issue": "StreamCZ extractor broken\n### Checklist\n\n- [X] I'm reporting a broken site\n- [X] I've verified that I'm running yt-dlp version **2022.04.08** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\n- [X] I've checked that all provided URLs are alive and playable in a browser\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nworld\n\n### Description\n\nBroken support for stream.cz\r\nExample: https://www.stream.cz/autobazar/vecerni-auto-271357\n\n### Verbose log\n\n```shell\nC:\\Users\\xxx>yt-dlp.lnk https://www.stream.cz/autobazar/vecerni-auto-271357 -U -v\r\n[debug] Command-line config: ['https://www.stream.cz/autobazar/vecerni-auto-271357', '-U', '-v']\r\n[debug] User config \"C:\\Users\\xxx\\yt-dlp.conf\": ['--user-agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36', '--no-check-certificate', '-o', 'D:\\\\Downloads\\\\yt-dlp\\\\%(autonumber)03d_%(title)s.%(ext)s', '--no-mtime', '--format', 'bestvideo[ext=mp4][height<=1080][fps<=30]+bestaudio[ext=m4a]/best[ext=mp4][height<=1080][fps<=30]/best[height<=1080][fps<=30]/best', '--merge-output-format', 'mkv', '--ffmpeg-location', 'C:\\\\Apps\\\\ffmpeg\\\\bin\\\\ffmpeg.exe', '--all-subs', '-i', '--add-metadata', '--remux-video', 'mkv', '--add-header', 'accept-language: cs,fi;q=0.9,en-US;q=0.8,en;q=0.7,en-GB;q=0.6', '--add-header', 'sec-ch-ua: \"Chromium\";v=\"94\", \"Google Chrome\";v=\"94\", \";Not A Brand\";v=\"99\"', '--add-header', 'sec-ch-ua-arch: \"x86\"', '--add-header', 'sec-ch-ua-bitness: \"64\"', '--add-header', 'sec-ch-ua-full-version: \"94.0.4606.71\"', '--add-header', 'sec-ch-ua-mobile: ?0', '--add-header', 'sec-ch-ua-model: \"\"', '--add-header', 'sec-ch-ua-platform: \"Windows\"', '--add-header', 'sec-ch-ua-platform-version: \"10.0.0\"', '--add-header', 'sec-fetch-dest: document', '--add-header', 'sec-fetch-mode: navigate', '--add-header', 'sec-fetch-site: none', '--add-header', 'sec-fetch-user: ?1', '--add-header', 'service-worker-navigation-preload: true', '--add-header', 'upgrade-insecure-requests: 1', '--add-header', 'alt-svc: h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-T051=\":443\"; ma=2592000,h3-Q050=\":443\"; 
ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"', '--add-header', 'cache-control: no-cache, no-store, max-age=0, must-revalidate']\r\n[debug] Encodings: locale cp1252, fs utf-8, out utf-8, err utf-8, pref cp1252\r\n[debug] yt-dlp version 2022.04.08 [7884ade] (win_exe)\r\n[debug] Python version 3.8.10 (CPython 64bit) - Windows-10-10.0.19044-SP0\r\n[debug] Checking exe version: \"C:\\Apps\\ffmpeg\\bin\\ffmpeg.exe\" -bsfs\r\n[debug] Checking exe version: \"C:\\Apps\\ffmpeg\\bin\\ffprobe\" -bsfs\r\n[debug] exe versions: ffmpeg 4.4-full_build-www.gyan.dev (setts), ffprobe 4.4-full_build-www.gyan.dev\r\n[debug] Optional libraries: brotli, certifi, Cryptodome, mutagen, sqlite, websockets\r\n[debug] Proxy map: {}\r\nLatest version: 2022.04.08, Current version: 2022.04.08\r\nyt-dlp is up to date (2022.04.08)\r\n[debug] [StreamCZ] Extracting URL: https://www.stream.cz/autobazar/vecerni-auto-271357\r\n[StreamCZ] 271357: Downloading GraphQL result\r\n[StreamCZ] 271357: Downloading playlist\r\nERROR: 'NoneType' object has no attribute 'items'\r\nTraceback (most recent call last):\r\n File \"yt_dlp\\YoutubeDL.py\", line 1408, in wrapper\r\n File \"yt_dlp\\YoutubeDL.py\", line 1478, in __extract_info\r\n File \"yt_dlp\\extractor\\common.py\", line 641, in extract\r\n File \"yt_dlp\\extractor\\streamcz.py\", line 112, in _real_extract\r\n File \"yt_dlp\\extractor\\streamcz.py\", line 58, in _extract_formats\r\nAttributeError: 'NoneType' object has no attribute 'items'\n```\n\n", "before_files": [{"content": "import json\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n float_or_none,\n int_or_none,\n parse_codecs,\n traverse_obj,\n urljoin,\n)\n\n\nclass StreamCZIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?(?:stream|televizeseznam)\\.cz/[^?#]+/(?P<display_id>[^?#]+)-(?P<id>[0-9]+)'\n _TESTS = [{\n 'url': 'https://www.televizeseznam.cz/video/lajna/buh-57953890',\n 'md5': '40c41ade1464a390a0b447e333df4239',\n 'info_dict': {\n 'id': '57953890',\n 'ext': 'mp4',\n 'title': 'B\u016fh',\n 'display_id': 'buh',\n 'description': 'md5:8f5f09b9b7bc67df910486cdd88f7165',\n 'duration': 1369.6,\n 'view_count': int,\n }\n }, {\n 'url': 'https://www.stream.cz/kdo-to-mluvi/kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna-64087937',\n 'md5': '41fd358000086a1ccdb068c77809b158',\n 'info_dict': {\n 'id': '64087937',\n 'ext': 'mp4',\n 'title': 'Kdo to mluv\u00ed? Velk\u00e9 odhalen\u00ed p\u0159in\u00e1\u0161\u00ed nov\u00fd po\u0159ad u\u017e od 25. srpna',\n 'display_id': 'kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna',\n 'description': 'md5:97a811000a6460266029d6c1c2ebcd59',\n 'duration': 50.2,\n 'view_count': int,\n }\n }, {\n 'url': 'https://www.stream.cz/tajemno/znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili-64147267',\n 'md5': '3ee4d0be040e8f4a543e67e509d55e3f',\n 'info_dict': {\n 'id': '64147267',\n 'ext': 'mp4',\n 'title': 'Zni\u010dehonic jim skrz st\u0159echu prol\u00edtnul z\u00e1hadn\u00fd p\u0159edm\u011bt. 
Badatel\u00e9 v\u0161e objasnili',\n 'display_id': 'znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili',\n 'description': 'md5:4b8ada6718d34bb011c4e04ca4bc19bf',\n 'duration': 442.84,\n 'view_count': int,\n }\n }]\n\n def _extract_formats(self, spl_url, video):\n for ext, pref, streams in (\n ('ts', -1, traverse_obj(video, ('http_stream', 'qualities'))),\n ('mp4', 1, video.get('mp4'))):\n for format_id, stream in streams.items():\n if not stream.get('url'):\n continue\n yield {\n 'format_id': f'{format_id}-{ext}',\n 'ext': ext,\n 'source_preference': pref,\n 'url': urljoin(spl_url, stream['url']),\n 'tbr': float_or_none(stream.get('bandwidth'), scale=1000),\n 'duration': float_or_none(stream.get('duration'), scale=1000),\n 'width': traverse_obj(stream, ('resolution', 0)),\n 'height': traverse_obj(stream, ('resolution', 1)) or int_or_none(format_id.replace('p', '')),\n **parse_codecs(stream.get('codec')),\n }\n\n def _real_extract(self, url):\n display_id, video_id = self._match_valid_url(url).groups()\n\n data = self._download_json(\n 'https://www.televizeseznam.cz/api/graphql', video_id, 'Downloading GraphQL result',\n data=json.dumps({\n 'variables': {'urlName': video_id},\n 'query': '''\n query LoadEpisode($urlName : String){ episode(urlName: $urlName){ ...VideoDetailFragmentOnEpisode } }\n fragment VideoDetailFragmentOnEpisode on Episode {\n id\n spl\n urlName\n name\n perex\n duration\n views\n }'''\n }).encode('utf-8'),\n headers={'Content-Type': 'application/json;charset=UTF-8'}\n )['data']['episode']\n\n spl_url = data['spl'] + 'spl2,3'\n metadata = self._download_json(spl_url, video_id, 'Downloading playlist')\n if 'Location' in metadata and 'data' not in metadata:\n spl_url = metadata['Location']\n metadata = self._download_json(spl_url, video_id, 'Downloading redirected playlist')\n video = metadata['data']\n\n subtitles = {}\n for subs in video.get('subtitles', {}).values():\n if not subs.get('language'):\n continue\n for ext, sub_url in subs.get('urls').items():\n subtitles.setdefault(subs['language'], []).append({\n 'ext': ext,\n 'url': urljoin(spl_url, sub_url)\n })\n\n formats = list(self._extract_formats(spl_url, video))\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'display_id': display_id,\n 'title': data.get('name'),\n 'description': data.get('perex'),\n 'duration': float_or_none(data.get('duration')),\n 'view_count': int_or_none(data.get('views')),\n 'formats': formats,\n 'subtitles': subtitles,\n }\n", "path": "yt_dlp/extractor/streamcz.py"}], "after_files": [{"content": "import json\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n float_or_none,\n int_or_none,\n parse_codecs,\n traverse_obj,\n urljoin,\n)\n\n\nclass StreamCZIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?(?:stream|televizeseznam)\\.cz/[^?#]+/(?P<display_id>[^?#]+)-(?P<id>[0-9]+)'\n _TESTS = [{\n 'url': 'https://www.televizeseznam.cz/video/lajna/buh-57953890',\n 'md5': '40c41ade1464a390a0b447e333df4239',\n 'info_dict': {\n 'id': '57953890',\n 'ext': 'mp4',\n 'title': 'B\u016fh',\n 'display_id': 'buh',\n 'description': 'md5:8f5f09b9b7bc67df910486cdd88f7165',\n 'duration': 1369.6,\n 'view_count': int,\n }\n }, {\n 'url': 'https://www.stream.cz/kdo-to-mluvi/kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna-64087937',\n 'md5': '41fd358000086a1ccdb068c77809b158',\n 'info_dict': {\n 'id': '64087937',\n 'ext': 'mp4',\n 'title': 'Kdo to mluv\u00ed? 
Velk\u00e9 odhalen\u00ed p\u0159in\u00e1\u0161\u00ed nov\u00fd po\u0159ad u\u017e od 25. srpna',\n 'display_id': 'kdo-to-mluvi-velke-odhaleni-prinasi-novy-porad-uz-od-25-srpna',\n 'description': 'md5:97a811000a6460266029d6c1c2ebcd59',\n 'duration': 50.2,\n 'view_count': int,\n }\n }, {\n 'url': 'https://www.stream.cz/tajemno/znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili-64147267',\n 'md5': '3ee4d0be040e8f4a543e67e509d55e3f',\n 'info_dict': {\n 'id': '64147267',\n 'ext': 'mp4',\n 'title': 'Zni\u010dehonic jim skrz st\u0159echu prol\u00edtnul z\u00e1hadn\u00fd p\u0159edm\u011bt. Badatel\u00e9 v\u0161e objasnili',\n 'display_id': 'znicehonic-jim-skrz-strechu-prolitnul-zahadny-predmet-badatele-vse-objasnili',\n 'description': 'md5:4b8ada6718d34bb011c4e04ca4bc19bf',\n 'duration': 442.84,\n 'view_count': int,\n }\n }]\n\n def _extract_formats(self, spl_url, video):\n for ext, pref, streams in (\n ('ts', -1, traverse_obj(video, ('http_stream', 'qualities')) or {}),\n ('mp4', 1, video.get('mp4') or {})):\n for format_id, stream in streams.items():\n if not stream.get('url'):\n continue\n yield {\n 'format_id': f'{format_id}-{ext}',\n 'ext': ext,\n 'source_preference': pref,\n 'url': urljoin(spl_url, stream['url']),\n 'tbr': float_or_none(stream.get('bandwidth'), scale=1000),\n 'duration': float_or_none(stream.get('duration'), scale=1000),\n 'width': traverse_obj(stream, ('resolution', 0)),\n 'height': traverse_obj(stream, ('resolution', 1)) or int_or_none(format_id.replace('p', '')),\n **parse_codecs(stream.get('codec')),\n }\n\n def _real_extract(self, url):\n display_id, video_id = self._match_valid_url(url).groups()\n\n data = self._download_json(\n 'https://www.televizeseznam.cz/api/graphql', video_id, 'Downloading GraphQL result',\n data=json.dumps({\n 'variables': {'urlName': video_id},\n 'query': '''\n query LoadEpisode($urlName : String){ episode(urlName: $urlName){ ...VideoDetailFragmentOnEpisode } }\n fragment VideoDetailFragmentOnEpisode on Episode {\n id\n spl\n urlName\n name\n perex\n duration\n views\n }'''\n }).encode('utf-8'),\n headers={'Content-Type': 'application/json;charset=UTF-8'}\n )['data']['episode']\n\n spl_url = data['spl'] + 'spl2,3'\n metadata = self._download_json(spl_url, video_id, 'Downloading playlist')\n if 'Location' in metadata and 'data' not in metadata:\n spl_url = metadata['Location']\n metadata = self._download_json(spl_url, video_id, 'Downloading redirected playlist')\n video = metadata['data']\n\n subtitles = {}\n for subs in video.get('subtitles', {}).values():\n if not subs.get('language'):\n continue\n for ext, sub_url in subs.get('urls').items():\n subtitles.setdefault(subs['language'], []).append({\n 'ext': ext,\n 'url': urljoin(spl_url, sub_url)\n })\n\n formats = list(self._extract_formats(spl_url, video))\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'display_id': display_id,\n 'title': data.get('name'),\n 'description': data.get('perex'),\n 'duration': float_or_none(data.get('duration')),\n 'view_count': int_or_none(data.get('views')),\n 'formats': formats,\n 'subtitles': subtitles,\n }\n", "path": "yt_dlp/extractor/streamcz.py"}]} | 3,565 | 182 |
gh_patches_debug_2571 | rasdani/github-patches | git_diff | cupy__cupy-2318 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError for OutOfMemoryError
Seen while using chainer while multiprocessing and using the GPU:
```
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/usr/lib/python3.6/multiprocessing/pool.py", line 463, in _handle_results
task = get()
File "/usr/lib/python3.6/multiprocessing/connection.py", line 251, in recv
return _ForkingPickler.loads(buf.getbuffer())
File "cupy/cuda/memory.pyx", line 37, in cupy.cuda.memory.OutOfMemoryError.__init__
TypeError: __init__() takes exactly 3 positional arguments (2 given)
```
Seems like it tried to raise an OutOfMemoryError but failed to do so.
```
CuPy Version : 6.1.0
CUDA Root : /usr/local/cuda
CUDA Build Version : 10010
CUDA Driver Version : 10010
CUDA Runtime Version : 10010
cuDNN Build Version : 7500
cuDNN Version : 7500
NCCL Build Version : 2402
NCCL Runtime Version : 2402
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/cuda/compiler.py`
Content:
```
1 import hashlib
2 import math
3 import os
4 import re
5 import shutil
6 import sys
7 import tempfile
8
9 import six
10
11 from cupy.cuda import device
12 from cupy.cuda import function
13 from cupy.cuda import nvrtc
14
15 _nvrtc_version = None
16 _nvrtc_max_compute_capability = None
17
18
19 def _get_nvrtc_version():
20 global _nvrtc_version
21 if _nvrtc_version is None:
22 _nvrtc_version = nvrtc.getVersion()
23
24 return _nvrtc_version
25
26
27 def _get_arch():
28 global _nvrtc_max_compute_capability
29 if _nvrtc_max_compute_capability is None:
30 # See Supported Compile Options section of NVRTC User Guide for
31 # the maximum value allowed for `--gpu-architecture`.
32 major, minor = _get_nvrtc_version()
33 if major < 9:
34 # CUDA 7.0 / 7.5 / 8.0
35 _nvrtc_max_compute_capability = '50'
36 else:
37 # CUDA 9.0 / 9.1
38 _nvrtc_max_compute_capability = '70'
39 cc = min(device.Device().compute_capability, _nvrtc_max_compute_capability)
40 return 'compute_%s' % cc
41
42
43 class TemporaryDirectory(object):
44 def __enter__(self):
45 self.path = tempfile.mkdtemp()
46 return self.path
47
48 def __exit__(self, exc_type, exc_value, traceback):
49 if exc_value is not None:
50 return
51
52 for name in os.listdir(self.path):
53 os.unlink(os.path.join(self.path, name))
54 os.rmdir(self.path)
55
56
57 def _get_bool_env_variable(name, default):
58 val = os.environ.get(name)
59 if val is None or len(val) == 0:
60 return default
61 try:
62 return int(val) == 1
63 except ValueError:
64 return False
65
66
67 def compile_using_nvrtc(source, options=(), arch=None, filename='kern.cu'):
68 if not arch:
69 arch = _get_arch()
70
71 options += ('-arch={}'.format(arch),)
72
73 with TemporaryDirectory() as root_dir:
74 cu_path = os.path.join(root_dir, filename)
75
76 with open(cu_path, 'w') as cu_file:
77 cu_file.write(source)
78
79 prog = _NVRTCProgram(source, cu_path)
80 try:
81 ptx = prog.compile(options)
82 except CompileException as e:
83 dump = _get_bool_env_variable(
84 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
85 if dump:
86 e.dump(sys.stderr)
87 raise
88
89 return ptx
90
91
92 def _preprocess(source, options, arch):
93 options += ('-arch={}'.format(arch),)
94
95 prog = _NVRTCProgram(source, '')
96 try:
97 result = prog.compile(options)
98 except CompileException as e:
99 dump = _get_bool_env_variable(
100 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)
101 if dump:
102 e.dump(sys.stderr)
103 raise
104
105 assert isinstance(result, six.text_type)
106 return result
107
108
109 _default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')
110
111
112 def get_cache_dir():
113 return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)
114
115
116 _empty_file_preprocess_cache = {}
117
118
119 def compile_with_cache(source, options=(), arch=None, cache_dir=None,
120 extra_source=None):
121 # NVRTC does not use extra_source. extra_source is used for cache key.
122 global _empty_file_preprocess_cache
123 if cache_dir is None:
124 cache_dir = get_cache_dir()
125 if arch is None:
126 arch = _get_arch()
127
128 options += ('-ftz=true',)
129 if _get_bool_env_variable('CUPY_CUDA_COMPILE_WITH_DEBUG', False):
130 options += ('--device-debug', '--generate-line-info')
131
132 env = (arch, options, _get_nvrtc_version())
133 base = _empty_file_preprocess_cache.get(env, None)
134 if base is None:
135 # This is checking of NVRTC compiler internal version
136 base = _preprocess('', options, arch)
137 _empty_file_preprocess_cache[env] = base
138 key_src = '%s %s %s %s' % (env, base, source, extra_source)
139
140 key_src = key_src.encode('utf-8')
141 name = '%s_2.cubin' % hashlib.md5(key_src).hexdigest()
142
143 if not os.path.isdir(cache_dir):
144 try:
145 os.makedirs(cache_dir)
146 except OSError:
147 if not os.path.isdir(cache_dir):
148 raise
149
150 mod = function.Module()
151 # To handle conflicts in concurrent situation, we adopt lock-free method
152 # to avoid performance degradation.
153 path = os.path.join(cache_dir, name)
154 if os.path.exists(path):
155 with open(path, 'rb') as file:
156 data = file.read()
157 if len(data) >= 32:
158 hash = data[:32]
159 cubin = data[32:]
160 cubin_hash = six.b(hashlib.md5(cubin).hexdigest())
161 if hash == cubin_hash:
162 mod.load(cubin)
163 return mod
164
165 ptx = compile_using_nvrtc(source, options, arch, name + '.cu')
166 ls = function.LinkState()
167 ls.add_ptr_data(ptx, u'cupy.ptx')
168 cubin = ls.complete()
169 cubin_hash = six.b(hashlib.md5(cubin).hexdigest())
170
171 # shutil.move is not atomic operation, so it could result in a corrupted
172 # file. We detect it by appending md5 hash at the beginning of each cache
173 # file. If the file is corrupted, it will be ignored next time it is read.
174 with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:
175 tf.write(cubin_hash)
176 tf.write(cubin)
177 temp_path = tf.name
178 shutil.move(temp_path, path)
179
180 # Save .cu source file along with .cubin
181 if _get_bool_env_variable('CUPY_CACHE_SAVE_CUDA_SOURCE', False):
182 with open(path + '.cu', 'w') as f:
183 f.write(source)
184
185 mod.load(cubin)
186 return mod
187
188
189 class CompileException(Exception):
190
191 def __init__(self, msg, source, name, options):
192 self._msg = msg
193 self.source = source
194 self.name = name
195 self.options = options
196
197 def __repr__(self):
198 return str(self)
199
200 def __str__(self):
201 return self.get_message()
202
203 def get_message(self):
204 return self._msg
205
206 def dump(self, f):
207 lines = self.source.split('\n')
208 digits = int(math.floor(math.log10(len(lines)))) + 1
209 linum_fmt = '{{:0{}d}} '.format(digits)
210 f.write('NVRTC compilation error: {}\n'.format(self))
211 f.write('-----\n')
212 f.write('Name: {}\n'.format(self.name))
213 f.write('Options: {}\n'.format(' '.join(self.options)))
214 f.write('CUDA source:\n')
215 for i, line in enumerate(lines):
216 f.write(linum_fmt.format(i + 1) + line.rstrip() + '\n')
217 f.write('-----\n')
218 f.flush()
219
220
221 class _NVRTCProgram(object):
222
223 def __init__(self, src, name='default_program', headers=(),
224 include_names=()):
225 self.ptr = None
226
227 if isinstance(src, six.binary_type):
228 src = src.decode('UTF-8')
229 if isinstance(name, six.binary_type):
230 name = name.decode('UTF-8')
231
232 self.src = src
233 self.name = name
234 self.ptr = nvrtc.createProgram(src, name, headers, include_names)
235
236 def __del__(self):
237 if self.ptr:
238 nvrtc.destroyProgram(self.ptr)
239
240 def compile(self, options=()):
241 try:
242 nvrtc.compileProgram(self.ptr, options)
243 return nvrtc.getPTX(self.ptr)
244 except nvrtc.NVRTCError:
245 log = nvrtc.getProgramLog(self.ptr)
246 raise CompileException(log, self.src, self.name, options)
247
248
249 def is_valid_kernel_name(name):
250 return re.match('^[a-zA-Z_][a-zA-Z_0-9]*$', name) is not None
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py
--- a/cupy/cuda/compiler.py
+++ b/cupy/cuda/compiler.py
@@ -193,6 +193,10 @@
self.source = source
self.name = name
self.options = options
+ super(CompileException, self).__init__()
+
+ def __reduce__(self):
+ return (type(self), (self._msg, self.source, self.name, self.options))
def __repr__(self):
return str(self)
| {"golden_diff": "diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py\n--- a/cupy/cuda/compiler.py\n+++ b/cupy/cuda/compiler.py\n@@ -193,6 +193,10 @@\n self.source = source\n self.name = name\n self.options = options\n+ super(CompileException, self).__init__()\n+\n+ def __reduce__(self):\n+ return (type(self), (self._msg, self.source, self.name, self.options))\n \n def __repr__(self):\n return str(self)\n", "issue": "TypeError for OutOfMemoryError\nSeen while using chainer while multiprocessing and using the GPU:\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.6/threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.6/threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/usr/lib/python3.6/multiprocessing/pool.py\", line 463, in _handle_results\r\n task = get()\r\n File \"/usr/lib/python3.6/multiprocessing/connection.py\", line 251, in recv\r\n return _ForkingPickler.loads(buf.getbuffer())\r\n File \"cupy/cuda/memory.pyx\", line 37, in cupy.cuda.memory.OutOfMemoryError.__init__\r\nTypeError: __init__() takes exactly 3 positional arguments (2 given)\r\n```\r\nSeems like it tried to raise an OutOfMemoryError but failed to do so.\r\n\r\n```\r\nCuPy Version : 6.1.0\r\nCUDA Root : /usr/local/cuda\r\nCUDA Build Version : 10010\r\nCUDA Driver Version : 10010\r\nCUDA Runtime Version : 10010\r\ncuDNN Build Version : 7500\r\ncuDNN Version : 7500\r\nNCCL Build Version : 2402\r\nNCCL Runtime Version : 2402\r\n```\n", "before_files": [{"content": "import hashlib\nimport math\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\n\nimport six\n\nfrom cupy.cuda import device\nfrom cupy.cuda import function\nfrom cupy.cuda import nvrtc\n\n_nvrtc_version = None\n_nvrtc_max_compute_capability = None\n\n\ndef _get_nvrtc_version():\n global _nvrtc_version\n if _nvrtc_version is None:\n _nvrtc_version = nvrtc.getVersion()\n\n return _nvrtc_version\n\n\ndef _get_arch():\n global _nvrtc_max_compute_capability\n if _nvrtc_max_compute_capability is None:\n # See Supported Compile Options section of NVRTC User Guide for\n # the maximum value allowed for `--gpu-architecture`.\n major, minor = _get_nvrtc_version()\n if major < 9:\n # CUDA 7.0 / 7.5 / 8.0\n _nvrtc_max_compute_capability = '50'\n else:\n # CUDA 9.0 / 9.1\n _nvrtc_max_compute_capability = '70'\n cc = min(device.Device().compute_capability, _nvrtc_max_compute_capability)\n return 'compute_%s' % cc\n\n\nclass TemporaryDirectory(object):\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_value is not None:\n return\n\n for name in os.listdir(self.path):\n os.unlink(os.path.join(self.path, name))\n os.rmdir(self.path)\n\n\ndef _get_bool_env_variable(name, default):\n val = os.environ.get(name)\n if val is None or len(val) == 0:\n return default\n try:\n return int(val) == 1\n except ValueError:\n return False\n\n\ndef compile_using_nvrtc(source, options=(), arch=None, filename='kern.cu'):\n if not arch:\n arch = _get_arch()\n\n options += ('-arch={}'.format(arch),)\n\n with TemporaryDirectory() as root_dir:\n cu_path = os.path.join(root_dir, filename)\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n prog = _NVRTCProgram(source, cu_path)\n try:\n ptx = prog.compile(options)\n except CompileException as e:\n dump = _get_bool_env_variable(\n 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)\n if dump:\n e.dump(sys.stderr)\n raise\n\n return 
ptx\n\n\ndef _preprocess(source, options, arch):\n options += ('-arch={}'.format(arch),)\n\n prog = _NVRTCProgram(source, '')\n try:\n result = prog.compile(options)\n except CompileException as e:\n dump = _get_bool_env_variable(\n 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)\n if dump:\n e.dump(sys.stderr)\n raise\n\n assert isinstance(result, six.text_type)\n return result\n\n\n_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n\n\ndef get_cache_dir():\n return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\n\n\n_empty_file_preprocess_cache = {}\n\n\ndef compile_with_cache(source, options=(), arch=None, cache_dir=None,\n extra_source=None):\n # NVRTC does not use extra_source. extra_source is used for cache key.\n global _empty_file_preprocess_cache\n if cache_dir is None:\n cache_dir = get_cache_dir()\n if arch is None:\n arch = _get_arch()\n\n options += ('-ftz=true',)\n if _get_bool_env_variable('CUPY_CUDA_COMPILE_WITH_DEBUG', False):\n options += ('--device-debug', '--generate-line-info')\n\n env = (arch, options, _get_nvrtc_version())\n base = _empty_file_preprocess_cache.get(env, None)\n if base is None:\n # This is checking of NVRTC compiler internal version\n base = _preprocess('', options, arch)\n _empty_file_preprocess_cache[env] = base\n key_src = '%s %s %s %s' % (env, base, source, extra_source)\n\n key_src = key_src.encode('utf-8')\n name = '%s_2.cubin' % hashlib.md5(key_src).hexdigest()\n\n if not os.path.isdir(cache_dir):\n try:\n os.makedirs(cache_dir)\n except OSError:\n if not os.path.isdir(cache_dir):\n raise\n\n mod = function.Module()\n # To handle conflicts in concurrent situation, we adopt lock-free method\n # to avoid performance degradation.\n path = os.path.join(cache_dir, name)\n if os.path.exists(path):\n with open(path, 'rb') as file:\n data = file.read()\n if len(data) >= 32:\n hash = data[:32]\n cubin = data[32:]\n cubin_hash = six.b(hashlib.md5(cubin).hexdigest())\n if hash == cubin_hash:\n mod.load(cubin)\n return mod\n\n ptx = compile_using_nvrtc(source, options, arch, name + '.cu')\n ls = function.LinkState()\n ls.add_ptr_data(ptx, u'cupy.ptx')\n cubin = ls.complete()\n cubin_hash = six.b(hashlib.md5(cubin).hexdigest())\n\n # shutil.move is not atomic operation, so it could result in a corrupted\n # file. We detect it by appending md5 hash at the beginning of each cache\n # file. 
If the file is corrupted, it will be ignored next time it is read.\n with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:\n tf.write(cubin_hash)\n tf.write(cubin)\n temp_path = tf.name\n shutil.move(temp_path, path)\n\n # Save .cu source file along with .cubin\n if _get_bool_env_variable('CUPY_CACHE_SAVE_CUDA_SOURCE', False):\n with open(path + '.cu', 'w') as f:\n f.write(source)\n\n mod.load(cubin)\n return mod\n\n\nclass CompileException(Exception):\n\n def __init__(self, msg, source, name, options):\n self._msg = msg\n self.source = source\n self.name = name\n self.options = options\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n return self.get_message()\n\n def get_message(self):\n return self._msg\n\n def dump(self, f):\n lines = self.source.split('\\n')\n digits = int(math.floor(math.log10(len(lines)))) + 1\n linum_fmt = '{{:0{}d}} '.format(digits)\n f.write('NVRTC compilation error: {}\\n'.format(self))\n f.write('-----\\n')\n f.write('Name: {}\\n'.format(self.name))\n f.write('Options: {}\\n'.format(' '.join(self.options)))\n f.write('CUDA source:\\n')\n for i, line in enumerate(lines):\n f.write(linum_fmt.format(i + 1) + line.rstrip() + '\\n')\n f.write('-----\\n')\n f.flush()\n\n\nclass _NVRTCProgram(object):\n\n def __init__(self, src, name='default_program', headers=(),\n include_names=()):\n self.ptr = None\n\n if isinstance(src, six.binary_type):\n src = src.decode('UTF-8')\n if isinstance(name, six.binary_type):\n name = name.decode('UTF-8')\n\n self.src = src\n self.name = name\n self.ptr = nvrtc.createProgram(src, name, headers, include_names)\n\n def __del__(self):\n if self.ptr:\n nvrtc.destroyProgram(self.ptr)\n\n def compile(self, options=()):\n try:\n nvrtc.compileProgram(self.ptr, options)\n return nvrtc.getPTX(self.ptr)\n except nvrtc.NVRTCError:\n log = nvrtc.getProgramLog(self.ptr)\n raise CompileException(log, self.src, self.name, options)\n\n\ndef is_valid_kernel_name(name):\n return re.match('^[a-zA-Z_][a-zA-Z_0-9]*$', name) is not None\n", "path": "cupy/cuda/compiler.py"}], "after_files": [{"content": "import hashlib\nimport math\nimport os\nimport re\nimport shutil\nimport sys\nimport tempfile\n\nimport six\n\nfrom cupy.cuda import device\nfrom cupy.cuda import function\nfrom cupy.cuda import nvrtc\n\n_nvrtc_version = None\n_nvrtc_max_compute_capability = None\n\n\ndef _get_nvrtc_version():\n global _nvrtc_version\n if _nvrtc_version is None:\n _nvrtc_version = nvrtc.getVersion()\n\n return _nvrtc_version\n\n\ndef _get_arch():\n global _nvrtc_max_compute_capability\n if _nvrtc_max_compute_capability is None:\n # See Supported Compile Options section of NVRTC User Guide for\n # the maximum value allowed for `--gpu-architecture`.\n major, minor = _get_nvrtc_version()\n if major < 9:\n # CUDA 7.0 / 7.5 / 8.0\n _nvrtc_max_compute_capability = '50'\n else:\n # CUDA 9.0 / 9.1\n _nvrtc_max_compute_capability = '70'\n cc = min(device.Device().compute_capability, _nvrtc_max_compute_capability)\n return 'compute_%s' % cc\n\n\nclass TemporaryDirectory(object):\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_value is not None:\n return\n\n for name in os.listdir(self.path):\n os.unlink(os.path.join(self.path, name))\n os.rmdir(self.path)\n\n\ndef _get_bool_env_variable(name, default):\n val = os.environ.get(name)\n if val is None or len(val) == 0:\n return default\n try:\n return int(val) == 1\n except ValueError:\n return False\n\n\ndef 
compile_using_nvrtc(source, options=(), arch=None, filename='kern.cu'):\n if not arch:\n arch = _get_arch()\n\n options += ('-arch={}'.format(arch),)\n\n with TemporaryDirectory() as root_dir:\n cu_path = os.path.join(root_dir, filename)\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n prog = _NVRTCProgram(source, cu_path)\n try:\n ptx = prog.compile(options)\n except CompileException as e:\n dump = _get_bool_env_variable(\n 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)\n if dump:\n e.dump(sys.stderr)\n raise\n\n return ptx\n\n\ndef _preprocess(source, options, arch):\n options += ('-arch={}'.format(arch),)\n\n prog = _NVRTCProgram(source, '')\n try:\n result = prog.compile(options)\n except CompileException as e:\n dump = _get_bool_env_variable(\n 'CUPY_DUMP_CUDA_SOURCE_ON_ERROR', False)\n if dump:\n e.dump(sys.stderr)\n raise\n\n assert isinstance(result, six.text_type)\n return result\n\n\n_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n\n\ndef get_cache_dir():\n return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\n\n\n_empty_file_preprocess_cache = {}\n\n\ndef compile_with_cache(source, options=(), arch=None, cache_dir=None,\n extra_source=None):\n # NVRTC does not use extra_source. extra_source is used for cache key.\n global _empty_file_preprocess_cache\n if cache_dir is None:\n cache_dir = get_cache_dir()\n if arch is None:\n arch = _get_arch()\n\n options += ('-ftz=true',)\n if _get_bool_env_variable('CUPY_CUDA_COMPILE_WITH_DEBUG', False):\n options += ('--device-debug', '--generate-line-info')\n\n env = (arch, options, _get_nvrtc_version())\n base = _empty_file_preprocess_cache.get(env, None)\n if base is None:\n # This is checking of NVRTC compiler internal version\n base = _preprocess('', options, arch)\n _empty_file_preprocess_cache[env] = base\n key_src = '%s %s %s %s' % (env, base, source, extra_source)\n\n key_src = key_src.encode('utf-8')\n name = '%s_2.cubin' % hashlib.md5(key_src).hexdigest()\n\n if not os.path.isdir(cache_dir):\n try:\n os.makedirs(cache_dir)\n except OSError:\n if not os.path.isdir(cache_dir):\n raise\n\n mod = function.Module()\n # To handle conflicts in concurrent situation, we adopt lock-free method\n # to avoid performance degradation.\n path = os.path.join(cache_dir, name)\n if os.path.exists(path):\n with open(path, 'rb') as file:\n data = file.read()\n if len(data) >= 32:\n hash = data[:32]\n cubin = data[32:]\n cubin_hash = six.b(hashlib.md5(cubin).hexdigest())\n if hash == cubin_hash:\n mod.load(cubin)\n return mod\n\n ptx = compile_using_nvrtc(source, options, arch, name + '.cu')\n ls = function.LinkState()\n ls.add_ptr_data(ptx, u'cupy.ptx')\n cubin = ls.complete()\n cubin_hash = six.b(hashlib.md5(cubin).hexdigest())\n\n # shutil.move is not atomic operation, so it could result in a corrupted\n # file. We detect it by appending md5 hash at the beginning of each cache\n # file. 
If the file is corrupted, it will be ignored next time it is read.\n with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tf:\n tf.write(cubin_hash)\n tf.write(cubin)\n temp_path = tf.name\n shutil.move(temp_path, path)\n\n # Save .cu source file along with .cubin\n if _get_bool_env_variable('CUPY_CACHE_SAVE_CUDA_SOURCE', False):\n with open(path + '.cu', 'w') as f:\n f.write(source)\n\n mod.load(cubin)\n return mod\n\n\nclass CompileException(Exception):\n\n def __init__(self, msg, source, name, options):\n self._msg = msg\n self.source = source\n self.name = name\n self.options = options\n super(CompileException, self).__init__()\n\n def __reduce__(self):\n return (type(self), (self._msg, self.source, self.name, self.options))\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n return self.get_message()\n\n def get_message(self):\n return self._msg\n\n def dump(self, f):\n lines = self.source.split('\\n')\n digits = int(math.floor(math.log10(len(lines)))) + 1\n linum_fmt = '{{:0{}d}} '.format(digits)\n f.write('NVRTC compilation error: {}\\n'.format(self))\n f.write('-----\\n')\n f.write('Name: {}\\n'.format(self.name))\n f.write('Options: {}\\n'.format(' '.join(self.options)))\n f.write('CUDA source:\\n')\n for i, line in enumerate(lines):\n f.write(linum_fmt.format(i + 1) + line.rstrip() + '\\n')\n f.write('-----\\n')\n f.flush()\n\n\nclass _NVRTCProgram(object):\n\n def __init__(self, src, name='default_program', headers=(),\n include_names=()):\n self.ptr = None\n\n if isinstance(src, six.binary_type):\n src = src.decode('UTF-8')\n if isinstance(name, six.binary_type):\n name = name.decode('UTF-8')\n\n self.src = src\n self.name = name\n self.ptr = nvrtc.createProgram(src, name, headers, include_names)\n\n def __del__(self):\n if self.ptr:\n nvrtc.destroyProgram(self.ptr)\n\n def compile(self, options=()):\n try:\n nvrtc.compileProgram(self.ptr, options)\n return nvrtc.getPTX(self.ptr)\n except nvrtc.NVRTCError:\n log = nvrtc.getProgramLog(self.ptr)\n raise CompileException(log, self.src, self.name, options)\n\n\ndef is_valid_kernel_name(name):\n return re.match('^[a-zA-Z_][a-zA-Z_0-9]*$', name) is not None\n", "path": "cupy/cuda/compiler.py"}]} | 3,082 | 123 |
gh_patches_debug_6199 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-886 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SkillDialog not working for non-'expected replies' scenario
## Version
4.8.0
## Describe the bug
SkillDialog won't work out of the box for non expected-replies scenarios.
## To Reproduce
Steps to reproduce the behavior:
1. Set up a root bot using skill dialog and a skill bot
2. Run both bots and initiate the SkillDialog
3. When the skill first comes back to the parent an error like the following should arise:
```
File "..path-to-botbuilder/botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py", line 213, in _send_to_skill
if not 200 <= response.status <= 299:
AttributeError: 'NoneType' object has no attribute 'status'
```
## Expected behavior
The response get back to the parent without any problems
## Workaround
If the skill bot is modified to always send some content in every successful response at the route handler level, the scenario should work.
Example on how to do this for an aiohttp skill bot:
```python
#This is how a typical message handler method could look like
async def messages(req: Request) -> Response:
# Main bot message handler.
if "application/json" in req.headers["Content-Type"]:
body = await req.json()
else:
return Response(status=415)
activity = Activity().deserialize(body)
auth_header = req.headers["Authorization"] if "Authorization" in req.headers else ""
response = await ADAPTER.process_activity(activity, auth_header, BOT.on_turn)
if response:
return json_response(data=response.body, status=response.status)
# THE FIX IS IN THE LINE BELOW
return Response(status=201, body='{"foo":"bar"}'.encode("utf-8"))
```
**Alternative Workaround:** use expected replies as delivery method in the parent bot (SkillDialog).
[bug]
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 # pylint: disable=no-member
4
5 import json
6 from typing import Dict
7 from logging import Logger
8
9 import aiohttp
10 from botbuilder.core import InvokeResponse
11 from botbuilder.core.skills import BotFrameworkClient
12 from botbuilder.schema import (
13 Activity,
14 ExpectedReplies,
15 ConversationReference,
16 ConversationAccount,
17 )
18 from botframework.connector.auth import (
19 ChannelProvider,
20 CredentialProvider,
21 GovernmentConstants,
22 MicrosoftAppCredentials,
23 )
24
25
26 class BotFrameworkHttpClient(BotFrameworkClient):
27
28 """
29 A skill host adapter implements API to forward activity to a skill and
30 implements routing ChannelAPI calls from the Skill up through the bot/adapter.
31 """
32
33 INVOKE_ACTIVITY_NAME = "SkillEvents.ChannelApiInvoke"
34 _BOT_IDENTITY_KEY = "BotIdentity"
35 _APP_CREDENTIALS_CACHE: Dict[str, MicrosoftAppCredentials] = {}
36
37 def __init__(
38 self,
39 credential_provider: CredentialProvider,
40 channel_provider: ChannelProvider = None,
41 logger: Logger = None,
42 ):
43 if not credential_provider:
44 raise TypeError("credential_provider can't be None")
45
46 self._credential_provider = credential_provider
47 self._channel_provider = channel_provider
48 self._logger = logger
49 self._session = aiohttp.ClientSession()
50
51 async def post_activity(
52 self,
53 from_bot_id: str,
54 to_bot_id: str,
55 to_url: str,
56 service_url: str,
57 conversation_id: str,
58 activity: Activity,
59 ) -> InvokeResponse:
60 app_credentials = await self._get_app_credentials(from_bot_id, to_bot_id)
61
62 if not app_credentials:
63 raise KeyError("Unable to get appCredentials to connect to the skill")
64
65 # Get token for the skill call
66 token = (
67 app_credentials.get_access_token()
68 if app_credentials.microsoft_app_id
69 else None
70 )
71
72 # Capture current activity settings before changing them.
73 # TODO: DO we need to set the activity ID? (events that are created manually don't have it).
74 original_conversation_id = activity.conversation.id
75 original_service_url = activity.service_url
76 original_caller_id = activity.caller_id
77 original_relates_to = activity.relates_to
78
79 try:
80 # TODO: The relato has to be ported to the adapter in the new integration library when
81 # resolving conflicts in merge
82 activity.relates_to = ConversationReference(
83 service_url=activity.service_url,
84 activity_id=activity.id,
85 channel_id=activity.channel_id,
86 conversation=ConversationAccount(
87 id=activity.conversation.id,
88 name=activity.conversation.name,
89 conversation_type=activity.conversation.conversation_type,
90 aad_object_id=activity.conversation.aad_object_id,
91 is_group=activity.conversation.is_group,
92 role=activity.conversation.role,
93 tenant_id=activity.conversation.tenant_id,
94 properties=activity.conversation.properties,
95 ),
96 bot=None,
97 )
98 activity.conversation.id = conversation_id
99 activity.service_url = service_url
100 activity.caller_id = f"urn:botframework:aadappid:{from_bot_id}"
101
102 headers_dict = {
103 "Content-type": "application/json; charset=utf-8",
104 }
105 if token:
106 headers_dict.update(
107 {"Authorization": f"Bearer {token}",}
108 )
109
110 json_content = json.dumps(activity.serialize())
111 resp = await self._session.post(
112 to_url, data=json_content.encode("utf-8"), headers=headers_dict,
113 )
114 resp.raise_for_status()
115 data = (await resp.read()).decode()
116 content = json.loads(data) if data else None
117
118 if content:
119 return InvokeResponse(status=resp.status, body=content)
120
121 finally:
122 # Restore activity properties.
123 activity.conversation.id = original_conversation_id
124 activity.service_url = original_service_url
125 activity.caller_id = original_caller_id
126 activity.relates_to = original_relates_to
127
128 async def post_buffered_activity(
129 self,
130 from_bot_id: str,
131 to_bot_id: str,
132 to_url: str,
133 service_url: str,
134 conversation_id: str,
135 activity: Activity,
136 ) -> [Activity]:
137 """
138 Helper method to return a list of activities when an Activity is being
139 sent with DeliveryMode == expectReplies.
140 """
141 response = await self.post_activity(
142 from_bot_id, to_bot_id, to_url, service_url, conversation_id, activity
143 )
144 if not response or (response.status / 100) != 2:
145 return []
146 return ExpectedReplies().deserialize(response.body).activities
147
148 async def _get_app_credentials(
149 self, app_id: str, oauth_scope: str
150 ) -> MicrosoftAppCredentials:
151 if not app_id:
152 return MicrosoftAppCredentials(None, None)
153
154 cache_key = f"{app_id}{oauth_scope}"
155 app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key)
156
157 if app_credentials:
158 return app_credentials
159
160 app_password = await self._credential_provider.get_app_password(app_id)
161 app_credentials = MicrosoftAppCredentials(
162 app_id, app_password, oauth_scope=oauth_scope
163 )
164 if self._channel_provider and self._channel_provider.is_government():
165 app_credentials.oauth_endpoint = (
166 GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL
167 )
168 app_credentials.oauth_scope = (
169 GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE
170 )
171
172 BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials
173 return app_credentials
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py
--- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py
+++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py
@@ -115,8 +115,7 @@
data = (await resp.read()).decode()
content = json.loads(data) if data else None
- if content:
- return InvokeResponse(status=resp.status, body=content)
+ return InvokeResponse(status=resp.status, body=content)
finally:
# Restore activity properties.
| {"golden_diff": "diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n--- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n+++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n@@ -115,8 +115,7 @@\n data = (await resp.read()).decode()\n content = json.loads(data) if data else None\n \n- if content:\n- return InvokeResponse(status=resp.status, body=content)\n+ return InvokeResponse(status=resp.status, body=content)\n \n finally:\n # Restore activity properties.\n", "issue": "SkillDialog not working for non-'expected replies' scenario\n## Version\r\n4.8.0\r\n\r\n## Describe the bug\r\nSkillDialog won't work out of the box for non expected-replies scenarios.\r\n\r\n## To Reproduce\r\nSteps to reproduce the behavior:\r\n1. Set up a root bot using skill dialog and a skill bot\r\n2. Run both bots and initiate the SkillDialog\r\n3. When the skill first comes back to the parent an error like the following should arise:\r\n```\r\nFile \"..path-to-botbuilder/botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py\", line 213, in _send_to_skill\r\n if not 200 <= response.status <= 299:\r\nAttributeError: 'NoneType' object has no attribute 'status'\r\n```\r\n\r\n## Expected behavior\r\nThe response get back to the parent without any problems\r\n\r\n## Workaround\r\nIf the skill bot is modified to always send some content in every successful response at the route handler level, the scenario should work.\r\n\r\nExample on how to do this for an aiohttp skill bot:\r\n```python\r\n#This is how a typical message handler method could look like\r\nasync def messages(req: Request) -> Response:\r\n # Main bot message handler.\r\n if \"application/json\" in req.headers[\"Content-Type\"]:\r\n body = await req.json()\r\n else:\r\n return Response(status=415)\r\n\r\n activity = Activity().deserialize(body)\r\n auth_header = req.headers[\"Authorization\"] if \"Authorization\" in req.headers else \"\"\r\n\r\n response = await ADAPTER.process_activity(activity, auth_header, BOT.on_turn)\r\n if response:\r\n return json_response(data=response.body, status=response.status)\r\n # THE FIX IS IN THE LINE BELOW\r\n return Response(status=201, body='{\"foo\":\"bar\"}'.encode(\"utf-8\"))\r\n```\r\n\r\n**Alternative Workaround:** use expected replies as delivery method in the parent bot (SkillDialog).\r\n\r\n[bug]\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n# pylint: disable=no-member\n\nimport json\nfrom typing import Dict\nfrom logging import Logger\n\nimport aiohttp\nfrom botbuilder.core import InvokeResponse\nfrom botbuilder.core.skills import BotFrameworkClient\nfrom botbuilder.schema import (\n Activity,\n ExpectedReplies,\n ConversationReference,\n ConversationAccount,\n)\nfrom botframework.connector.auth import (\n ChannelProvider,\n CredentialProvider,\n GovernmentConstants,\n MicrosoftAppCredentials,\n)\n\n\nclass BotFrameworkHttpClient(BotFrameworkClient):\n\n \"\"\"\n A skill host adapter implements API to forward activity to a skill and\n implements routing ChannelAPI calls from the Skill up through the bot/adapter.\n \"\"\"\n\n INVOKE_ACTIVITY_NAME = \"SkillEvents.ChannelApiInvoke\"\n _BOT_IDENTITY_KEY = \"BotIdentity\"\n _APP_CREDENTIALS_CACHE: Dict[str, MicrosoftAppCredentials] = {}\n\n def __init__(\n self,\n credential_provider: CredentialProvider,\n channel_provider: ChannelProvider = None,\n logger: Logger = None,\n ):\n if not credential_provider:\n raise TypeError(\"credential_provider can't be None\")\n\n self._credential_provider = credential_provider\n self._channel_provider = channel_provider\n self._logger = logger\n self._session = aiohttp.ClientSession()\n\n async def post_activity(\n self,\n from_bot_id: str,\n to_bot_id: str,\n to_url: str,\n service_url: str,\n conversation_id: str,\n activity: Activity,\n ) -> InvokeResponse:\n app_credentials = await self._get_app_credentials(from_bot_id, to_bot_id)\n\n if not app_credentials:\n raise KeyError(\"Unable to get appCredentials to connect to the skill\")\n\n # Get token for the skill call\n token = (\n app_credentials.get_access_token()\n if app_credentials.microsoft_app_id\n else None\n )\n\n # Capture current activity settings before changing them.\n # TODO: DO we need to set the activity ID? 
(events that are created manually don't have it).\n original_conversation_id = activity.conversation.id\n original_service_url = activity.service_url\n original_caller_id = activity.caller_id\n original_relates_to = activity.relates_to\n\n try:\n # TODO: The relato has to be ported to the adapter in the new integration library when\n # resolving conflicts in merge\n activity.relates_to = ConversationReference(\n service_url=activity.service_url,\n activity_id=activity.id,\n channel_id=activity.channel_id,\n conversation=ConversationAccount(\n id=activity.conversation.id,\n name=activity.conversation.name,\n conversation_type=activity.conversation.conversation_type,\n aad_object_id=activity.conversation.aad_object_id,\n is_group=activity.conversation.is_group,\n role=activity.conversation.role,\n tenant_id=activity.conversation.tenant_id,\n properties=activity.conversation.properties,\n ),\n bot=None,\n )\n activity.conversation.id = conversation_id\n activity.service_url = service_url\n activity.caller_id = f\"urn:botframework:aadappid:{from_bot_id}\"\n\n headers_dict = {\n \"Content-type\": \"application/json; charset=utf-8\",\n }\n if token:\n headers_dict.update(\n {\"Authorization\": f\"Bearer {token}\",}\n )\n\n json_content = json.dumps(activity.serialize())\n resp = await self._session.post(\n to_url, data=json_content.encode(\"utf-8\"), headers=headers_dict,\n )\n resp.raise_for_status()\n data = (await resp.read()).decode()\n content = json.loads(data) if data else None\n\n if content:\n return InvokeResponse(status=resp.status, body=content)\n\n finally:\n # Restore activity properties.\n activity.conversation.id = original_conversation_id\n activity.service_url = original_service_url\n activity.caller_id = original_caller_id\n activity.relates_to = original_relates_to\n\n async def post_buffered_activity(\n self,\n from_bot_id: str,\n to_bot_id: str,\n to_url: str,\n service_url: str,\n conversation_id: str,\n activity: Activity,\n ) -> [Activity]:\n \"\"\"\n Helper method to return a list of activities when an Activity is being\n sent with DeliveryMode == expectReplies.\n \"\"\"\n response = await self.post_activity(\n from_bot_id, to_bot_id, to_url, service_url, conversation_id, activity\n )\n if not response or (response.status / 100) != 2:\n return []\n return ExpectedReplies().deserialize(response.body).activities\n\n async def _get_app_credentials(\n self, app_id: str, oauth_scope: str\n ) -> MicrosoftAppCredentials:\n if not app_id:\n return MicrosoftAppCredentials(None, None)\n\n cache_key = f\"{app_id}{oauth_scope}\"\n app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key)\n\n if app_credentials:\n return app_credentials\n\n app_password = await self._credential_provider.get_app_password(app_id)\n app_credentials = MicrosoftAppCredentials(\n app_id, app_password, oauth_scope=oauth_scope\n )\n if self._channel_provider and self._channel_provider.is_government():\n app_credentials.oauth_endpoint = (\n GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL\n )\n app_credentials.oauth_scope = (\n GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n )\n\n BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials\n return app_credentials\n", "path": "libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n# pylint: disable=no-member\n\nimport json\nfrom typing import Dict\nfrom logging import Logger\n\nimport aiohttp\nfrom botbuilder.core import InvokeResponse\nfrom botbuilder.core.skills import BotFrameworkClient\nfrom botbuilder.schema import (\n Activity,\n ExpectedReplies,\n ConversationReference,\n ConversationAccount,\n)\nfrom botframework.connector.auth import (\n ChannelProvider,\n CredentialProvider,\n GovernmentConstants,\n MicrosoftAppCredentials,\n)\n\n\nclass BotFrameworkHttpClient(BotFrameworkClient):\n\n \"\"\"\n A skill host adapter implements API to forward activity to a skill and\n implements routing ChannelAPI calls from the Skill up through the bot/adapter.\n \"\"\"\n\n INVOKE_ACTIVITY_NAME = \"SkillEvents.ChannelApiInvoke\"\n _BOT_IDENTITY_KEY = \"BotIdentity\"\n _APP_CREDENTIALS_CACHE: Dict[str, MicrosoftAppCredentials] = {}\n\n def __init__(\n self,\n credential_provider: CredentialProvider,\n channel_provider: ChannelProvider = None,\n logger: Logger = None,\n ):\n if not credential_provider:\n raise TypeError(\"credential_provider can't be None\")\n\n self._credential_provider = credential_provider\n self._channel_provider = channel_provider\n self._logger = logger\n self._session = aiohttp.ClientSession()\n\n async def post_activity(\n self,\n from_bot_id: str,\n to_bot_id: str,\n to_url: str,\n service_url: str,\n conversation_id: str,\n activity: Activity,\n ) -> InvokeResponse:\n app_credentials = await self._get_app_credentials(from_bot_id, to_bot_id)\n\n if not app_credentials:\n raise KeyError(\"Unable to get appCredentials to connect to the skill\")\n\n # Get token for the skill call\n token = (\n app_credentials.get_access_token()\n if app_credentials.microsoft_app_id\n else None\n )\n\n # Capture current activity settings before changing them.\n # TODO: DO we need to set the activity ID? 
(events that are created manually don't have it).\n original_conversation_id = activity.conversation.id\n original_service_url = activity.service_url\n original_caller_id = activity.caller_id\n original_relates_to = activity.relates_to\n\n try:\n # TODO: The relato has to be ported to the adapter in the new integration library when\n # resolving conflicts in merge\n activity.relates_to = ConversationReference(\n service_url=activity.service_url,\n activity_id=activity.id,\n channel_id=activity.channel_id,\n conversation=ConversationAccount(\n id=activity.conversation.id,\n name=activity.conversation.name,\n conversation_type=activity.conversation.conversation_type,\n aad_object_id=activity.conversation.aad_object_id,\n is_group=activity.conversation.is_group,\n role=activity.conversation.role,\n tenant_id=activity.conversation.tenant_id,\n properties=activity.conversation.properties,\n ),\n bot=None,\n )\n activity.conversation.id = conversation_id\n activity.service_url = service_url\n activity.caller_id = f\"urn:botframework:aadappid:{from_bot_id}\"\n\n headers_dict = {\n \"Content-type\": \"application/json; charset=utf-8\",\n }\n if token:\n headers_dict.update(\n {\"Authorization\": f\"Bearer {token}\",}\n )\n\n json_content = json.dumps(activity.serialize())\n resp = await self._session.post(\n to_url, data=json_content.encode(\"utf-8\"), headers=headers_dict,\n )\n resp.raise_for_status()\n data = (await resp.read()).decode()\n content = json.loads(data) if data else None\n\n return InvokeResponse(status=resp.status, body=content)\n\n finally:\n # Restore activity properties.\n activity.conversation.id = original_conversation_id\n activity.service_url = original_service_url\n activity.caller_id = original_caller_id\n activity.relates_to = original_relates_to\n\n async def post_buffered_activity(\n self,\n from_bot_id: str,\n to_bot_id: str,\n to_url: str,\n service_url: str,\n conversation_id: str,\n activity: Activity,\n ) -> [Activity]:\n \"\"\"\n Helper method to return a list of activities when an Activity is being\n sent with DeliveryMode == expectReplies.\n \"\"\"\n response = await self.post_activity(\n from_bot_id, to_bot_id, to_url, service_url, conversation_id, activity\n )\n if not response or (response.status / 100) != 2:\n return []\n return ExpectedReplies().deserialize(response.body).activities\n\n async def _get_app_credentials(\n self, app_id: str, oauth_scope: str\n ) -> MicrosoftAppCredentials:\n if not app_id:\n return MicrosoftAppCredentials(None, None)\n\n cache_key = f\"{app_id}{oauth_scope}\"\n app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key)\n\n if app_credentials:\n return app_credentials\n\n app_password = await self._credential_provider.get_app_password(app_id)\n app_credentials = MicrosoftAppCredentials(\n app_id, app_password, oauth_scope=oauth_scope\n )\n if self._channel_provider and self._channel_provider.is_government():\n app_credentials.oauth_endpoint = (\n GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL\n )\n app_credentials.oauth_scope = (\n GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n )\n\n BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials\n return app_credentials\n", "path": "libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py"}]} | 2,326 | 187 |
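Editorial note on the botbuilder row above: the golden diff reduces to one behavioral change — `post_activity` must return an `InvokeResponse` even when the skill replies with an empty body, otherwise `SkillDialog` later dereferences `None` and hits the `AttributeError` on `response.status` described in the issue. A minimal, self-contained sketch of that difference (the `InvokeResponse` here is a simplified stand-in, not the real botbuilder class):

```python
import json
from dataclasses import dataclass
from typing import Any, Optional

# Simplified stand-in for botbuilder's InvokeResponse, just to show the shape of the fix.
@dataclass
class InvokeResponse:
    status: int
    body: Optional[Any] = None

def to_invoke_response_buggy(status: int, raw: str) -> Optional[InvokeResponse]:
    content = json.loads(raw) if raw else None
    if content:                       # an empty 201 reply falls through here...
        return InvokeResponse(status=status, body=content)
    return None                       # ...and the caller later crashes on response.status

def to_invoke_response_fixed(status: int, raw: str) -> InvokeResponse:
    content = json.loads(raw) if raw else None
    return InvokeResponse(status=status, body=content)   # status is always available

print(to_invoke_response_buggy(201, ""))   # None
print(to_invoke_response_fixed(201, ""))   # InvokeResponse(status=201, body=None)
```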
gh_patches_debug_8973 | rasdani/github-patches | git_diff | spesmilo__electrum-2164 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HTTPS cert expired on LabelSync server
The cert on https://sync.bytesized-hosting.com:9090/ has expired and the LabelSync plugin stopped working
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/labels/labels.py`
Content:
```
1 import hashlib
2 import requests
3 import threading
4 import json
5 import sys
6 import traceback
7
8 import aes
9 import base64
10
11 import electrum
12 from electrum.plugins import BasePlugin, hook
13 from electrum.i18n import _
14
15
16
17
18 class LabelsPlugin(BasePlugin):
19
20 def __init__(self, parent, config, name):
21 BasePlugin.__init__(self, parent, config, name)
22 self.target_host = 'sync.bytesized-hosting.com:9090'
23 self.wallets = {}
24
25 def encode(self, wallet, msg):
26 password, iv, wallet_id = self.wallets[wallet]
27 encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv,
28 msg.encode('utf8'))
29 return base64.b64encode(encrypted)
30
31 def decode(self, wallet, message):
32 password, iv, wallet_id = self.wallets[wallet]
33 decoded = base64.b64decode(message)
34 decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)
35 return decrypted.decode('utf8')
36
37 def get_nonce(self, wallet):
38 # nonce is the nonce to be used with the next change
39 nonce = wallet.storage.get('wallet_nonce')
40 if nonce is None:
41 nonce = 1
42 self.set_nonce(wallet, nonce)
43 return nonce
44
45 def set_nonce(self, wallet, nonce):
46 self.print_error("set", wallet.basename(), "nonce to", nonce)
47 wallet.storage.put("wallet_nonce", nonce)
48
49 @hook
50 def set_label(self, wallet, item, label):
51 if not wallet in self.wallets:
52 return
53 nonce = self.get_nonce(wallet)
54 wallet_id = self.wallets[wallet][2]
55 bundle = {"walletId": wallet_id,
56 "walletNonce": nonce,
57 "externalId": self.encode(wallet, item),
58 "encryptedLabel": self.encode(wallet, label)}
59 t = threading.Thread(target=self.do_request,
60 args=["POST", "/label", False, bundle])
61 t.setDaemon(True)
62 t.start()
63 # Caller will write the wallet
64 self.set_nonce(wallet, nonce + 1)
65
66 def do_request(self, method, url = "/labels", is_batch=False, data=None):
67 url = 'https://' + self.target_host + url
68 kwargs = {'headers': {}}
69 if method == 'GET' and data:
70 kwargs['params'] = data
71 elif method == 'POST' and data:
72 kwargs['data'] = json.dumps(data)
73 kwargs['headers']['Content-Type'] = 'application/json'
74 response = requests.request(method, url, **kwargs)
75 if response.status_code != 200:
76 raise BaseException(response.status_code, response.text)
77 response = response.json()
78 if "error" in response:
79 raise BaseException(response["error"])
80 return response
81
82 def push_thread(self, wallet):
83 wallet_id = self.wallets[wallet][2]
84 bundle = {"labels": [],
85 "walletId": wallet_id,
86 "walletNonce": self.get_nonce(wallet)}
87 for key, value in wallet.labels.iteritems():
88 try:
89 encoded_key = self.encode(wallet, key)
90 encoded_value = self.encode(wallet, value)
91 except:
92 self.print_error('cannot encode', repr(key), repr(value))
93 continue
94 bundle["labels"].append({'encryptedLabel': encoded_value,
95 'externalId': encoded_key})
96 self.do_request("POST", "/labels", True, bundle)
97
98 def pull_thread(self, wallet, force):
99 wallet_id = self.wallets[wallet][2]
100 nonce = 1 if force else self.get_nonce(wallet) - 1
101 self.print_error("asking for labels since nonce", nonce)
102 try:
103 response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id) ))
104 if response["labels"] is None:
105 self.print_error('no new labels')
106 return
107 result = {}
108 for label in response["labels"]:
109 try:
110 key = self.decode(wallet, label["externalId"])
111 value = self.decode(wallet, label["encryptedLabel"])
112 except:
113 continue
114 try:
115 json.dumps(key)
116 json.dumps(value)
117 except:
118 self.print_error('error: no json', key)
119 continue
120 result[key] = value
121
122 for key, value in result.items():
123 if force or not wallet.labels.get(key):
124 wallet.labels[key] = value
125
126 self.print_error("received %d labels" % len(response))
127 # do not write to disk because we're in a daemon thread
128 wallet.storage.put('labels', wallet.labels)
129 self.set_nonce(wallet, response["nonce"] + 1)
130 self.on_pulled(wallet)
131
132 except Exception as e:
133 traceback.print_exc(file=sys.stderr)
134 self.print_error("could not retrieve labels")
135
136 def start_wallet(self, wallet):
137 nonce = self.get_nonce(wallet)
138 self.print_error("wallet", wallet.basename(), "nonce is", nonce)
139 mpk = wallet.get_fingerprint()
140 if not mpk:
141 return
142 password = hashlib.sha1(mpk).digest().encode('hex')[:32]
143 iv = hashlib.sha256(password).digest()[:16]
144 wallet_id = hashlib.sha256(mpk).digest().encode('hex')
145 self.wallets[wallet] = (password, iv, wallet_id)
146 # If there is an auth token we can try to actually start syncing
147 t = threading.Thread(target=self.pull_thread, args=(wallet, False))
148 t.setDaemon(True)
149 t.start()
150
151 def stop_wallet(self, wallet):
152 self.wallets.pop(wallet, None)
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/labels/labels.py b/plugins/labels/labels.py
--- a/plugins/labels/labels.py
+++ b/plugins/labels/labels.py
@@ -5,7 +5,6 @@
import sys
import traceback
-import aes
import base64
import electrum
@@ -19,7 +18,7 @@
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
- self.target_host = 'sync.bytesized-hosting.com:9090'
+ self.target_host = 'labels.bauerj.eu'
self.wallets = {}
def encode(self, wallet, msg):
| {"golden_diff": "diff --git a/plugins/labels/labels.py b/plugins/labels/labels.py\n--- a/plugins/labels/labels.py\n+++ b/plugins/labels/labels.py\n@@ -5,7 +5,6 @@\n import sys\n import traceback\n \n-import aes\n import base64\n \n import electrum\n@@ -19,7 +18,7 @@\n \n def __init__(self, parent, config, name):\n BasePlugin.__init__(self, parent, config, name)\n- self.target_host = 'sync.bytesized-hosting.com:9090'\n+ self.target_host = 'labels.bauerj.eu'\n self.wallets = {}\n \n def encode(self, wallet, msg):\n", "issue": "HTTPS cert expired on LabelSync server\nThe cert on https://sync.bytesized-hosting.com:9090/ has expired and the LabelSync plugin stopped working \n", "before_files": [{"content": "import hashlib\nimport requests\nimport threading\nimport json\nimport sys\nimport traceback\n\nimport aes\nimport base64\n\nimport electrum\nfrom electrum.plugins import BasePlugin, hook\nfrom electrum.i18n import _\n\n\n\n\nclass LabelsPlugin(BasePlugin):\n\n def __init__(self, parent, config, name):\n BasePlugin.__init__(self, parent, config, name)\n self.target_host = 'sync.bytesized-hosting.com:9090'\n self.wallets = {}\n\n def encode(self, wallet, msg):\n password, iv, wallet_id = self.wallets[wallet]\n encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv,\n msg.encode('utf8'))\n return base64.b64encode(encrypted)\n\n def decode(self, wallet, message):\n password, iv, wallet_id = self.wallets[wallet]\n decoded = base64.b64decode(message)\n decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)\n return decrypted.decode('utf8')\n\n def get_nonce(self, wallet):\n # nonce is the nonce to be used with the next change\n nonce = wallet.storage.get('wallet_nonce')\n if nonce is None:\n nonce = 1\n self.set_nonce(wallet, nonce)\n return nonce\n\n def set_nonce(self, wallet, nonce):\n self.print_error(\"set\", wallet.basename(), \"nonce to\", nonce)\n wallet.storage.put(\"wallet_nonce\", nonce)\n\n @hook\n def set_label(self, wallet, item, label):\n if not wallet in self.wallets:\n return\n nonce = self.get_nonce(wallet)\n wallet_id = self.wallets[wallet][2]\n bundle = {\"walletId\": wallet_id,\n \"walletNonce\": nonce,\n \"externalId\": self.encode(wallet, item),\n \"encryptedLabel\": self.encode(wallet, label)}\n t = threading.Thread(target=self.do_request,\n args=[\"POST\", \"/label\", False, bundle])\n t.setDaemon(True)\n t.start()\n # Caller will write the wallet\n self.set_nonce(wallet, nonce + 1)\n\n def do_request(self, method, url = \"/labels\", is_batch=False, data=None):\n url = 'https://' + self.target_host + url\n kwargs = {'headers': {}}\n if method == 'GET' and data:\n kwargs['params'] = data\n elif method == 'POST' and data:\n kwargs['data'] = json.dumps(data)\n kwargs['headers']['Content-Type'] = 'application/json'\n response = requests.request(method, url, **kwargs)\n if response.status_code != 200:\n raise BaseException(response.status_code, response.text)\n response = response.json()\n if \"error\" in response:\n raise BaseException(response[\"error\"])\n return response\n\n def push_thread(self, wallet):\n wallet_id = self.wallets[wallet][2]\n bundle = {\"labels\": [],\n \"walletId\": wallet_id,\n \"walletNonce\": self.get_nonce(wallet)}\n for key, value in wallet.labels.iteritems():\n try:\n encoded_key = self.encode(wallet, key)\n encoded_value = self.encode(wallet, value)\n except:\n self.print_error('cannot encode', repr(key), repr(value))\n continue\n bundle[\"labels\"].append({'encryptedLabel': encoded_value,\n 'externalId': encoded_key})\n 
self.do_request(\"POST\", \"/labels\", True, bundle)\n\n def pull_thread(self, wallet, force):\n wallet_id = self.wallets[wallet][2]\n nonce = 1 if force else self.get_nonce(wallet) - 1\n self.print_error(\"asking for labels since nonce\", nonce)\n try:\n response = self.do_request(\"GET\", (\"/labels/since/%d/for/%s\" % (nonce, wallet_id) ))\n if response[\"labels\"] is None:\n self.print_error('no new labels')\n return\n result = {}\n for label in response[\"labels\"]:\n try:\n key = self.decode(wallet, label[\"externalId\"])\n value = self.decode(wallet, label[\"encryptedLabel\"])\n except:\n continue\n try:\n json.dumps(key)\n json.dumps(value)\n except:\n self.print_error('error: no json', key)\n continue\n result[key] = value\n\n for key, value in result.items():\n if force or not wallet.labels.get(key):\n wallet.labels[key] = value\n\n self.print_error(\"received %d labels\" % len(response))\n # do not write to disk because we're in a daemon thread\n wallet.storage.put('labels', wallet.labels)\n self.set_nonce(wallet, response[\"nonce\"] + 1)\n self.on_pulled(wallet)\n\n except Exception as e:\n traceback.print_exc(file=sys.stderr)\n self.print_error(\"could not retrieve labels\")\n\n def start_wallet(self, wallet):\n nonce = self.get_nonce(wallet)\n self.print_error(\"wallet\", wallet.basename(), \"nonce is\", nonce)\n mpk = wallet.get_fingerprint()\n if not mpk:\n return\n password = hashlib.sha1(mpk).digest().encode('hex')[:32]\n iv = hashlib.sha256(password).digest()[:16]\n wallet_id = hashlib.sha256(mpk).digest().encode('hex')\n self.wallets[wallet] = (password, iv, wallet_id)\n # If there is an auth token we can try to actually start syncing\n t = threading.Thread(target=self.pull_thread, args=(wallet, False))\n t.setDaemon(True)\n t.start()\n\n def stop_wallet(self, wallet):\n self.wallets.pop(wallet, None)\n", "path": "plugins/labels/labels.py"}], "after_files": [{"content": "import hashlib\nimport requests\nimport threading\nimport json\nimport sys\nimport traceback\n\nimport base64\n\nimport electrum\nfrom electrum.plugins import BasePlugin, hook\nfrom electrum.i18n import _\n\n\n\n\nclass LabelsPlugin(BasePlugin):\n\n def __init__(self, parent, config, name):\n BasePlugin.__init__(self, parent, config, name)\n self.target_host = 'labels.bauerj.eu'\n self.wallets = {}\n\n def encode(self, wallet, msg):\n password, iv, wallet_id = self.wallets[wallet]\n encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv,\n msg.encode('utf8'))\n return base64.b64encode(encrypted)\n\n def decode(self, wallet, message):\n password, iv, wallet_id = self.wallets[wallet]\n decoded = base64.b64decode(message)\n decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)\n return decrypted.decode('utf8')\n\n def get_nonce(self, wallet):\n # nonce is the nonce to be used with the next change\n nonce = wallet.storage.get('wallet_nonce')\n if nonce is None:\n nonce = 1\n self.set_nonce(wallet, nonce)\n return nonce\n\n def set_nonce(self, wallet, nonce):\n self.print_error(\"set\", wallet.basename(), \"nonce to\", nonce)\n wallet.storage.put(\"wallet_nonce\", nonce)\n\n @hook\n def set_label(self, wallet, item, label):\n if not wallet in self.wallets:\n return\n nonce = self.get_nonce(wallet)\n wallet_id = self.wallets[wallet][2]\n bundle = {\"walletId\": wallet_id,\n \"walletNonce\": nonce,\n \"externalId\": self.encode(wallet, item),\n \"encryptedLabel\": self.encode(wallet, label)}\n t = threading.Thread(target=self.do_request,\n args=[\"POST\", \"/label\", False, bundle])\n 
t.setDaemon(True)\n t.start()\n # Caller will write the wallet\n self.set_nonce(wallet, nonce + 1)\n\n def do_request(self, method, url = \"/labels\", is_batch=False, data=None):\n url = 'https://' + self.target_host + url\n kwargs = {'headers': {}}\n if method == 'GET' and data:\n kwargs['params'] = data\n elif method == 'POST' and data:\n kwargs['data'] = json.dumps(data)\n kwargs['headers']['Content-Type'] = 'application/json'\n response = requests.request(method, url, **kwargs)\n if response.status_code != 200:\n raise BaseException(response.status_code, response.text)\n response = response.json()\n if \"error\" in response:\n raise BaseException(response[\"error\"])\n return response\n\n def push_thread(self, wallet):\n wallet_id = self.wallets[wallet][2]\n bundle = {\"labels\": [],\n \"walletId\": wallet_id,\n \"walletNonce\": self.get_nonce(wallet)}\n for key, value in wallet.labels.iteritems():\n try:\n encoded_key = self.encode(wallet, key)\n encoded_value = self.encode(wallet, value)\n except:\n self.print_error('cannot encode', repr(key), repr(value))\n continue\n bundle[\"labels\"].append({'encryptedLabel': encoded_value,\n 'externalId': encoded_key})\n self.do_request(\"POST\", \"/labels\", True, bundle)\n\n def pull_thread(self, wallet, force):\n wallet_id = self.wallets[wallet][2]\n nonce = 1 if force else self.get_nonce(wallet) - 1\n self.print_error(\"asking for labels since nonce\", nonce)\n try:\n response = self.do_request(\"GET\", (\"/labels/since/%d/for/%s\" % (nonce, wallet_id) ))\n if response[\"labels\"] is None:\n self.print_error('no new labels')\n return\n result = {}\n for label in response[\"labels\"]:\n try:\n key = self.decode(wallet, label[\"externalId\"])\n value = self.decode(wallet, label[\"encryptedLabel\"])\n except:\n continue\n try:\n json.dumps(key)\n json.dumps(value)\n except:\n self.print_error('error: no json', key)\n continue\n result[key] = value\n\n for key, value in result.items():\n if force or not wallet.labels.get(key):\n wallet.labels[key] = value\n\n self.print_error(\"received %d labels\" % len(response))\n # do not write to disk because we're in a daemon thread\n wallet.storage.put('labels', wallet.labels)\n self.set_nonce(wallet, response[\"nonce\"] + 1)\n self.on_pulled(wallet)\n\n except Exception as e:\n traceback.print_exc(file=sys.stderr)\n self.print_error(\"could not retrieve labels\")\n\n def start_wallet(self, wallet):\n nonce = self.get_nonce(wallet)\n self.print_error(\"wallet\", wallet.basename(), \"nonce is\", nonce)\n mpk = wallet.get_fingerprint()\n if not mpk:\n return\n password = hashlib.sha1(mpk).digest().encode('hex')[:32]\n iv = hashlib.sha256(password).digest()[:16]\n wallet_id = hashlib.sha256(mpk).digest().encode('hex')\n self.wallets[wallet] = (password, iv, wallet_id)\n # If there is an auth token we can try to actually start syncing\n t = threading.Thread(target=self.pull_thread, args=(wallet, False))\n t.setDaemon(True)\n t.start()\n\n def stop_wallet(self, wallet):\n self.wallets.pop(wallet, None)\n", "path": "plugins/labels/labels.py"}]} | 1,882 | 152 |
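Editorial note on the Electrum LabelSync row above: the fix is purely a configuration change (new `target_host`, plus dropping the unused `aes` import), since the original host's certificate had expired. A quick way to confirm that whichever host the plugin points at still presents a currently valid certificate — a diagnostic sketch, not part of Electrum (`labels.bauerj.eu` is the host taken from the patch):

```python
import socket
import ssl
from datetime import datetime

def cert_days_remaining(host: str, port: int = 443) -> int:
    """Return how many days the host's TLS certificate has left before expiry."""
    ctx = ssl.create_default_context()
    with ctx.wrap_socket(socket.create_connection((host, port)),
                         server_hostname=host) as s:
        cert = s.getpeercert()
    expires = datetime.strptime(cert["notAfter"], "%b %d %H:%M:%S %Y %Z")
    return (expires - datetime.utcnow()).days

print(cert_days_remaining("labels.bauerj.eu"))
```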
gh_patches_debug_29856 | rasdani/github-patches | git_diff | mesonbuild__meson-9174 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Get the i18n targets inside the meson files
I need to generate a file from the i18n *.gmo files. This `custom_target` needs to depend on the `<project_id>-gmo` target documented [here](https://mesonbuild.com/i18n-module.html).
This target should be retrievable through a method like:
```
gettext_targets = i18n.gettext('mypackage')
custom_target(
depends: gettext_targets.target_gmo()
)
```
For now, I think the only way to achieve that is something like this:
```
ninja_gmo = custom_target(command: [ 'ninja', '-C', meson.build_root(), 'mypackage-gmo'])
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesonbuild/modules/i18n.py`
Content:
```
1 # Copyright 2016 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import shutil
16
17 from os import path
18 from .. import coredata, mesonlib, build, mlog
19 from ..mesonlib import MesonException
20 from ..scripts.gettext import read_linguas
21 from . import ModuleReturnValue
22 from . import ExtensionModule
23 from ..interpreterbase import permittedKwargs, FeatureNew, FeatureNewKwargs
24
25 PRESET_ARGS = {
26 'glib': [
27 '--from-code=UTF-8',
28 '--add-comments',
29
30 # https://developer.gnome.org/glib/stable/glib-I18N.html
31 '--keyword=_',
32 '--keyword=N_',
33 '--keyword=C_:1c,2',
34 '--keyword=NC_:1c,2',
35 '--keyword=g_dcgettext:2',
36 '--keyword=g_dngettext:2,3',
37 '--keyword=g_dpgettext2:2c,3',
38
39 '--flag=N_:1:pass-c-format',
40 '--flag=C_:2:pass-c-format',
41 '--flag=NC_:2:pass-c-format',
42 '--flag=g_dngettext:2:pass-c-format',
43 '--flag=g_strdup_printf:1:c-format',
44 '--flag=g_string_printf:2:c-format',
45 '--flag=g_string_append_printf:2:c-format',
46 '--flag=g_error_new:3:c-format',
47 '--flag=g_set_error:4:c-format',
48 '--flag=g_markup_printf_escaped:1:c-format',
49 '--flag=g_log:3:c-format',
50 '--flag=g_print:1:c-format',
51 '--flag=g_printerr:1:c-format',
52 '--flag=g_printf:1:c-format',
53 '--flag=g_fprintf:2:c-format',
54 '--flag=g_sprintf:2:c-format',
55 '--flag=g_snprintf:3:c-format',
56 ]
57 }
58
59
60 class I18nModule(ExtensionModule):
61 def __init__(self, interpreter):
62 super().__init__(interpreter)
63 self.methods.update({
64 'merge_file': self.merge_file,
65 'gettext': self.gettext,
66 })
67
68 @staticmethod
69 def nogettext_warning():
70 mlog.warning('Gettext not found, all translation targets will be ignored.', once=True)
71
72 @staticmethod
73 def _get_data_dirs(state, dirs):
74 """Returns source directories of relative paths"""
75 src_dir = path.join(state.environment.get_source_dir(), state.subdir)
76 return [path.join(src_dir, d) for d in dirs]
77
78 @FeatureNew('i18n.merge_file', '0.37.0')
79 @FeatureNewKwargs('i18n.merge_file', '0.51.0', ['args'])
80 @permittedKwargs(build.CustomTarget.known_kwargs | {'data_dirs', 'po_dir', 'type', 'args'})
81 def merge_file(self, state, args, kwargs):
82 if not shutil.which('xgettext'):
83 self.nogettext_warning()
84 return
85 podir = kwargs.pop('po_dir', None)
86 if not podir:
87 raise MesonException('i18n: po_dir is a required kwarg')
88 podir = path.join(state.build_to_src, state.subdir, podir)
89
90 file_type = kwargs.pop('type', 'xml')
91 VALID_TYPES = ('xml', 'desktop')
92 if file_type not in VALID_TYPES:
93 raise MesonException(f'i18n: "{file_type}" is not a valid type {VALID_TYPES}')
94
95 datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.pop('data_dirs', [])))
96 datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None
97
98 command = state.environment.get_build_command() + [
99 '--internal', 'msgfmthelper',
100 '@INPUT@', '@OUTPUT@', file_type, podir
101 ]
102 if datadirs:
103 command.append(datadirs)
104
105 if 'args' in kwargs:
106 command.append('--')
107 command.append(mesonlib.stringlistify(kwargs.pop('args', [])))
108
109 kwargs['command'] = command
110
111 # We only use this input file to create a name of the custom target.
112 # Thus we can ignore the other entries.
113 inputfile = mesonlib.extract_as_list(kwargs, 'input')[0]
114 if isinstance(inputfile, str):
115 inputfile = mesonlib.File.from_source_file(state.environment.source_dir,
116 state.subdir, inputfile)
117 if isinstance(inputfile, mesonlib.File):
118 # output could be '@BASENAME@' in which case we need to do substitutions
119 # to get a unique target name.
120 output = kwargs['output']
121 ifile_abs = inputfile.absolute_path(state.environment.source_dir,
122 state.environment.build_dir)
123 values = mesonlib.get_filenames_templates_dict([ifile_abs], None)
124 outputs = mesonlib.substitute_values([output], values)
125 output = outputs[0]
126 ct = build.CustomTarget(output + '_' + state.subdir.replace('/', '@').replace('\\', '@') + '_merge', state.subdir, state.subproject, kwargs)
127 else:
128 ct = build.CustomTarget(kwargs['output'] + '_merge', state.subdir, state.subproject, kwargs)
129
130 return ModuleReturnValue(ct, [ct])
131
132 @FeatureNewKwargs('i18n.gettext', '0.37.0', ['preset'])
133 @FeatureNewKwargs('i18n.gettext', '0.50.0', ['install_dir'])
134 @permittedKwargs({'po_dir', 'data_dirs', 'type', 'languages', 'args', 'preset', 'install', 'install_dir'})
135 def gettext(self, state, args, kwargs):
136 if len(args) != 1:
137 raise coredata.MesonException('Gettext requires one positional argument (package name).')
138 if not shutil.which('xgettext'):
139 self.nogettext_warning()
140 return
141 packagename = args[0]
142 languages = mesonlib.stringlistify(kwargs.get('languages', []))
143 datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.get('data_dirs', [])))
144 extra_args = mesonlib.stringlistify(kwargs.get('args', []))
145 targets = []
146
147 preset = kwargs.pop('preset', None)
148 if preset:
149 preset_args = PRESET_ARGS.get(preset)
150 if not preset_args:
151 raise coredata.MesonException('i18n: Preset "{}" is not one of the valid options: {}'.format(
152 preset, list(PRESET_ARGS.keys())))
153 extra_args = set(preset_args + extra_args)
154
155 pkg_arg = '--pkgname=' + packagename
156 lang_arg = '--langs=' + '@@'.join(languages) if languages else None
157 datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None
158 extra_args = '--extra-args=' + '@@'.join(extra_args) if extra_args else None
159
160 potargs = state.environment.get_build_command() + ['--internal', 'gettext', 'pot', pkg_arg]
161 if datadirs:
162 potargs.append(datadirs)
163 if extra_args:
164 potargs.append(extra_args)
165 pottarget = build.RunTarget(packagename + '-pot', potargs, [], state.subdir, state.subproject)
166 targets.append(pottarget)
167
168 install = kwargs.get('install', True)
169 install_dir = kwargs.get('install_dir', state.environment.coredata.get_option(mesonlib.OptionKey('localedir')))
170 if not languages:
171 languages = read_linguas(path.join(state.environment.source_dir, state.subdir))
172 for l in languages:
173 po_file = mesonlib.File.from_source_file(state.environment.source_dir,
174 state.subdir, l+'.po')
175 gmo_kwargs = {'command': ['msgfmt', '@INPUT@', '-o', '@OUTPUT@'],
176 'input': po_file,
177 'output': packagename+'.mo',
178 'install': install,
179 # We have multiple files all installed as packagename+'.mo' in different install subdirs.
180 # What we really wanted to do, probably, is have a rename: kwarg, but that's not available
181 # to custom_targets. Crude hack: set the build target's subdir manually.
182 # Bonus: the build tree has something usable as an uninstalled bindtextdomain() target dir.
183 'install_dir': path.join(install_dir, l, 'LC_MESSAGES'),
184 'install_tag': 'i18n',
185 }
186 gmotarget = build.CustomTarget(l+'.mo', path.join(state.subdir, l, 'LC_MESSAGES'), state.subproject, gmo_kwargs)
187 targets.append(gmotarget)
188
189 updatepoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'update_po', pkg_arg]
190 if lang_arg:
191 updatepoargs.append(lang_arg)
192 if datadirs:
193 updatepoargs.append(datadirs)
194 if extra_args:
195 updatepoargs.append(extra_args)
196 updatepotarget = build.RunTarget(packagename + '-update-po', updatepoargs, [], state.subdir, state.subproject)
197 targets.append(updatepotarget)
198
199 return ModuleReturnValue(None, targets)
200
201 def initialize(*args, **kwargs):
202 return I18nModule(*args, **kwargs)
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mesonbuild/modules/i18n.py b/mesonbuild/modules/i18n.py
--- a/mesonbuild/modules/i18n.py
+++ b/mesonbuild/modules/i18n.py
@@ -143,6 +143,7 @@
datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.get('data_dirs', [])))
extra_args = mesonlib.stringlistify(kwargs.get('args', []))
targets = []
+ gmotargets = []
preset = kwargs.pop('preset', None)
if preset:
@@ -185,6 +186,10 @@
}
gmotarget = build.CustomTarget(l+'.mo', path.join(state.subdir, l, 'LC_MESSAGES'), state.subproject, gmo_kwargs)
targets.append(gmotarget)
+ gmotargets.append(gmotarget)
+
+ allgmotarget = build.AliasTarget(packagename + '-gmo', gmotargets, state.subdir, state.subproject)
+ targets.append(allgmotarget)
updatepoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'update_po', pkg_arg]
if lang_arg:
@@ -196,7 +201,7 @@
updatepotarget = build.RunTarget(packagename + '-update-po', updatepoargs, [], state.subdir, state.subproject)
targets.append(updatepotarget)
- return ModuleReturnValue(None, targets)
+ return ModuleReturnValue([gmotargets, pottarget, updatepotarget], targets)
def initialize(*args, **kwargs):
return I18nModule(*args, **kwargs)
| {"golden_diff": "diff --git a/mesonbuild/modules/i18n.py b/mesonbuild/modules/i18n.py\n--- a/mesonbuild/modules/i18n.py\n+++ b/mesonbuild/modules/i18n.py\n@@ -143,6 +143,7 @@\n datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.get('data_dirs', [])))\n extra_args = mesonlib.stringlistify(kwargs.get('args', []))\n targets = []\n+ gmotargets = []\n \n preset = kwargs.pop('preset', None)\n if preset:\n@@ -185,6 +186,10 @@\n }\n gmotarget = build.CustomTarget(l+'.mo', path.join(state.subdir, l, 'LC_MESSAGES'), state.subproject, gmo_kwargs)\n targets.append(gmotarget)\n+ gmotargets.append(gmotarget)\n+\n+ allgmotarget = build.AliasTarget(packagename + '-gmo', gmotargets, state.subdir, state.subproject)\n+ targets.append(allgmotarget)\n \n updatepoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'update_po', pkg_arg]\n if lang_arg:\n@@ -196,7 +201,7 @@\n updatepotarget = build.RunTarget(packagename + '-update-po', updatepoargs, [], state.subdir, state.subproject)\n targets.append(updatepotarget)\n \n- return ModuleReturnValue(None, targets)\n+ return ModuleReturnValue([gmotargets, pottarget, updatepotarget], targets)\n \n def initialize(*args, **kwargs):\n return I18nModule(*args, **kwargs)\n", "issue": "Get the i18n targets inside the meson files\nI need to generate a file from the i18n *.gmo files. This `custom_target` needs to depend on the `<project_id>-gmo` target documented [here](https://mesonbuild.com/i18n-module.html).\r\n\r\nThis target should be retrievable through a method like :\r\n```\r\ngettext_targets = i18n.gettext('mypackage')\r\ncustom_target(\r\n depends: gettext_targets.target_gmo()\r\n)\r\n```\r\n\r\nFor now, I think the only way to achieve that is something like that :\r\n```\r\nninja_gmo = custom_target(command:\u00a0[ 'ninja', '-C' meson.build_root(), 'mypackage-gmo'])\r\n```\n", "before_files": [{"content": "# Copyright 2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport shutil\n\nfrom os import path\nfrom .. import coredata, mesonlib, build, mlog\nfrom ..mesonlib import MesonException\nfrom ..scripts.gettext import read_linguas\nfrom . import ModuleReturnValue\nfrom . 
import ExtensionModule\nfrom ..interpreterbase import permittedKwargs, FeatureNew, FeatureNewKwargs\n\nPRESET_ARGS = {\n 'glib': [\n '--from-code=UTF-8',\n '--add-comments',\n\n # https://developer.gnome.org/glib/stable/glib-I18N.html\n '--keyword=_',\n '--keyword=N_',\n '--keyword=C_:1c,2',\n '--keyword=NC_:1c,2',\n '--keyword=g_dcgettext:2',\n '--keyword=g_dngettext:2,3',\n '--keyword=g_dpgettext2:2c,3',\n\n '--flag=N_:1:pass-c-format',\n '--flag=C_:2:pass-c-format',\n '--flag=NC_:2:pass-c-format',\n '--flag=g_dngettext:2:pass-c-format',\n '--flag=g_strdup_printf:1:c-format',\n '--flag=g_string_printf:2:c-format',\n '--flag=g_string_append_printf:2:c-format',\n '--flag=g_error_new:3:c-format',\n '--flag=g_set_error:4:c-format',\n '--flag=g_markup_printf_escaped:1:c-format',\n '--flag=g_log:3:c-format',\n '--flag=g_print:1:c-format',\n '--flag=g_printerr:1:c-format',\n '--flag=g_printf:1:c-format',\n '--flag=g_fprintf:2:c-format',\n '--flag=g_sprintf:2:c-format',\n '--flag=g_snprintf:3:c-format',\n ]\n}\n\n\nclass I18nModule(ExtensionModule):\n def __init__(self, interpreter):\n super().__init__(interpreter)\n self.methods.update({\n 'merge_file': self.merge_file,\n 'gettext': self.gettext,\n })\n\n @staticmethod\n def nogettext_warning():\n mlog.warning('Gettext not found, all translation targets will be ignored.', once=True)\n\n @staticmethod\n def _get_data_dirs(state, dirs):\n \"\"\"Returns source directories of relative paths\"\"\"\n src_dir = path.join(state.environment.get_source_dir(), state.subdir)\n return [path.join(src_dir, d) for d in dirs]\n\n @FeatureNew('i18n.merge_file', '0.37.0')\n @FeatureNewKwargs('i18n.merge_file', '0.51.0', ['args'])\n @permittedKwargs(build.CustomTarget.known_kwargs | {'data_dirs', 'po_dir', 'type', 'args'})\n def merge_file(self, state, args, kwargs):\n if not shutil.which('xgettext'):\n self.nogettext_warning()\n return\n podir = kwargs.pop('po_dir', None)\n if not podir:\n raise MesonException('i18n: po_dir is a required kwarg')\n podir = path.join(state.build_to_src, state.subdir, podir)\n\n file_type = kwargs.pop('type', 'xml')\n VALID_TYPES = ('xml', 'desktop')\n if file_type not in VALID_TYPES:\n raise MesonException(f'i18n: \"{file_type}\" is not a valid type {VALID_TYPES}')\n\n datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.pop('data_dirs', [])))\n datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None\n\n command = state.environment.get_build_command() + [\n '--internal', 'msgfmthelper',\n '@INPUT@', '@OUTPUT@', file_type, podir\n ]\n if datadirs:\n command.append(datadirs)\n\n if 'args' in kwargs:\n command.append('--')\n command.append(mesonlib.stringlistify(kwargs.pop('args', [])))\n\n kwargs['command'] = command\n\n # We only use this input file to create a name of the custom target.\n # Thus we can ignore the other entries.\n inputfile = mesonlib.extract_as_list(kwargs, 'input')[0]\n if isinstance(inputfile, str):\n inputfile = mesonlib.File.from_source_file(state.environment.source_dir,\n state.subdir, inputfile)\n if isinstance(inputfile, mesonlib.File):\n # output could be '@BASENAME@' in which case we need to do substitutions\n # to get a unique target name.\n output = kwargs['output']\n ifile_abs = inputfile.absolute_path(state.environment.source_dir,\n state.environment.build_dir)\n values = mesonlib.get_filenames_templates_dict([ifile_abs], None)\n outputs = mesonlib.substitute_values([output], values)\n output = outputs[0]\n ct = build.CustomTarget(output + '_' + state.subdir.replace('/', 
'@').replace('\\\\', '@') + '_merge', state.subdir, state.subproject, kwargs)\n else:\n ct = build.CustomTarget(kwargs['output'] + '_merge', state.subdir, state.subproject, kwargs)\n\n return ModuleReturnValue(ct, [ct])\n\n @FeatureNewKwargs('i18n.gettext', '0.37.0', ['preset'])\n @FeatureNewKwargs('i18n.gettext', '0.50.0', ['install_dir'])\n @permittedKwargs({'po_dir', 'data_dirs', 'type', 'languages', 'args', 'preset', 'install', 'install_dir'})\n def gettext(self, state, args, kwargs):\n if len(args) != 1:\n raise coredata.MesonException('Gettext requires one positional argument (package name).')\n if not shutil.which('xgettext'):\n self.nogettext_warning()\n return\n packagename = args[0]\n languages = mesonlib.stringlistify(kwargs.get('languages', []))\n datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.get('data_dirs', [])))\n extra_args = mesonlib.stringlistify(kwargs.get('args', []))\n targets = []\n\n preset = kwargs.pop('preset', None)\n if preset:\n preset_args = PRESET_ARGS.get(preset)\n if not preset_args:\n raise coredata.MesonException('i18n: Preset \"{}\" is not one of the valid options: {}'.format(\n preset, list(PRESET_ARGS.keys())))\n extra_args = set(preset_args + extra_args)\n\n pkg_arg = '--pkgname=' + packagename\n lang_arg = '--langs=' + '@@'.join(languages) if languages else None\n datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None\n extra_args = '--extra-args=' + '@@'.join(extra_args) if extra_args else None\n\n potargs = state.environment.get_build_command() + ['--internal', 'gettext', 'pot', pkg_arg]\n if datadirs:\n potargs.append(datadirs)\n if extra_args:\n potargs.append(extra_args)\n pottarget = build.RunTarget(packagename + '-pot', potargs, [], state.subdir, state.subproject)\n targets.append(pottarget)\n\n install = kwargs.get('install', True)\n install_dir = kwargs.get('install_dir', state.environment.coredata.get_option(mesonlib.OptionKey('localedir')))\n if not languages:\n languages = read_linguas(path.join(state.environment.source_dir, state.subdir))\n for l in languages:\n po_file = mesonlib.File.from_source_file(state.environment.source_dir,\n state.subdir, l+'.po')\n gmo_kwargs = {'command': ['msgfmt', '@INPUT@', '-o', '@OUTPUT@'],\n 'input': po_file,\n 'output': packagename+'.mo',\n 'install': install,\n # We have multiple files all installed as packagename+'.mo' in different install subdirs.\n # What we really wanted to do, probably, is have a rename: kwarg, but that's not available\n # to custom_targets. 
Crude hack: set the build target's subdir manually.\n # Bonus: the build tree has something usable as an uninstalled bindtextdomain() target dir.\n 'install_dir': path.join(install_dir, l, 'LC_MESSAGES'),\n 'install_tag': 'i18n',\n }\n gmotarget = build.CustomTarget(l+'.mo', path.join(state.subdir, l, 'LC_MESSAGES'), state.subproject, gmo_kwargs)\n targets.append(gmotarget)\n\n updatepoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'update_po', pkg_arg]\n if lang_arg:\n updatepoargs.append(lang_arg)\n if datadirs:\n updatepoargs.append(datadirs)\n if extra_args:\n updatepoargs.append(extra_args)\n updatepotarget = build.RunTarget(packagename + '-update-po', updatepoargs, [], state.subdir, state.subproject)\n targets.append(updatepotarget)\n\n return ModuleReturnValue(None, targets)\n\ndef initialize(*args, **kwargs):\n return I18nModule(*args, **kwargs)\n", "path": "mesonbuild/modules/i18n.py"}], "after_files": [{"content": "# Copyright 2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport shutil\n\nfrom os import path\nfrom .. import coredata, mesonlib, build, mlog\nfrom ..mesonlib import MesonException\nfrom ..scripts.gettext import read_linguas\nfrom . import ModuleReturnValue\nfrom . import ExtensionModule\nfrom ..interpreterbase import permittedKwargs, FeatureNew, FeatureNewKwargs\n\nPRESET_ARGS = {\n 'glib': [\n '--from-code=UTF-8',\n '--add-comments',\n\n # https://developer.gnome.org/glib/stable/glib-I18N.html\n '--keyword=_',\n '--keyword=N_',\n '--keyword=C_:1c,2',\n '--keyword=NC_:1c,2',\n '--keyword=g_dcgettext:2',\n '--keyword=g_dngettext:2,3',\n '--keyword=g_dpgettext2:2c,3',\n\n '--flag=N_:1:pass-c-format',\n '--flag=C_:2:pass-c-format',\n '--flag=NC_:2:pass-c-format',\n '--flag=g_dngettext:2:pass-c-format',\n '--flag=g_strdup_printf:1:c-format',\n '--flag=g_string_printf:2:c-format',\n '--flag=g_string_append_printf:2:c-format',\n '--flag=g_error_new:3:c-format',\n '--flag=g_set_error:4:c-format',\n '--flag=g_markup_printf_escaped:1:c-format',\n '--flag=g_log:3:c-format',\n '--flag=g_print:1:c-format',\n '--flag=g_printerr:1:c-format',\n '--flag=g_printf:1:c-format',\n '--flag=g_fprintf:2:c-format',\n '--flag=g_sprintf:2:c-format',\n '--flag=g_snprintf:3:c-format',\n ]\n}\n\n\nclass I18nModule(ExtensionModule):\n def __init__(self, interpreter):\n super().__init__(interpreter)\n self.methods.update({\n 'merge_file': self.merge_file,\n 'gettext': self.gettext,\n })\n\n @staticmethod\n def nogettext_warning():\n mlog.warning('Gettext not found, all translation targets will be ignored.', once=True)\n\n @staticmethod\n def _get_data_dirs(state, dirs):\n \"\"\"Returns source directories of relative paths\"\"\"\n src_dir = path.join(state.environment.get_source_dir(), state.subdir)\n return [path.join(src_dir, d) for d in dirs]\n\n @FeatureNew('i18n.merge_file', '0.37.0')\n @FeatureNewKwargs('i18n.merge_file', '0.51.0', ['args'])\n @permittedKwargs(build.CustomTarget.known_kwargs | {'data_dirs', 'po_dir', 'type', 
'args'})\n def merge_file(self, state, args, kwargs):\n if not shutil.which('xgettext'):\n self.nogettext_warning()\n return\n podir = kwargs.pop('po_dir', None)\n if not podir:\n raise MesonException('i18n: po_dir is a required kwarg')\n podir = path.join(state.build_to_src, state.subdir, podir)\n\n file_type = kwargs.pop('type', 'xml')\n VALID_TYPES = ('xml', 'desktop')\n if file_type not in VALID_TYPES:\n raise MesonException(f'i18n: \"{file_type}\" is not a valid type {VALID_TYPES}')\n\n datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.pop('data_dirs', [])))\n datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None\n\n command = state.environment.get_build_command() + [\n '--internal', 'msgfmthelper',\n '@INPUT@', '@OUTPUT@', file_type, podir\n ]\n if datadirs:\n command.append(datadirs)\n\n if 'args' in kwargs:\n command.append('--')\n command.append(mesonlib.stringlistify(kwargs.pop('args', [])))\n\n kwargs['command'] = command\n\n # We only use this input file to create a name of the custom target.\n # Thus we can ignore the other entries.\n inputfile = mesonlib.extract_as_list(kwargs, 'input')[0]\n if isinstance(inputfile, str):\n inputfile = mesonlib.File.from_source_file(state.environment.source_dir,\n state.subdir, inputfile)\n if isinstance(inputfile, mesonlib.File):\n # output could be '@BASENAME@' in which case we need to do substitutions\n # to get a unique target name.\n output = kwargs['output']\n ifile_abs = inputfile.absolute_path(state.environment.source_dir,\n state.environment.build_dir)\n values = mesonlib.get_filenames_templates_dict([ifile_abs], None)\n outputs = mesonlib.substitute_values([output], values)\n output = outputs[0]\n ct = build.CustomTarget(output + '_' + state.subdir.replace('/', '@').replace('\\\\', '@') + '_merge', state.subdir, state.subproject, kwargs)\n else:\n ct = build.CustomTarget(kwargs['output'] + '_merge', state.subdir, state.subproject, kwargs)\n\n return ModuleReturnValue(ct, [ct])\n\n @FeatureNewKwargs('i18n.gettext', '0.37.0', ['preset'])\n @FeatureNewKwargs('i18n.gettext', '0.50.0', ['install_dir'])\n @permittedKwargs({'po_dir', 'data_dirs', 'type', 'languages', 'args', 'preset', 'install', 'install_dir'})\n def gettext(self, state, args, kwargs):\n if len(args) != 1:\n raise coredata.MesonException('Gettext requires one positional argument (package name).')\n if not shutil.which('xgettext'):\n self.nogettext_warning()\n return\n packagename = args[0]\n languages = mesonlib.stringlistify(kwargs.get('languages', []))\n datadirs = self._get_data_dirs(state, mesonlib.stringlistify(kwargs.get('data_dirs', [])))\n extra_args = mesonlib.stringlistify(kwargs.get('args', []))\n targets = []\n gmotargets = []\n\n preset = kwargs.pop('preset', None)\n if preset:\n preset_args = PRESET_ARGS.get(preset)\n if not preset_args:\n raise coredata.MesonException('i18n: Preset \"{}\" is not one of the valid options: {}'.format(\n preset, list(PRESET_ARGS.keys())))\n extra_args = set(preset_args + extra_args)\n\n pkg_arg = '--pkgname=' + packagename\n lang_arg = '--langs=' + '@@'.join(languages) if languages else None\n datadirs = '--datadirs=' + ':'.join(datadirs) if datadirs else None\n extra_args = '--extra-args=' + '@@'.join(extra_args) if extra_args else None\n\n potargs = state.environment.get_build_command() + ['--internal', 'gettext', 'pot', pkg_arg]\n if datadirs:\n potargs.append(datadirs)\n if extra_args:\n potargs.append(extra_args)\n pottarget = build.RunTarget(packagename + '-pot', potargs, [], state.subdir, 
state.subproject)\n targets.append(pottarget)\n\n install = kwargs.get('install', True)\n install_dir = kwargs.get('install_dir', state.environment.coredata.get_option(mesonlib.OptionKey('localedir')))\n if not languages:\n languages = read_linguas(path.join(state.environment.source_dir, state.subdir))\n for l in languages:\n po_file = mesonlib.File.from_source_file(state.environment.source_dir,\n state.subdir, l+'.po')\n gmo_kwargs = {'command': ['msgfmt', '@INPUT@', '-o', '@OUTPUT@'],\n 'input': po_file,\n 'output': packagename+'.mo',\n 'install': install,\n # We have multiple files all installed as packagename+'.mo' in different install subdirs.\n # What we really wanted to do, probably, is have a rename: kwarg, but that's not available\n # to custom_targets. Crude hack: set the build target's subdir manually.\n # Bonus: the build tree has something usable as an uninstalled bindtextdomain() target dir.\n 'install_dir': path.join(install_dir, l, 'LC_MESSAGES'),\n 'install_tag': 'i18n',\n }\n gmotarget = build.CustomTarget(l+'.mo', path.join(state.subdir, l, 'LC_MESSAGES'), state.subproject, gmo_kwargs)\n targets.append(gmotarget)\n gmotargets.append(gmotarget)\n\n allgmotarget = build.AliasTarget(packagename + '-gmo', gmotargets, state.subdir, state.subproject)\n targets.append(allgmotarget)\n\n updatepoargs = state.environment.get_build_command() + ['--internal', 'gettext', 'update_po', pkg_arg]\n if lang_arg:\n updatepoargs.append(lang_arg)\n if datadirs:\n updatepoargs.append(datadirs)\n if extra_args:\n updatepoargs.append(extra_args)\n updatepotarget = build.RunTarget(packagename + '-update-po', updatepoargs, [], state.subdir, state.subproject)\n targets.append(updatepotarget)\n\n return ModuleReturnValue([gmotargets, pottarget, updatepotarget], targets)\n\ndef initialize(*args, **kwargs):\n return I18nModule(*args, **kwargs)\n", "path": "mesonbuild/modules/i18n.py"}]} | 3,070 | 376 |
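Editorial note on the Meson i18n row above: the patch aggregates the per-language `.mo` custom targets under a `<package>-gmo` alias and returns them from `gettext()`, which is what the issue asked for. A language-agnostic sketch of that aggregation step, using plain Python stand-ins rather than Meson's real `CustomTarget`/`AliasTarget` classes:

```python
# Plain-Python stand-ins: only the aggregation pattern matters here, not Meson's API.
class CustomTarget:
    def __init__(self, name: str):
        self.name = name

class AliasTarget:
    def __init__(self, name: str, deps):
        self.name = name
        self.deps = list(deps)   # building the alias forces all per-language targets

languages = ["de", "fr", "nl"]
gmotargets = [CustomTarget(f"{lang}.mo") for lang in languages]
allgmotarget = AliasTarget("mypackage-gmo", gmotargets)

# After the patch, i18n.gettext() also hands these targets back, so a meson.build
# file can express `depends:` on them instead of shelling out to `ninja mypackage-gmo`.
print(allgmotarget.name, [t.name for t in allgmotarget.deps])
```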
gh_patches_debug_9970 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-770 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
E2532 when Step Functions Pass state has Parameters key
*cfn-lint version: 0.17.0*
Using latest version of cfn-lint and updated spec files.
*Description of issue.*
Linting a CF template fails when a `AWS::StepFunctions::StateMachine`'s `DefinitionString` has a `Pass` state with the `Parameters` key.
Example template:
```yaml
AWSTemplateFormatVersion: 2010-09-09
Resources:
ExampleStateMachine:
Type: AWS::StepFunctions::StateMachine
Properties:
DefinitionString: |-
{
"StartAt": "Pop Element from List",
"States": {
"Pop Element from List": {
"Type": "Pass",
"Parameters": {
"List.$": "$.List[1:]"
},
"End": true
}
}
}
RoleArn: redacted
```
Error:
```
E2532 State Machine Definition key (Parameters) for State (Pop Element from List) of Type (Pass) is not valid
example.yaml:6:13
```
However, the above `Pass` state conforms to the ASL [spec](https://states-language.net/spec.html#state-type-table) and [docs](https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-pass-state.html) and comes directly from the [Transfer Data Records sample project](https://docs.aws.amazon.com/step-functions/latest/dg/sample-project-transfer-data-sqs.html).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/stepfunctions/StateMachine.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import json
18 import six
19 from cfnlint import CloudFormationLintRule
20 from cfnlint import RuleMatch
21
22
23 class StateMachine(CloudFormationLintRule):
24 """Check State Machine Definition"""
25 id = 'E2532'
26 shortdesc = 'Check State Machine Definition for proper syntax'
27 description = 'Check the State Machine String Definition to make sure its JSON. ' \
28 'Validate basic syntax of the file to determine validity.'
29 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html'
30 tags = ['resources', 'stepfunctions']
31
32 def __init__(self):
33 """Init"""
34 super(StateMachine, self).__init__()
35 self.resource_property_types.append('AWS::StepFunctions::StateMachine')
36
37 def _check_state_json(self, def_json, state_name, path):
38 """Check State JSON Definition"""
39 matches = []
40
41 # https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-common-fields.html
42 common_state_keys = [
43 'Next',
44 'End',
45 'Type',
46 'Comment',
47 'InputPath',
48 'OutputPath',
49 ]
50 common_state_required_keys = [
51 'Type',
52 ]
53 state_key_types = {
54 'Pass': ['Result', 'ResultPath'],
55 'Task': ['Resource', 'ResultPath', 'Retry', 'Catch', 'TimeoutSeconds', 'Parameters', 'HeartbeatSeconds'],
56 'Choice': ['Choices', 'Default'],
57 'Wait': ['Seconds', 'Timestamp', 'SecondsPath', 'TimestampPath'],
58 'Succeed': [],
59 'Fail': ['Cause', 'Error'],
60 'Parallel': ['Branches', 'ResultPath', 'Retry', 'Catch']
61 }
62 state_required_types = {
63 'Pass': [],
64 'Task': ['Resource'],
65 'Choice': ['Choices'],
66 'Wait': [],
67 'Succeed': [],
68 'Fail': [],
69 'Parallel': ['Branches']
70 }
71
72 for req_key in common_state_required_keys:
73 if req_key not in def_json:
74 message = 'State Machine Definition required key (%s) for State (%s) is missing' % (req_key, state_name)
75 matches.append(RuleMatch(path, message))
76 return matches
77
78 state_type = def_json.get('Type')
79
80 if state_type in state_key_types:
81 for state_key, _ in def_json.items():
82 if state_key not in common_state_keys + state_key_types.get(state_type, []):
83 message = 'State Machine Definition key (%s) for State (%s) of Type (%s) is not valid' % (state_key, state_name, state_type)
84 matches.append(RuleMatch(path, message))
85 for req_key in common_state_required_keys + state_required_types.get(state_type, []):
86 if req_key not in def_json:
87 message = 'State Machine Definition required key (%s) for State (%s) of Type (%s) is missing' % (req_key, state_name, state_type)
88 matches.append(RuleMatch(path, message))
89 return matches
90 else:
91 message = 'State Machine Definition Type (%s) is not valid' % (state_type)
92 matches.append(RuleMatch(path, message))
93
94 return matches
95
96 def _check_definition_json(self, def_json, path):
97 """Check JSON Definition"""
98 matches = []
99
100 top_level_keys = [
101 'Comment',
102 'StartAt',
103 'TimeoutSeconds',
104 'Version',
105 'States'
106 ]
107 top_level_required_keys = [
108 'StartAt',
109 'States'
110 ]
111 for top_key, _ in def_json.items():
112 if top_key not in top_level_keys:
113 message = 'State Machine Definition key (%s) is not valid' % top_key
114 matches.append(RuleMatch(path, message))
115
116 for req_key in top_level_required_keys:
117 if req_key not in def_json:
118 message = 'State Machine Definition required key (%s) is missing' % req_key
119 matches.append(RuleMatch(path, message))
120
121 for state_name, state_value in def_json.get('States', {}).items():
122 matches.extend(self._check_state_json(state_value, state_name, path))
123 return matches
124
125 def check_value(self, value, path, fail_on_loads=True):
126 """Check Definition Value"""
127 matches = []
128 try:
129 def_json = json.loads(value)
130 # pylint: disable=W0703
131 except Exception as err:
132 if fail_on_loads:
133 message = 'State Machine Definition needs to be formatted as JSON. Error %s' % err
134 matches.append(RuleMatch(path, message))
135 return matches
136
137 self.logger.debug('State Machine definition could not be parsed. Skipping')
138 return matches
139
140 matches.extend(self._check_definition_json(def_json, path))
141 return matches
142
143 def check_sub(self, value, path):
144 """Check Sub Object"""
145 matches = []
146 if isinstance(value, list):
147 matches.extend(self.check_value(value[0], path, False))
148 elif isinstance(value, six.string_types):
149 matches.extend(self.check_value(value, path, False))
150
151 return matches
152
153 def match_resource_properties(self, properties, _, path, cfn):
154 """Check CloudFormation Properties"""
155 matches = []
156
157 matches.extend(
158 cfn.check_value(
159 obj=properties, key='DefinitionString',
160 path=path[:],
161 check_value=self.check_value,
162 check_sub=self.check_sub
163 ))
164
165 return matches
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py
--- a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py
+++ b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py
@@ -51,7 +51,7 @@
'Type',
]
state_key_types = {
- 'Pass': ['Result', 'ResultPath'],
+ 'Pass': ['Result', 'ResultPath', 'Parameters'],
'Task': ['Resource', 'ResultPath', 'Retry', 'Catch', 'TimeoutSeconds', 'Parameters', 'HeartbeatSeconds'],
'Choice': ['Choices', 'Default'],
'Wait': ['Seconds', 'Timestamp', 'SecondsPath', 'TimestampPath'],
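
To see why the one-line change above resolves E2532, the sketch below re-implements just the key check from `_check_state_json` as standalone Python; the constant names and the `invalid_keys` helper are illustrative only and are not part of cfn-lint itself.

```python
# Standalone sketch of the key check performed for a Pass state (illustrative names).
COMMON_STATE_KEYS = ['Next', 'End', 'Type', 'Comment', 'InputPath', 'OutputPath']
PASS_KEYS_BEFORE = ['Result', 'ResultPath']
PASS_KEYS_AFTER = ['Result', 'ResultPath', 'Parameters']


def invalid_keys(state, pass_keys):
    """Return the keys the rule would flag for a Pass state."""
    allowed = COMMON_STATE_KEYS + pass_keys
    return [key for key in state if key not in allowed]


pass_state = {
    "Type": "Pass",
    "Parameters": {"List.$": "$.List[1:]"},
    "End": True,
}

print(invalid_keys(pass_state, PASS_KEYS_BEFORE))  # ['Parameters'] -> E2532 is reported
print(invalid_keys(pass_state, PASS_KEYS_AFTER))   # []             -> the template lints cleanly
```

With 'Parameters' added to the allowed list, the Pass state from the issue yields no flagged keys, matching the ASL spec cited in the report.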
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n--- a/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n+++ b/src/cfnlint/rules/resources/stepfunctions/StateMachine.py\n@@ -51,7 +51,7 @@\n 'Type',\n ]\n state_key_types = {\n- 'Pass': ['Result', 'ResultPath'],\n+ 'Pass': ['Result', 'ResultPath', 'Parameters'],\n 'Task': ['Resource', 'ResultPath', 'Retry', 'Catch', 'TimeoutSeconds', 'Parameters', 'HeartbeatSeconds'],\n 'Choice': ['Choices', 'Default'],\n 'Wait': ['Seconds', 'Timestamp', 'SecondsPath', 'TimestampPath'],\n", "issue": "E2532 when Step Functions Pass state has Parameters key\n*cfn-lint version: 0.17.0*\r\n\r\nUsing latest version of cfn-lint and updated spec files.\r\n\r\n*Description of issue.*\r\n\r\nLinting a CF template fails when a `AWS::StepFunctions::StateMachine`'s `DefinitionString` has a `Pass` state with the `Parameters` key.\r\n\r\nExample template:\r\n\r\n```yaml\r\nAWSTemplateFormatVersion: 2010-09-09\r\nResources:\r\n ExampleStateMachine:\r\n Type: AWS::StepFunctions::StateMachine\r\n Properties:\r\n DefinitionString: |-\r\n {\r\n \"StartAt\": \"Pop Element from List\",\r\n \"States\": {\r\n \"Pop Element from List\": {\r\n \"Type\": \"Pass\",\r\n \"Parameters\": {\r\n \"List.$\": \"$.List[1:]\"\r\n },\r\n \"End\": true\r\n }\r\n }\r\n }\r\n RoleArn: redacted\r\n```\r\n\r\nError:\r\n\r\n```\r\nE2532 State Machine Definition key (Parameters) for State (Pop Element from List) of Type (Pass) is not valid\r\nexample.yaml:6:13\r\n```\r\n\r\nHowever, the above `Pass` state conforms to the ASL [spec](https://states-language.net/spec.html#state-type-table) and [docs](https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-pass-state.html) and comes directly from the [Transfer Data Records sample project](https://docs.aws.amazon.com/step-functions/latest/dg/sample-project-transfer-data-sqs.html).\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport json\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass StateMachine(CloudFormationLintRule):\n \"\"\"Check State Machine Definition\"\"\"\n id = 'E2532'\n shortdesc = 'Check State Machine Definition for proper syntax'\n description = 'Check the State Machine String Definition to make sure its JSON. 
' \\\n 'Validate basic syntax of the file to determine validity.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html'\n tags = ['resources', 'stepfunctions']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(StateMachine, self).__init__()\n self.resource_property_types.append('AWS::StepFunctions::StateMachine')\n\n def _check_state_json(self, def_json, state_name, path):\n \"\"\"Check State JSON Definition\"\"\"\n matches = []\n\n # https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-common-fields.html\n common_state_keys = [\n 'Next',\n 'End',\n 'Type',\n 'Comment',\n 'InputPath',\n 'OutputPath',\n ]\n common_state_required_keys = [\n 'Type',\n ]\n state_key_types = {\n 'Pass': ['Result', 'ResultPath'],\n 'Task': ['Resource', 'ResultPath', 'Retry', 'Catch', 'TimeoutSeconds', 'Parameters', 'HeartbeatSeconds'],\n 'Choice': ['Choices', 'Default'],\n 'Wait': ['Seconds', 'Timestamp', 'SecondsPath', 'TimestampPath'],\n 'Succeed': [],\n 'Fail': ['Cause', 'Error'],\n 'Parallel': ['Branches', 'ResultPath', 'Retry', 'Catch']\n }\n state_required_types = {\n 'Pass': [],\n 'Task': ['Resource'],\n 'Choice': ['Choices'],\n 'Wait': [],\n 'Succeed': [],\n 'Fail': [],\n 'Parallel': ['Branches']\n }\n\n for req_key in common_state_required_keys:\n if req_key not in def_json:\n message = 'State Machine Definition required key (%s) for State (%s) is missing' % (req_key, state_name)\n matches.append(RuleMatch(path, message))\n return matches\n\n state_type = def_json.get('Type')\n\n if state_type in state_key_types:\n for state_key, _ in def_json.items():\n if state_key not in common_state_keys + state_key_types.get(state_type, []):\n message = 'State Machine Definition key (%s) for State (%s) of Type (%s) is not valid' % (state_key, state_name, state_type)\n matches.append(RuleMatch(path, message))\n for req_key in common_state_required_keys + state_required_types.get(state_type, []):\n if req_key not in def_json:\n message = 'State Machine Definition required key (%s) for State (%s) of Type (%s) is missing' % (req_key, state_name, state_type)\n matches.append(RuleMatch(path, message))\n return matches\n else:\n message = 'State Machine Definition Type (%s) is not valid' % (state_type)\n matches.append(RuleMatch(path, message))\n\n return matches\n\n def _check_definition_json(self, def_json, path):\n \"\"\"Check JSON Definition\"\"\"\n matches = []\n\n top_level_keys = [\n 'Comment',\n 'StartAt',\n 'TimeoutSeconds',\n 'Version',\n 'States'\n ]\n top_level_required_keys = [\n 'StartAt',\n 'States'\n ]\n for top_key, _ in def_json.items():\n if top_key not in top_level_keys:\n message = 'State Machine Definition key (%s) is not valid' % top_key\n matches.append(RuleMatch(path, message))\n\n for req_key in top_level_required_keys:\n if req_key not in def_json:\n message = 'State Machine Definition required key (%s) is missing' % req_key\n matches.append(RuleMatch(path, message))\n\n for state_name, state_value in def_json.get('States', {}).items():\n matches.extend(self._check_state_json(state_value, state_name, path))\n return matches\n\n def check_value(self, value, path, fail_on_loads=True):\n \"\"\"Check Definition Value\"\"\"\n matches = []\n try:\n def_json = json.loads(value)\n # pylint: disable=W0703\n except Exception as err:\n if fail_on_loads:\n message = 'State Machine Definition needs to be formatted as JSON. 
Error %s' % err\n matches.append(RuleMatch(path, message))\n return matches\n\n self.logger.debug('State Machine definition could not be parsed. Skipping')\n return matches\n\n matches.extend(self._check_definition_json(def_json, path))\n return matches\n\n def check_sub(self, value, path):\n \"\"\"Check Sub Object\"\"\"\n matches = []\n if isinstance(value, list):\n matches.extend(self.check_value(value[0], path, False))\n elif isinstance(value, six.string_types):\n matches.extend(self.check_value(value, path, False))\n\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n matches.extend(\n cfn.check_value(\n obj=properties, key='DefinitionString',\n path=path[:],\n check_value=self.check_value,\n check_sub=self.check_sub\n ))\n\n return matches\n", "path": "src/cfnlint/rules/resources/stepfunctions/StateMachine.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport json\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass StateMachine(CloudFormationLintRule):\n \"\"\"Check State Machine Definition\"\"\"\n id = 'E2532'\n shortdesc = 'Check State Machine Definition for proper syntax'\n description = 'Check the State Machine String Definition to make sure its JSON. 
' \\\n 'Validate basic syntax of the file to determine validity.'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-stepfunctions-statemachine.html'\n tags = ['resources', 'stepfunctions']\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super(StateMachine, self).__init__()\n self.resource_property_types.append('AWS::StepFunctions::StateMachine')\n\n def _check_state_json(self, def_json, state_name, path):\n \"\"\"Check State JSON Definition\"\"\"\n matches = []\n\n # https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-common-fields.html\n common_state_keys = [\n 'Next',\n 'End',\n 'Type',\n 'Comment',\n 'InputPath',\n 'OutputPath',\n ]\n common_state_required_keys = [\n 'Type',\n ]\n state_key_types = {\n 'Pass': ['Result', 'ResultPath', 'Parameters'],\n 'Task': ['Resource', 'ResultPath', 'Retry', 'Catch', 'TimeoutSeconds', 'Parameters', 'HeartbeatSeconds'],\n 'Choice': ['Choices', 'Default'],\n 'Wait': ['Seconds', 'Timestamp', 'SecondsPath', 'TimestampPath'],\n 'Succeed': [],\n 'Fail': ['Cause', 'Error'],\n 'Parallel': ['Branches', 'ResultPath', 'Retry', 'Catch']\n }\n state_required_types = {\n 'Pass': [],\n 'Task': ['Resource'],\n 'Choice': ['Choices'],\n 'Wait': [],\n 'Succeed': [],\n 'Fail': [],\n 'Parallel': ['Branches']\n }\n\n for req_key in common_state_required_keys:\n if req_key not in def_json:\n message = 'State Machine Definition required key (%s) for State (%s) is missing' % (req_key, state_name)\n matches.append(RuleMatch(path, message))\n return matches\n\n state_type = def_json.get('Type')\n\n if state_type in state_key_types:\n for state_key, _ in def_json.items():\n if state_key not in common_state_keys + state_key_types.get(state_type, []):\n message = 'State Machine Definition key (%s) for State (%s) of Type (%s) is not valid' % (state_key, state_name, state_type)\n matches.append(RuleMatch(path, message))\n for req_key in common_state_required_keys + state_required_types.get(state_type, []):\n if req_key not in def_json:\n message = 'State Machine Definition required key (%s) for State (%s) of Type (%s) is missing' % (req_key, state_name, state_type)\n matches.append(RuleMatch(path, message))\n return matches\n else:\n message = 'State Machine Definition Type (%s) is not valid' % (state_type)\n matches.append(RuleMatch(path, message))\n\n return matches\n\n def _check_definition_json(self, def_json, path):\n \"\"\"Check JSON Definition\"\"\"\n matches = []\n\n top_level_keys = [\n 'Comment',\n 'StartAt',\n 'TimeoutSeconds',\n 'Version',\n 'States'\n ]\n top_level_required_keys = [\n 'StartAt',\n 'States'\n ]\n for top_key, _ in def_json.items():\n if top_key not in top_level_keys:\n message = 'State Machine Definition key (%s) is not valid' % top_key\n matches.append(RuleMatch(path, message))\n\n for req_key in top_level_required_keys:\n if req_key not in def_json:\n message = 'State Machine Definition required key (%s) is missing' % req_key\n matches.append(RuleMatch(path, message))\n\n for state_name, state_value in def_json.get('States', {}).items():\n matches.extend(self._check_state_json(state_value, state_name, path))\n return matches\n\n def check_value(self, value, path, fail_on_loads=True):\n \"\"\"Check Definition Value\"\"\"\n matches = []\n try:\n def_json = json.loads(value)\n # pylint: disable=W0703\n except Exception as err:\n if fail_on_loads:\n message = 'State Machine Definition needs to be formatted as JSON. 
Error %s' % err\n matches.append(RuleMatch(path, message))\n return matches\n\n self.logger.debug('State Machine definition could not be parsed. Skipping')\n return matches\n\n matches.extend(self._check_definition_json(def_json, path))\n return matches\n\n def check_sub(self, value, path):\n \"\"\"Check Sub Object\"\"\"\n matches = []\n if isinstance(value, list):\n matches.extend(self.check_value(value[0], path, False))\n elif isinstance(value, six.string_types):\n matches.extend(self.check_value(value, path, False))\n\n return matches\n\n def match_resource_properties(self, properties, _, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n matches.extend(\n cfn.check_value(\n obj=properties, key='DefinitionString',\n path=path[:],\n check_value=self.check_value,\n check_sub=self.check_sub\n ))\n\n return matches\n", "path": "src/cfnlint/rules/resources/stepfunctions/StateMachine.py"}]} | 2,383 | 168 |
gh_patches_debug_29287 | rasdani/github-patches | git_diff | weni-ai__bothub-engine-77 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
It is possible to translate an example into the same language as the original, which should not be allowed.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bothub/api/serializers/translate.py`
Content:
```
1 from rest_framework import serializers
2
3 from django.utils.translation import gettext as _
4
5 from bothub.common.models import RepositoryTranslatedExampleEntity
6 from bothub.common.models import RepositoryTranslatedExample
7 from bothub.common.models import RepositoryExample
8
9 from ..validators import CanContributeInRepositoryTranslatedExampleValidator
10 from ..validators import CanContributeInRepositoryExampleValidator
11 from ..validators import TranslatedExampleEntitiesValidator
12
13
14 class RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):
15 class Meta:
16 model = RepositoryTranslatedExampleEntity
17 fields = [
18 'id',
19 'repository_translated_example',
20 'start',
21 'end',
22 'entity',
23 'created_at',
24 'value',
25 ]
26
27 repository_translated_example = serializers.PrimaryKeyRelatedField(
28 queryset=RepositoryTranslatedExample.objects,
29 validators=[
30 CanContributeInRepositoryTranslatedExampleValidator(),
31 ],
32 help_text='Example translation ID')
33 value = serializers.SerializerMethodField()
34
35 def get_value(self, obj):
36 return obj.value
37
38
39 class RepositoryTranslatedExampleSerializer(serializers.ModelSerializer):
40 class Meta:
41 model = RepositoryTranslatedExample
42 fields = [
43 'id',
44 'original_example',
45 'from_language',
46 'language',
47 'text',
48 'has_valid_entities',
49 'entities',
50 'created_at',
51 ]
52
53 original_example = serializers.PrimaryKeyRelatedField(
54 queryset=RepositoryExample.objects,
55 validators=[
56 CanContributeInRepositoryExampleValidator(),
57 ],
58 help_text=_('Example\'s ID'))
59 from_language = serializers.SerializerMethodField()
60 has_valid_entities = serializers.SerializerMethodField()
61 entities = RepositoryTranslatedExampleEntitySeralizer(
62 many=True,
63 read_only=True)
64
65 def get_from_language(self, obj):
66 return obj.original_example.repository_update.language
67
68 def get_has_valid_entities(self, obj):
69 return obj.has_valid_entities
70
71
72 class NewRepositoryTranslatedExampleEntitySeralizer(
73 serializers.ModelSerializer):
74 class Meta:
75 model = RepositoryTranslatedExampleEntity
76 fields = [
77 'start',
78 'end',
79 'entity',
80 ]
81
82
83 class NewRepositoryTranslatedExampleSerializer(serializers.ModelSerializer):
84 class Meta:
85 model = RepositoryTranslatedExample
86 fields = [
87 'id',
88 'original_example',
89 'language',
90 'text',
91 'has_valid_entities',
92 'entities',
93 ]
94
95 def __init__(self, *args, **kwargs):
96 super().__init__(*args, **kwargs)
97 self.validators.append(TranslatedExampleEntitiesValidator())
98
99 original_example = serializers.PrimaryKeyRelatedField(
100 queryset=RepositoryExample.objects,
101 validators=[
102 CanContributeInRepositoryExampleValidator(),
103 ],
104 help_text=_('Example\'s ID'))
105 has_valid_entities = serializers.SerializerMethodField()
106 entities = NewRepositoryTranslatedExampleEntitySeralizer(
107 many=True,
108 style={'text_field': 'text'})
109
110 def get_has_valid_entities(self, obj):
111 return obj.has_valid_entities
112
113 def create(self, validated_data):
114 entities_data = validated_data.pop('entities')
115
116 translated = self.Meta.model.objects.create(**validated_data)
117 for entity_data in entities_data:
118 RepositoryTranslatedExampleEntity.objects.create(
119 repository_translated_example=translated,
120 **entity_data)
121 return translated
122
```
Path: `bothub/api/validators.py`
Content:
```
1 from django.utils.translation import gettext as _
2 from rest_framework.exceptions import PermissionDenied
3 from rest_framework.exceptions import ValidationError
4
5 from bothub.common.models import RepositoryTranslatedExample
6
7
8 class CanContributeInRepositoryValidator(object):
9 def __call__(self, value):
10 user_authorization = value.get_user_authorization(
11 self.request.user)
12 if not user_authorization.can_contribute:
13 raise PermissionDenied(
14 _('You can\'t contribute in this repository'))
15
16 def set_context(self, serializer):
17 self.request = serializer.context.get('request')
18
19
20 class CanContributeInRepositoryExampleValidator(object):
21 def __call__(self, value):
22 repository = value.repository_update.repository
23 user_authorization = repository.get_user_authorization(
24 self.request.user)
25 if not user_authorization.can_contribute:
26 raise PermissionDenied(
27 _('You can\'t contribute in this repository'))
28
29 def set_context(self, serializer):
30 self.request = serializer.context.get('request')
31
32
33 class CanContributeInRepositoryTranslatedExampleValidator(object):
34 def __call__(self, value):
35 repository = value.original_example.repository_update.repository
36 user_authorization = repository.get_user_authorization(
37 self.request.user)
38 if not user_authorization.can_contribute:
39 raise PermissionDenied(
40 _('You can\'t contribute in this repository'))
41
42 def set_context(self, serializer):
43 self.request = serializer.context.get('request')
44
45
46 class TranslatedExampleEntitiesValidator(object):
47 def __call__(self, attrs):
48 original_example = attrs.get('original_example')
49 entities_valid = RepositoryTranslatedExample.same_entities_validator(
50 list(map(lambda x: dict(x), attrs.get('entities'))),
51 list(map(lambda x: x.to_dict, original_example.entities.all())))
52 if not entities_valid:
53 raise ValidationError({'entities': _('Invalid entities')})
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bothub/api/serializers/translate.py b/bothub/api/serializers/translate.py
--- a/bothub/api/serializers/translate.py
+++ b/bothub/api/serializers/translate.py
@@ -9,6 +9,7 @@
from ..validators import CanContributeInRepositoryTranslatedExampleValidator
from ..validators import CanContributeInRepositoryExampleValidator
from ..validators import TranslatedExampleEntitiesValidator
+from ..validators import TranslatedExampleLanguageValidator
class RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):
@@ -95,6 +96,7 @@
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.validators.append(TranslatedExampleEntitiesValidator())
+ self.validators.append(TranslatedExampleLanguageValidator())
original_example = serializers.PrimaryKeyRelatedField(
queryset=RepositoryExample.objects,
diff --git a/bothub/api/validators.py b/bothub/api/validators.py
--- a/bothub/api/validators.py
+++ b/bothub/api/validators.py
@@ -51,3 +51,11 @@
list(map(lambda x: x.to_dict, original_example.entities.all())))
if not entities_valid:
raise ValidationError({'entities': _('Invalid entities')})
+
+
+class TranslatedExampleLanguageValidator(object):
+ def __call__(self, attrs):
+ original_example = attrs.get('original_example')
+ language = attrs.get('language')
+ if original_example.repository_update.language == language:
+ raise ValidationError({'language': _('Can\'t translate to same language')})
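
To illustrate the behaviour introduced by `TranslatedExampleLanguageValidator`, the snippet below copies its logic but replaces the DRF exception and the model instances with local stand-ins so it runs without a Django project; it is a sketch, not code from the bothub tree.

```python
from types import SimpleNamespace


class ValidationError(Exception):
    """Stand-in for rest_framework.exceptions.ValidationError."""


class TranslatedExampleLanguageValidator:
    # Same check as the new validator: reject a translation into the source language.
    def __call__(self, attrs):
        original_example = attrs.get('original_example')
        language = attrs.get('language')
        if original_example.repository_update.language == language:
            raise ValidationError({'language': "Can't translate to same language"})


original = SimpleNamespace(repository_update=SimpleNamespace(language='en'))
validator = TranslatedExampleLanguageValidator()

validator({'original_example': original, 'language': 'pt'})   # different language: accepted
try:
    validator({'original_example': original, 'language': 'en'})
except ValidationError as err:
    print(err)  # {'language': "Can't translate to same language"}
```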
| {"golden_diff": "diff --git a/bothub/api/serializers/translate.py b/bothub/api/serializers/translate.py\n--- a/bothub/api/serializers/translate.py\n+++ b/bothub/api/serializers/translate.py\n@@ -9,6 +9,7 @@\n from ..validators import CanContributeInRepositoryTranslatedExampleValidator\n from ..validators import CanContributeInRepositoryExampleValidator\n from ..validators import TranslatedExampleEntitiesValidator\n+from ..validators import TranslatedExampleLanguageValidator\n \n \n class RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):\n@@ -95,6 +96,7 @@\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.validators.append(TranslatedExampleEntitiesValidator())\n+ self.validators.append(TranslatedExampleLanguageValidator())\n \n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\ndiff --git a/bothub/api/validators.py b/bothub/api/validators.py\n--- a/bothub/api/validators.py\n+++ b/bothub/api/validators.py\n@@ -51,3 +51,11 @@\n list(map(lambda x: x.to_dict, original_example.entities.all())))\n if not entities_valid:\n raise ValidationError({'entities': _('Invalid entities')})\n+\n+\n+class TranslatedExampleLanguageValidator(object):\n+ def __call__(self, attrs):\n+ original_example = attrs.get('original_example')\n+ language = attrs.get('language')\n+ if original_example.repository_update.language == language:\n+ raise ValidationError({'language': _('Can\\'t translate to same language')})\n", "issue": "Is possible translate example to same language\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom django.utils.translation import gettext as _\n\nfrom bothub.common.models import RepositoryTranslatedExampleEntity\nfrom bothub.common.models import RepositoryTranslatedExample\nfrom bothub.common.models import RepositoryExample\n\nfrom ..validators import CanContributeInRepositoryTranslatedExampleValidator\nfrom ..validators import CanContributeInRepositoryExampleValidator\nfrom ..validators import TranslatedExampleEntitiesValidator\n\n\nclass RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExampleEntity\n fields = [\n 'id',\n 'repository_translated_example',\n 'start',\n 'end',\n 'entity',\n 'created_at',\n 'value',\n ]\n\n repository_translated_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryTranslatedExample.objects,\n validators=[\n CanContributeInRepositoryTranslatedExampleValidator(),\n ],\n help_text='Example translation ID')\n value = serializers.SerializerMethodField()\n\n def get_value(self, obj):\n return obj.value\n\n\nclass RepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExample\n fields = [\n 'id',\n 'original_example',\n 'from_language',\n 'language',\n 'text',\n 'has_valid_entities',\n 'entities',\n 'created_at',\n ]\n\n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\n validators=[\n CanContributeInRepositoryExampleValidator(),\n ],\n help_text=_('Example\\'s ID'))\n from_language = serializers.SerializerMethodField()\n has_valid_entities = serializers.SerializerMethodField()\n entities = RepositoryTranslatedExampleEntitySeralizer(\n many=True,\n read_only=True)\n\n def get_from_language(self, obj):\n return obj.original_example.repository_update.language\n\n def get_has_valid_entities(self, obj):\n return obj.has_valid_entities\n\n\nclass 
NewRepositoryTranslatedExampleEntitySeralizer(\n serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExampleEntity\n fields = [\n 'start',\n 'end',\n 'entity',\n ]\n\n\nclass NewRepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExample\n fields = [\n 'id',\n 'original_example',\n 'language',\n 'text',\n 'has_valid_entities',\n 'entities',\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.validators.append(TranslatedExampleEntitiesValidator())\n\n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\n validators=[\n CanContributeInRepositoryExampleValidator(),\n ],\n help_text=_('Example\\'s ID'))\n has_valid_entities = serializers.SerializerMethodField()\n entities = NewRepositoryTranslatedExampleEntitySeralizer(\n many=True,\n style={'text_field': 'text'})\n\n def get_has_valid_entities(self, obj):\n return obj.has_valid_entities\n\n def create(self, validated_data):\n entities_data = validated_data.pop('entities')\n\n translated = self.Meta.model.objects.create(**validated_data)\n for entity_data in entities_data:\n RepositoryTranslatedExampleEntity.objects.create(\n repository_translated_example=translated,\n **entity_data)\n return translated\n", "path": "bothub/api/serializers/translate.py"}, {"content": "from django.utils.translation import gettext as _\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.exceptions import ValidationError\n\nfrom bothub.common.models import RepositoryTranslatedExample\n\n\nclass CanContributeInRepositoryValidator(object):\n def __call__(self, value):\n user_authorization = value.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryExampleValidator(object):\n def __call__(self, value):\n repository = value.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryTranslatedExampleValidator(object):\n def __call__(self, value):\n repository = value.original_example.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass TranslatedExampleEntitiesValidator(object):\n def __call__(self, attrs):\n original_example = attrs.get('original_example')\n entities_valid = RepositoryTranslatedExample.same_entities_validator(\n list(map(lambda x: dict(x), attrs.get('entities'))),\n list(map(lambda x: x.to_dict, original_example.entities.all())))\n if not entities_valid:\n raise ValidationError({'entities': _('Invalid entities')})\n", "path": "bothub/api/validators.py"}], "after_files": [{"content": "from rest_framework import serializers\n\nfrom django.utils.translation import gettext as _\n\nfrom bothub.common.models import RepositoryTranslatedExampleEntity\nfrom bothub.common.models import 
RepositoryTranslatedExample\nfrom bothub.common.models import RepositoryExample\n\nfrom ..validators import CanContributeInRepositoryTranslatedExampleValidator\nfrom ..validators import CanContributeInRepositoryExampleValidator\nfrom ..validators import TranslatedExampleEntitiesValidator\nfrom ..validators import TranslatedExampleLanguageValidator\n\n\nclass RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExampleEntity\n fields = [\n 'id',\n 'repository_translated_example',\n 'start',\n 'end',\n 'entity',\n 'created_at',\n 'value',\n ]\n\n repository_translated_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryTranslatedExample.objects,\n validators=[\n CanContributeInRepositoryTranslatedExampleValidator(),\n ],\n help_text='Example translation ID')\n value = serializers.SerializerMethodField()\n\n def get_value(self, obj):\n return obj.value\n\n\nclass RepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExample\n fields = [\n 'id',\n 'original_example',\n 'from_language',\n 'language',\n 'text',\n 'has_valid_entities',\n 'entities',\n 'created_at',\n ]\n\n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\n validators=[\n CanContributeInRepositoryExampleValidator(),\n ],\n help_text=_('Example\\'s ID'))\n from_language = serializers.SerializerMethodField()\n has_valid_entities = serializers.SerializerMethodField()\n entities = RepositoryTranslatedExampleEntitySeralizer(\n many=True,\n read_only=True)\n\n def get_from_language(self, obj):\n return obj.original_example.repository_update.language\n\n def get_has_valid_entities(self, obj):\n return obj.has_valid_entities\n\n\nclass NewRepositoryTranslatedExampleEntitySeralizer(\n serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExampleEntity\n fields = [\n 'start',\n 'end',\n 'entity',\n ]\n\n\nclass NewRepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExample\n fields = [\n 'id',\n 'original_example',\n 'language',\n 'text',\n 'has_valid_entities',\n 'entities',\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.validators.append(TranslatedExampleEntitiesValidator())\n self.validators.append(TranslatedExampleLanguageValidator())\n\n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\n validators=[\n CanContributeInRepositoryExampleValidator(),\n ],\n help_text=_('Example\\'s ID'))\n has_valid_entities = serializers.SerializerMethodField()\n entities = NewRepositoryTranslatedExampleEntitySeralizer(\n many=True,\n style={'text_field': 'text'})\n\n def get_has_valid_entities(self, obj):\n return obj.has_valid_entities\n\n def create(self, validated_data):\n entities_data = validated_data.pop('entities')\n\n translated = self.Meta.model.objects.create(**validated_data)\n for entity_data in entities_data:\n RepositoryTranslatedExampleEntity.objects.create(\n repository_translated_example=translated,\n **entity_data)\n return translated\n", "path": "bothub/api/serializers/translate.py"}, {"content": "from django.utils.translation import gettext as _\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.exceptions import ValidationError\n\nfrom bothub.common.models import RepositoryTranslatedExample\n\n\nclass CanContributeInRepositoryValidator(object):\n def __call__(self, 
value):\n user_authorization = value.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryExampleValidator(object):\n def __call__(self, value):\n repository = value.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryTranslatedExampleValidator(object):\n def __call__(self, value):\n repository = value.original_example.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass TranslatedExampleEntitiesValidator(object):\n def __call__(self, attrs):\n original_example = attrs.get('original_example')\n entities_valid = RepositoryTranslatedExample.same_entities_validator(\n list(map(lambda x: dict(x), attrs.get('entities'))),\n list(map(lambda x: x.to_dict, original_example.entities.all())))\n if not entities_valid:\n raise ValidationError({'entities': _('Invalid entities')})\n\n\nclass TranslatedExampleLanguageValidator(object):\n def __call__(self, attrs):\n original_example = attrs.get('original_example')\n language = attrs.get('language')\n if original_example.repository_update.language == language:\n raise ValidationError({'language': _('Can\\'t translate to same language')})\n", "path": "bothub/api/validators.py"}]} | 1,713 | 347 |
gh_patches_debug_37497 | rasdani/github-patches | git_diff | Parsl__parsl-2221 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PBSPro scheduler options ignored
**Describe the bug**
The PBSPro provider template adds `${scheduler_options}` to the bottom of the list of #PBS commands. However, PBSPro seems to consider only the first #PBS option and ignore any later competing ones. This means that a newly specified select option with GPUs is superseded by the default one and ignored. We can resolve this by moving the user-defined scheduler options to the top of the #PBS list in the template.
**To Reproduce**
Use the PBSPro provider (e.g., at ALCF's edge testbed for Polaris) and try to specify a new select option: `-l select=1:ncpus=32:ngpus=1`. These options are ignored in favor of the default specified above in the template.
**Expected behavior**
I would like scheduler_options to be placed first so that it can be used to pass specific options.
**Environment**
- Parsl 1.2.0
**Distributed Environment**
- Where are you running the Parsl script from? ALCF
- Where do you need the workers to run? Compute nodes
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/providers/pbspro/template.py`
Content:
```
1 template_string = '''#!/bin/bash
2
3 #PBS -S /bin/bash
4 #PBS -N ${jobname}
5 #PBS -m n
6 #PBS -l walltime=$walltime
7 #PBS -l select=${nodes_per_block}:ncpus=${ncpus}
8 #PBS -o ${submit_script_dir}/${jobname}.submit.stdout
9 #PBS -e ${submit_script_dir}/${jobname}.submit.stderr
10 ${scheduler_options}
11
12 ${worker_init}
13
14 export JOBNAME="${jobname}"
15
16 ${user_script}
17
18 '''
19
```
Path: `parsl/providers/pbspro/pbspro.py`
Content:
```
1 import logging
2 import os
3 import time
4
5 from parsl.channels import LocalChannel
6 from parsl.launchers import SingleNodeLauncher
7 from parsl.providers.pbspro.template import template_string
8 from parsl.providers import TorqueProvider
9 from parsl.providers.provider_base import JobState, JobStatus
10
11 logger = logging.getLogger(__name__)
12
13
14 class PBSProProvider(TorqueProvider):
15 """PBS Pro Execution Provider
16
17 Parameters
18 ----------
19 channel : Channel
20 Channel for accessing this provider. Possible channels include
21 :class:`~parsl.channels.LocalChannel` (the default),
22 :class:`~parsl.channels.SSHChannel`, or
23 :class:`~parsl.channels.SSHInteractiveLoginChannel`.
24 account : str
25 Account the job will be charged against.
26 queue : str
27 Queue to request blocks from.
28 nodes_per_block : int
29 Nodes to provision per block.
30 cpus_per_node : int
31 CPUs to provision per node.
32 init_blocks : int
33 Number of blocks to provision at the start of the run. Default is 1.
34 min_blocks : int
35 Minimum number of blocks to maintain. Default is 0.
36 max_blocks : int
37 Maximum number of blocks to maintain.
38 parallelism : float
39 Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
40 scaling where as many resources as possible are used; parallelism close to 0 represents
41 the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
42 walltime : str
43 Walltime requested per block in HH:MM:SS.
44 scheduler_options : str
45 String to prepend to the #PBS blocks in the submit script to the scheduler.
46 worker_init : str
47 Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.
48 launcher : Launcher
49 Launcher for this provider. The default is
50 :class:`~parsl.launchers.SingleNodeLauncher`.
51 """
52 def __init__(self,
53 channel=LocalChannel(),
54 account=None,
55 queue=None,
56 scheduler_options='',
57 worker_init='',
58 nodes_per_block=1,
59 cpus_per_node=1,
60 init_blocks=1,
61 min_blocks=0,
62 max_blocks=1,
63 parallelism=1,
64 launcher=SingleNodeLauncher(),
65 walltime="00:20:00",
66 cmd_timeout=120):
67 super().__init__(channel,
68 account,
69 queue,
70 scheduler_options,
71 worker_init,
72 nodes_per_block,
73 init_blocks,
74 min_blocks,
75 max_blocks,
76 parallelism,
77 launcher,
78 walltime,
79 cmd_timeout=cmd_timeout)
80
81 self.template_string = template_string
82 self._label = 'pbspro'
83 self.cpus_per_node = cpus_per_node
84
85 def submit(self, command, tasks_per_node, job_name="parsl"):
86 """Submits the command job.
87
88 Parameters
89 ----------
90 command : str
91 Command to be executed on the remote side.
92 tasks_per_node : int
93 Command invocations to be launched per node.
94 job_name : str
95 Identifier for job.
96
97 Returns
98 -------
99 None
100 If at capacity and cannot provision more
101 job_id : str
102 Identifier for the job
103 """
104
105 job_name = "{0}.{1}".format(job_name, time.time())
106
107 script_path = os.path.abspath("{0}/{1}.submit".format(self.script_dir, job_name))
108
109 logger.debug("Requesting {} nodes_per_block, {} tasks_per_node".format(
110 self.nodes_per_block, tasks_per_node)
111 )
112
113 job_config = {}
114 job_config["submit_script_dir"] = self.channel.script_dir
115 job_config["nodes_per_block"] = self.nodes_per_block
116 job_config["ncpus"] = self.cpus_per_node
117 job_config["walltime"] = self.walltime
118 job_config["scheduler_options"] = self.scheduler_options
119 job_config["worker_init"] = self.worker_init
120 job_config["user_script"] = command
121
122 # Wrap the command
123 job_config["user_script"] = self.launcher(command,
124 tasks_per_node,
125 self.nodes_per_block)
126
127 logger.debug("Writing submit script")
128 self._write_submit_script(self.template_string, script_path, job_name, job_config)
129
130 channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)
131
132 submit_options = ''
133 if self.queue is not None:
134 submit_options = '{0} -q {1}'.format(submit_options, self.queue)
135 if self.account is not None:
136 submit_options = '{0} -A {1}'.format(submit_options, self.account)
137
138 launch_cmd = "qsub {0} {1}".format(submit_options, channel_script_path)
139 retcode, stdout, stderr = self.execute_wait(launch_cmd)
140
141 job_id = None
142 if retcode == 0:
143 for line in stdout.split('\n'):
144 if line.strip():
145 job_id = line.strip()
146 self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}
147 else:
148 message = "Command '{}' failed with return code {}".format(launch_cmd, retcode)
149 if (stdout is not None) and (stderr is not None):
150 message += "\nstderr:{}\nstdout{}".format(stderr.strip(), stdout.strip())
151 logger.error(message)
152
153 return job_id
154
155 @property
156 def status_polling_interval(self):
157 return 60
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsl/providers/pbspro/pbspro.py b/parsl/providers/pbspro/pbspro.py
--- a/parsl/providers/pbspro/pbspro.py
+++ b/parsl/providers/pbspro/pbspro.py
@@ -43,6 +43,9 @@
Walltime requested per block in HH:MM:SS.
scheduler_options : str
String to prepend to the #PBS blocks in the submit script to the scheduler.
+ select_options : str
+ String to append to the #PBS -l select block in the submit script to the scheduler. This can be used to
+ specify ngpus.
worker_init : str
Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.
launcher : Launcher
@@ -54,6 +57,7 @@
account=None,
queue=None,
scheduler_options='',
+ select_options='',
worker_init='',
nodes_per_block=1,
cpus_per_node=1,
@@ -81,6 +85,7 @@
self.template_string = template_string
self._label = 'pbspro'
self.cpus_per_node = cpus_per_node
+ self.select_options = select_options
def submit(self, command, tasks_per_node, job_name="parsl"):
"""Submits the command job.
@@ -119,6 +124,12 @@
job_config["worker_init"] = self.worker_init
job_config["user_script"] = command
+ # Add a colon to select_options if one isn't included
+ if self.select_options and not self.select_options.startswith(":"):
+ self.select_options = ":" + self.select_options
+
+ job_config["select_options"] = self.select_options
+
# Wrap the command
job_config["user_script"] = self.launcher(command,
tasks_per_node,
diff --git a/parsl/providers/pbspro/template.py b/parsl/providers/pbspro/template.py
--- a/parsl/providers/pbspro/template.py
+++ b/parsl/providers/pbspro/template.py
@@ -4,7 +4,7 @@
#PBS -N ${jobname}
#PBS -m n
#PBS -l walltime=$walltime
-#PBS -l select=${nodes_per_block}:ncpus=${ncpus}
+#PBS -l select=${nodes_per_block}:ncpus=${ncpus}${select_options}
#PBS -o ${submit_script_dir}/${jobname}.submit.stdout
#PBS -e ${submit_script_dir}/${jobname}.submit.stderr
${scheduler_options}
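
To make the new `select_options` keyword concrete, the snippet below reproduces only the affected template line and the colon normalisation added in `submit()`; it is a self-contained illustration rather than code taken from the Parsl tree, and the values are placeholders.

```python
from string import Template

# Only the -l select line from the PBSPro template is reproduced here.
select_line = Template("#PBS -l select=${nodes_per_block}:ncpus=${ncpus}${select_options}")

select_options = "ngpus=1"                      # as a user might pass it to PBSProProvider
if select_options and not select_options.startswith(":"):
    select_options = ":" + select_options       # same normalisation as the patched submit()

print(select_line.substitute(nodes_per_block=1, ncpus=32, select_options=select_options))
# -> #PBS -l select=1:ncpus=32:ngpus=1
```

Because the GPU request now lives on the same `-l select` directive as the defaults, it is no longer shadowed by an earlier competing #PBS line.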
| {"golden_diff": "diff --git a/parsl/providers/pbspro/pbspro.py b/parsl/providers/pbspro/pbspro.py\n--- a/parsl/providers/pbspro/pbspro.py\n+++ b/parsl/providers/pbspro/pbspro.py\n@@ -43,6 +43,9 @@\n Walltime requested per block in HH:MM:SS.\n scheduler_options : str\n String to prepend to the #PBS blocks in the submit script to the scheduler.\n+ select_options : str\n+ String to append to the #PBS -l select block in the submit script to the scheduler. This can be used to\n+ specify ngpus.\n worker_init : str\n Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.\n launcher : Launcher\n@@ -54,6 +57,7 @@\n account=None,\n queue=None,\n scheduler_options='',\n+ select_options='',\n worker_init='',\n nodes_per_block=1,\n cpus_per_node=1,\n@@ -81,6 +85,7 @@\n self.template_string = template_string\n self._label = 'pbspro'\n self.cpus_per_node = cpus_per_node\n+ self.select_options = select_options\n \n def submit(self, command, tasks_per_node, job_name=\"parsl\"):\n \"\"\"Submits the command job.\n@@ -119,6 +124,12 @@\n job_config[\"worker_init\"] = self.worker_init\n job_config[\"user_script\"] = command\n \n+ # Add a colon to select_options if one isn't included\n+ if self.select_options and not self.select_options.startswith(\":\"):\n+ self.select_options = \":\" + self.select_options\n+\n+ job_config[\"select_options\"] = self.select_options\n+\n # Wrap the command\n job_config[\"user_script\"] = self.launcher(command,\n tasks_per_node,\ndiff --git a/parsl/providers/pbspro/template.py b/parsl/providers/pbspro/template.py\n--- a/parsl/providers/pbspro/template.py\n+++ b/parsl/providers/pbspro/template.py\n@@ -4,7 +4,7 @@\n #PBS -N ${jobname}\n #PBS -m n\n #PBS -l walltime=$walltime\n-#PBS -l select=${nodes_per_block}:ncpus=${ncpus}\n+#PBS -l select=${nodes_per_block}:ncpus=${ncpus}${select_options}\n #PBS -o ${submit_script_dir}/${jobname}.submit.stdout\n #PBS -e ${submit_script_dir}/${jobname}.submit.stderr\n ${scheduler_options}\n", "issue": "PBSPro scheduler options ignored\n**Describe the bug**\r\nThe PBSPro provider template adds `${scheduler_options}` to the bottom of the list of #PBS commands. However, PBSPro seems to only consider the first #PBS option and ignore any later competing ones. This means specifying a new select option with gpus is superseded by the default one and ignored. We can resolve this by moving the user defined scheduler options to the top of the #PBS list in the template.\r\n\r\n**To Reproduce**\r\nUse the PBSPro provider (e.g., at ALCF's edge testbed for Polaris) and try to specify a new select option:`-l select=1:ncpus=32:ngpus=1`. These options are ignored in favor of the default specified above in the template.\r\n\r\n**Expected behavior**\r\nI would like scheduler_options to be put first so they can be used to enter specific options.\r\n\r\n**Environment**\r\n - Parsl 1.2.0\r\n\r\n**Distributed Environment**\r\n- Where are you running the Parsl script from ? ALCF\r\n- Where do you need the workers to run ? 
Compute nodes\r\n\n", "before_files": [{"content": "template_string = '''#!/bin/bash\n\n#PBS -S /bin/bash\n#PBS -N ${jobname}\n#PBS -m n\n#PBS -l walltime=$walltime\n#PBS -l select=${nodes_per_block}:ncpus=${ncpus}\n#PBS -o ${submit_script_dir}/${jobname}.submit.stdout\n#PBS -e ${submit_script_dir}/${jobname}.submit.stderr\n${scheduler_options}\n\n${worker_init}\n\nexport JOBNAME=\"${jobname}\"\n\n${user_script}\n\n'''\n", "path": "parsl/providers/pbspro/template.py"}, {"content": "import logging\nimport os\nimport time\n\nfrom parsl.channels import LocalChannel\nfrom parsl.launchers import SingleNodeLauncher\nfrom parsl.providers.pbspro.template import template_string\nfrom parsl.providers import TorqueProvider\nfrom parsl.providers.provider_base import JobState, JobStatus\n\nlogger = logging.getLogger(__name__)\n\n\nclass PBSProProvider(TorqueProvider):\n \"\"\"PBS Pro Execution Provider\n\n Parameters\n ----------\n channel : Channel\n Channel for accessing this provider. Possible channels include\n :class:`~parsl.channels.LocalChannel` (the default),\n :class:`~parsl.channels.SSHChannel`, or\n :class:`~parsl.channels.SSHInteractiveLoginChannel`.\n account : str\n Account the job will be charged against.\n queue : str\n Queue to request blocks from.\n nodes_per_block : int\n Nodes to provision per block.\n cpus_per_node : int\n CPUs to provision per node.\n init_blocks : int\n Number of blocks to provision at the start of the run. Default is 1.\n min_blocks : int\n Minimum number of blocks to maintain. Default is 0.\n max_blocks : int\n Maximum number of blocks to maintain.\n parallelism : float\n Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive\n scaling where as many resources as possible are used; parallelism close to 0 represents\n the opposite situation in which as few resources as possible (i.e., min_blocks) are used.\n walltime : str\n Walltime requested per block in HH:MM:SS.\n scheduler_options : str\n String to prepend to the #PBS blocks in the submit script to the scheduler.\n worker_init : str\n Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.\n launcher : Launcher\n Launcher for this provider. 
The default is\n :class:`~parsl.launchers.SingleNodeLauncher`.\n \"\"\"\n def __init__(self,\n channel=LocalChannel(),\n account=None,\n queue=None,\n scheduler_options='',\n worker_init='',\n nodes_per_block=1,\n cpus_per_node=1,\n init_blocks=1,\n min_blocks=0,\n max_blocks=1,\n parallelism=1,\n launcher=SingleNodeLauncher(),\n walltime=\"00:20:00\",\n cmd_timeout=120):\n super().__init__(channel,\n account,\n queue,\n scheduler_options,\n worker_init,\n nodes_per_block,\n init_blocks,\n min_blocks,\n max_blocks,\n parallelism,\n launcher,\n walltime,\n cmd_timeout=cmd_timeout)\n\n self.template_string = template_string\n self._label = 'pbspro'\n self.cpus_per_node = cpus_per_node\n\n def submit(self, command, tasks_per_node, job_name=\"parsl\"):\n \"\"\"Submits the command job.\n\n Parameters\n ----------\n command : str\n Command to be executed on the remote side.\n tasks_per_node : int\n Command invocations to be launched per node.\n job_name : str\n Identifier for job.\n\n Returns\n -------\n None\n If at capacity and cannot provision more\n job_id : str\n Identifier for the job\n \"\"\"\n\n job_name = \"{0}.{1}\".format(job_name, time.time())\n\n script_path = os.path.abspath(\"{0}/{1}.submit\".format(self.script_dir, job_name))\n\n logger.debug(\"Requesting {} nodes_per_block, {} tasks_per_node\".format(\n self.nodes_per_block, tasks_per_node)\n )\n\n job_config = {}\n job_config[\"submit_script_dir\"] = self.channel.script_dir\n job_config[\"nodes_per_block\"] = self.nodes_per_block\n job_config[\"ncpus\"] = self.cpus_per_node\n job_config[\"walltime\"] = self.walltime\n job_config[\"scheduler_options\"] = self.scheduler_options\n job_config[\"worker_init\"] = self.worker_init\n job_config[\"user_script\"] = command\n\n # Wrap the command\n job_config[\"user_script\"] = self.launcher(command,\n tasks_per_node,\n self.nodes_per_block)\n\n logger.debug(\"Writing submit script\")\n self._write_submit_script(self.template_string, script_path, job_name, job_config)\n\n channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)\n\n submit_options = ''\n if self.queue is not None:\n submit_options = '{0} -q {1}'.format(submit_options, self.queue)\n if self.account is not None:\n submit_options = '{0} -A {1}'.format(submit_options, self.account)\n\n launch_cmd = \"qsub {0} {1}\".format(submit_options, channel_script_path)\n retcode, stdout, stderr = self.execute_wait(launch_cmd)\n\n job_id = None\n if retcode == 0:\n for line in stdout.split('\\n'):\n if line.strip():\n job_id = line.strip()\n self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}\n else:\n message = \"Command '{}' failed with return code {}\".format(launch_cmd, retcode)\n if (stdout is not None) and (stderr is not None):\n message += \"\\nstderr:{}\\nstdout{}\".format(stderr.strip(), stdout.strip())\n logger.error(message)\n\n return job_id\n\n @property\n def status_polling_interval(self):\n return 60\n", "path": "parsl/providers/pbspro/pbspro.py"}], "after_files": [{"content": "template_string = '''#!/bin/bash\n\n#PBS -S /bin/bash\n#PBS -N ${jobname}\n#PBS -m n\n#PBS -l walltime=$walltime\n#PBS -l select=${nodes_per_block}:ncpus=${ncpus}${select_options}\n#PBS -o ${submit_script_dir}/${jobname}.submit.stdout\n#PBS -e ${submit_script_dir}/${jobname}.submit.stderr\n${scheduler_options}\n\n${worker_init}\n\nexport JOBNAME=\"${jobname}\"\n\n${user_script}\n\n'''\n", "path": "parsl/providers/pbspro/template.py"}, {"content": "import logging\nimport os\nimport time\n\nfrom 
parsl.channels import LocalChannel\nfrom parsl.launchers import SingleNodeLauncher\nfrom parsl.providers.pbspro.template import template_string\nfrom parsl.providers import TorqueProvider\nfrom parsl.providers.provider_base import JobState, JobStatus\n\nlogger = logging.getLogger(__name__)\n\n\nclass PBSProProvider(TorqueProvider):\n \"\"\"PBS Pro Execution Provider\n\n Parameters\n ----------\n channel : Channel\n Channel for accessing this provider. Possible channels include\n :class:`~parsl.channels.LocalChannel` (the default),\n :class:`~parsl.channels.SSHChannel`, or\n :class:`~parsl.channels.SSHInteractiveLoginChannel`.\n account : str\n Account the job will be charged against.\n queue : str\n Queue to request blocks from.\n nodes_per_block : int\n Nodes to provision per block.\n cpus_per_node : int\n CPUs to provision per node.\n init_blocks : int\n Number of blocks to provision at the start of the run. Default is 1.\n min_blocks : int\n Minimum number of blocks to maintain. Default is 0.\n max_blocks : int\n Maximum number of blocks to maintain.\n parallelism : float\n Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive\n scaling where as many resources as possible are used; parallelism close to 0 represents\n the opposite situation in which as few resources as possible (i.e., min_blocks) are used.\n walltime : str\n Walltime requested per block in HH:MM:SS.\n scheduler_options : str\n String to prepend to the #PBS blocks in the submit script to the scheduler.\n select_options : str\n String to append to the #PBS -l select block in the submit script to the scheduler. This can be used to\n specify ngpus.\n worker_init : str\n Command to be run before starting a worker, such as 'module load Anaconda; source activate env'.\n launcher : Launcher\n Launcher for this provider. 
The default is\n :class:`~parsl.launchers.SingleNodeLauncher`.\n \"\"\"\n def __init__(self,\n channel=LocalChannel(),\n account=None,\n queue=None,\n scheduler_options='',\n select_options='',\n worker_init='',\n nodes_per_block=1,\n cpus_per_node=1,\n init_blocks=1,\n min_blocks=0,\n max_blocks=1,\n parallelism=1,\n launcher=SingleNodeLauncher(),\n walltime=\"00:20:00\",\n cmd_timeout=120):\n super().__init__(channel,\n account,\n queue,\n scheduler_options,\n worker_init,\n nodes_per_block,\n init_blocks,\n min_blocks,\n max_blocks,\n parallelism,\n launcher,\n walltime,\n cmd_timeout=cmd_timeout)\n\n self.template_string = template_string\n self._label = 'pbspro'\n self.cpus_per_node = cpus_per_node\n self.select_options = select_options\n\n def submit(self, command, tasks_per_node, job_name=\"parsl\"):\n \"\"\"Submits the command job.\n\n Parameters\n ----------\n command : str\n Command to be executed on the remote side.\n tasks_per_node : int\n Command invocations to be launched per node.\n job_name : str\n Identifier for job.\n\n Returns\n -------\n None\n If at capacity and cannot provision more\n job_id : str\n Identifier for the job\n \"\"\"\n\n job_name = \"{0}.{1}\".format(job_name, time.time())\n\n script_path = os.path.abspath(\"{0}/{1}.submit\".format(self.script_dir, job_name))\n\n logger.debug(\"Requesting {} nodes_per_block, {} tasks_per_node\".format(\n self.nodes_per_block, tasks_per_node)\n )\n\n job_config = {}\n job_config[\"submit_script_dir\"] = self.channel.script_dir\n job_config[\"nodes_per_block\"] = self.nodes_per_block\n job_config[\"ncpus\"] = self.cpus_per_node\n job_config[\"walltime\"] = self.walltime\n job_config[\"scheduler_options\"] = self.scheduler_options\n job_config[\"worker_init\"] = self.worker_init\n job_config[\"user_script\"] = command\n\n # Add a colon to select_options if one isn't included\n if self.select_options and not self.select_options.startswith(\":\"):\n self.select_options = \":\" + self.select_options\n\n job_config[\"select_options\"] = self.select_options\n\n # Wrap the command\n job_config[\"user_script\"] = self.launcher(command,\n tasks_per_node,\n self.nodes_per_block)\n\n logger.debug(\"Writing submit script\")\n self._write_submit_script(self.template_string, script_path, job_name, job_config)\n\n channel_script_path = self.channel.push_file(script_path, self.channel.script_dir)\n\n submit_options = ''\n if self.queue is not None:\n submit_options = '{0} -q {1}'.format(submit_options, self.queue)\n if self.account is not None:\n submit_options = '{0} -A {1}'.format(submit_options, self.account)\n\n launch_cmd = \"qsub {0} {1}\".format(submit_options, channel_script_path)\n retcode, stdout, stderr = self.execute_wait(launch_cmd)\n\n job_id = None\n if retcode == 0:\n for line in stdout.split('\\n'):\n if line.strip():\n job_id = line.strip()\n self.resources[job_id] = {'job_id': job_id, 'status': JobStatus(JobState.PENDING)}\n else:\n message = \"Command '{}' failed with return code {}\".format(launch_cmd, retcode)\n if (stdout is not None) and (stderr is not None):\n message += \"\\nstderr:{}\\nstdout{}\".format(stderr.strip(), stdout.strip())\n logger.error(message)\n\n return job_id\n\n @property\n def status_polling_interval(self):\n return 60\n", "path": "parsl/providers/pbspro/pbspro.py"}]} | 2,224 | 569 |
gh_patches_debug_18321 | rasdani/github-patches | git_diff | crytic__slither-2394 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
filter `name-reused` to only run on Truffle projects
The detector should check which platform was used with https://crytic.github.io/crytic-compile/crytic_compile/crytic_compile.html#CryticCompile.platform and https://crytic.github.io/slither/slither/core/compilation_unit.html#SlitherCompilationUnit.crytic_compile
https://github.com/crytic/slither/blob/13d7d9f66a6be4f798478fa3735fb63444b46c3d/slither/detectors/slither/name_reused.py#L51-L61
https://github.com/crytic/crytic-compile/blob/b5c538aaa66be44b7a68d9723881a7eba2c20898/crytic_compile/platform/truffle.py#L83-L90
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/detectors/slither/name_reused.py`
Content:
```
1 from collections import defaultdict
2 from typing import List
3
4 from slither.core.compilation_unit import SlitherCompilationUnit
5 from slither.core.declarations import Contract
6 from slither.detectors.abstract_detector import (
7 AbstractDetector,
8 DetectorClassification,
9 DETECTOR_INFO,
10 )
11 from slither.utils.output import Output
12
13
14 def _find_missing_inheritance(compilation_unit: SlitherCompilationUnit) -> List[Contract]:
15 """
16 Filter contracts with missing inheritance to return only the "most base" contracts
17 in the inheritance tree.
18 :param slither:
19 :return:
20 """
21 missings = compilation_unit.contracts_with_missing_inheritance
22
23 ret = []
24 for b in missings:
25 is_most_base = True
26 for inheritance in b.immediate_inheritance:
27 if inheritance in missings:
28 is_most_base = False
29 if is_most_base:
30 ret.append(b)
31
32 return ret
33
34
35 class NameReused(AbstractDetector):
36 ARGUMENT = "name-reused"
37 HELP = "Contract's name reused"
38 IMPACT = DetectorClassification.HIGH
39 CONFIDENCE = DetectorClassification.HIGH
40
41 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#name-reused"
42
43 WIKI_TITLE = "Name reused"
44
45 # region wiki_description
46 WIKI_DESCRIPTION = """If a codebase has two contracts the similar names, the compilation artifacts
47 will not contain one of the contracts with the duplicate name."""
48 # endregion wiki_description
49
50 # region wiki_exploit_scenario
51 WIKI_EXPLOIT_SCENARIO = """
52 Bob's `truffle` codebase has two contracts named `ERC20`.
53 When `truffle compile` runs, only one of the two contracts will generate artifacts in `build/contracts`.
54 As a result, the second contract cannot be analyzed.
55 """
56 # endregion wiki_exploit_scenario
57
58 WIKI_RECOMMENDATION = "Rename the contract."
59
60 # pylint: disable=too-many-locals,too-many-branches
61 def _detect(self) -> List[Output]:
62 results = []
63 compilation_unit = self.compilation_unit
64
65 all_contracts = compilation_unit.contracts
66 all_contracts_name = [c.name for c in all_contracts]
67 contracts_name_reused = {
68 contract for contract in all_contracts_name if all_contracts_name.count(contract) > 1
69 }
70
71 names_reused = {
72 name: compilation_unit.get_contract_from_name(name) for name in contracts_name_reused
73 }
74
75 # First show the contracts that we know are missing
76 incorrectly_constructed = [
77 contract
78 for contract in compilation_unit.contracts
79 if contract.is_incorrectly_constructed
80 ]
81
82 inheritance_corrupted = defaultdict(list)
83 for contract in incorrectly_constructed:
84 for father in contract.inheritance:
85 inheritance_corrupted[father.name].append(contract)
86
87 for contract_name, files in names_reused.items():
88 info: DETECTOR_INFO = [contract_name, " is re-used:\n"]
89 for file in files:
90 if file is None:
91 info += ["\t- In an file not found, most likely in\n"]
92 else:
93 info += ["\t- ", file, "\n"]
94
95 if contract_name in inheritance_corrupted:
96 info += ["\tAs a result, the inherited contracts are not correctly analyzed:\n"]
97 for corrupted in inheritance_corrupted[contract_name]:
98 info += ["\t\t- ", corrupted, "\n"]
99 res = self.generate_result(info)
100 results.append(res)
101
102 # Then show the contracts for which one of the father was not found
103 # Here we are not able to know
104 most_base_with_missing_inheritance = _find_missing_inheritance(compilation_unit)
105
106 for b in most_base_with_missing_inheritance:
107 info = [b, " inherits from a contract for which the name is reused.\n"]
108 if b.inheritance:
109 info += ["\t- Slither could not determine which contract has a duplicate name:\n"]
110 for inheritance in b.inheritance:
111 info += ["\t\t-", inheritance, "\n"]
112 info += ["\t- Check if:\n"]
113 info += ["\t\t- A inherited contract is missing from this list,\n"]
114 info += ["\t\t- The contract are imported from the correct files.\n"]
115 if b.derived_contracts:
116 info += [f"\t- This issue impacts the contracts inheriting from {b.name}:\n"]
117 for derived in b.derived_contracts:
118 info += ["\t\t-", derived, "\n"]
119 res = self.generate_result(info)
120 results.append(res)
121 return results
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/slither/detectors/slither/name_reused.py b/slither/detectors/slither/name_reused.py
--- a/slither/detectors/slither/name_reused.py
+++ b/slither/detectors/slither/name_reused.py
@@ -1,6 +1,8 @@
from collections import defaultdict
from typing import List
+from crytic_compile.platform import Type as PlatformType
+
from slither.core.compilation_unit import SlitherCompilationUnit
from slither.core.declarations import Contract
from slither.detectors.abstract_detector import (
@@ -61,6 +63,8 @@
def _detect(self) -> List[Output]:
results = []
compilation_unit = self.compilation_unit
+ if compilation_unit.core.crytic_compile.platform != PlatformType.TRUFFLE:
+ return []
all_contracts = compilation_unit.contracts
all_contracts_name = [c.name for c in all_contracts]
| {"golden_diff": "diff --git a/slither/detectors/slither/name_reused.py b/slither/detectors/slither/name_reused.py\n--- a/slither/detectors/slither/name_reused.py\n+++ b/slither/detectors/slither/name_reused.py\n@@ -1,6 +1,8 @@\n from collections import defaultdict\n from typing import List\n \n+from crytic_compile.platform import Type as PlatformType\n+\n from slither.core.compilation_unit import SlitherCompilationUnit\n from slither.core.declarations import Contract\n from slither.detectors.abstract_detector import (\n@@ -61,6 +63,8 @@\n def _detect(self) -> List[Output]:\n results = []\n compilation_unit = self.compilation_unit\n+ if compilation_unit.core.crytic_compile.platform != PlatformType.TRUFFLE:\n+ return []\n \n all_contracts = compilation_unit.contracts\n all_contracts_name = [c.name for c in all_contracts]\n", "issue": "filter `name-reused` to only run on Truffle projects\nThe detector should check which platform was used with https://crytic.github.io/crytic-compile/crytic_compile/crytic_compile.html#CryticCompile.platform and https://crytic.github.io/slither/slither/core/compilation_unit.html#SlitherCompilationUnit.crytic_compile \r\nhttps://github.com/crytic/slither/blob/13d7d9f66a6be4f798478fa3735fb63444b46c3d/slither/detectors/slither/name_reused.py#L51-L61\r\n\r\nhttps://github.com/crytic/crytic-compile/blob/b5c538aaa66be44b7a68d9723881a7eba2c20898/crytic_compile/platform/truffle.py#L83-L90\n", "before_files": [{"content": "from collections import defaultdict\nfrom typing import List\n\nfrom slither.core.compilation_unit import SlitherCompilationUnit\nfrom slither.core.declarations import Contract\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.utils.output import Output\n\n\ndef _find_missing_inheritance(compilation_unit: SlitherCompilationUnit) -> List[Contract]:\n \"\"\"\n Filter contracts with missing inheritance to return only the \"most base\" contracts\n in the inheritance tree.\n :param slither:\n :return:\n \"\"\"\n missings = compilation_unit.contracts_with_missing_inheritance\n\n ret = []\n for b in missings:\n is_most_base = True\n for inheritance in b.immediate_inheritance:\n if inheritance in missings:\n is_most_base = False\n if is_most_base:\n ret.append(b)\n\n return ret\n\n\nclass NameReused(AbstractDetector):\n ARGUMENT = \"name-reused\"\n HELP = \"Contract's name reused\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#name-reused\"\n\n WIKI_TITLE = \"Name reused\"\n\n # region wiki_description\n WIKI_DESCRIPTION = \"\"\"If a codebase has two contracts the similar names, the compilation artifacts\nwill not contain one of the contracts with the duplicate name.\"\"\"\n # endregion wiki_description\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\nBob's `truffle` codebase has two contracts named `ERC20`.\nWhen `truffle compile` runs, only one of the two contracts will generate artifacts in `build/contracts`.\nAs a result, the second contract cannot be analyzed.\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Rename the contract.\"\n\n # pylint: disable=too-many-locals,too-many-branches\n def _detect(self) -> List[Output]:\n results = []\n compilation_unit = self.compilation_unit\n\n all_contracts = compilation_unit.contracts\n all_contracts_name = [c.name for c in all_contracts]\n contracts_name_reused = {\n contract for 
contract in all_contracts_name if all_contracts_name.count(contract) > 1\n }\n\n names_reused = {\n name: compilation_unit.get_contract_from_name(name) for name in contracts_name_reused\n }\n\n # First show the contracts that we know are missing\n incorrectly_constructed = [\n contract\n for contract in compilation_unit.contracts\n if contract.is_incorrectly_constructed\n ]\n\n inheritance_corrupted = defaultdict(list)\n for contract in incorrectly_constructed:\n for father in contract.inheritance:\n inheritance_corrupted[father.name].append(contract)\n\n for contract_name, files in names_reused.items():\n info: DETECTOR_INFO = [contract_name, \" is re-used:\\n\"]\n for file in files:\n if file is None:\n info += [\"\\t- In an file not found, most likely in\\n\"]\n else:\n info += [\"\\t- \", file, \"\\n\"]\n\n if contract_name in inheritance_corrupted:\n info += [\"\\tAs a result, the inherited contracts are not correctly analyzed:\\n\"]\n for corrupted in inheritance_corrupted[contract_name]:\n info += [\"\\t\\t- \", corrupted, \"\\n\"]\n res = self.generate_result(info)\n results.append(res)\n\n # Then show the contracts for which one of the father was not found\n # Here we are not able to know\n most_base_with_missing_inheritance = _find_missing_inheritance(compilation_unit)\n\n for b in most_base_with_missing_inheritance:\n info = [b, \" inherits from a contract for which the name is reused.\\n\"]\n if b.inheritance:\n info += [\"\\t- Slither could not determine which contract has a duplicate name:\\n\"]\n for inheritance in b.inheritance:\n info += [\"\\t\\t-\", inheritance, \"\\n\"]\n info += [\"\\t- Check if:\\n\"]\n info += [\"\\t\\t- A inherited contract is missing from this list,\\n\"]\n info += [\"\\t\\t- The contract are imported from the correct files.\\n\"]\n if b.derived_contracts:\n info += [f\"\\t- This issue impacts the contracts inheriting from {b.name}:\\n\"]\n for derived in b.derived_contracts:\n info += [\"\\t\\t-\", derived, \"\\n\"]\n res = self.generate_result(info)\n results.append(res)\n return results\n", "path": "slither/detectors/slither/name_reused.py"}], "after_files": [{"content": "from collections import defaultdict\nfrom typing import List\n\nfrom crytic_compile.platform import Type as PlatformType\n\nfrom slither.core.compilation_unit import SlitherCompilationUnit\nfrom slither.core.declarations import Contract\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.utils.output import Output\n\n\ndef _find_missing_inheritance(compilation_unit: SlitherCompilationUnit) -> List[Contract]:\n \"\"\"\n Filter contracts with missing inheritance to return only the \"most base\" contracts\n in the inheritance tree.\n :param slither:\n :return:\n \"\"\"\n missings = compilation_unit.contracts_with_missing_inheritance\n\n ret = []\n for b in missings:\n is_most_base = True\n for inheritance in b.immediate_inheritance:\n if inheritance in missings:\n is_most_base = False\n if is_most_base:\n ret.append(b)\n\n return ret\n\n\nclass NameReused(AbstractDetector):\n ARGUMENT = \"name-reused\"\n HELP = \"Contract's name reused\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#name-reused\"\n\n WIKI_TITLE = \"Name reused\"\n\n # region wiki_description\n WIKI_DESCRIPTION = \"\"\"If a codebase has two contracts the similar names, the compilation artifacts\nwill not contain one of the 
contracts with the duplicate name.\"\"\"\n # endregion wiki_description\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\nBob's `truffle` codebase has two contracts named `ERC20`.\nWhen `truffle compile` runs, only one of the two contracts will generate artifacts in `build/contracts`.\nAs a result, the second contract cannot be analyzed.\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Rename the contract.\"\n\n # pylint: disable=too-many-locals,too-many-branches\n def _detect(self) -> List[Output]:\n results = []\n compilation_unit = self.compilation_unit\n if compilation_unit.core.crytic_compile.platform != PlatformType.TRUFFLE:\n return []\n\n all_contracts = compilation_unit.contracts\n all_contracts_name = [c.name for c in all_contracts]\n contracts_name_reused = {\n contract for contract in all_contracts_name if all_contracts_name.count(contract) > 1\n }\n\n names_reused = {\n name: compilation_unit.get_contract_from_name(name) for name in contracts_name_reused\n }\n\n # First show the contracts that we know are missing\n incorrectly_constructed = [\n contract\n for contract in compilation_unit.contracts\n if contract.is_incorrectly_constructed\n ]\n\n inheritance_corrupted = defaultdict(list)\n for contract in incorrectly_constructed:\n for father in contract.inheritance:\n inheritance_corrupted[father.name].append(contract)\n\n for contract_name, files in names_reused.items():\n info: DETECTOR_INFO = [contract_name, \" is re-used:\\n\"]\n for file in files:\n if file is None:\n info += [\"\\t- In an file not found, most likely in\\n\"]\n else:\n info += [\"\\t- \", file, \"\\n\"]\n\n if contract_name in inheritance_corrupted:\n info += [\"\\tAs a result, the inherited contracts are not correctly analyzed:\\n\"]\n for corrupted in inheritance_corrupted[contract_name]:\n info += [\"\\t\\t- \", corrupted, \"\\n\"]\n res = self.generate_result(info)\n results.append(res)\n\n # Then show the contracts for which one of the father was not found\n # Here we are not able to know\n most_base_with_missing_inheritance = _find_missing_inheritance(compilation_unit)\n\n for b in most_base_with_missing_inheritance:\n info = [b, \" inherits from a contract for which the name is reused.\\n\"]\n if b.inheritance:\n info += [\"\\t- Slither could not determine which contract has a duplicate name:\\n\"]\n for inheritance in b.inheritance:\n info += [\"\\t\\t-\", inheritance, \"\\n\"]\n info += [\"\\t- Check if:\\n\"]\n info += [\"\\t\\t- A inherited contract is missing from this list,\\n\"]\n info += [\"\\t\\t- The contract are imported from the correct files.\\n\"]\n if b.derived_contracts:\n info += [f\"\\t- This issue impacts the contracts inheriting from {b.name}:\\n\"]\n for derived in b.derived_contracts:\n info += [\"\\t\\t-\", derived, \"\\n\"]\n res = self.generate_result(info)\n results.append(res)\n return results\n", "path": "slither/detectors/slither/name_reused.py"}]} | 1,745 | 205 |
gh_patches_debug_16712 | rasdani/github-patches | git_diff | hydroshare__hydroshare-2263 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Metadata strings updated through hs_restclient are parsed incorrectly
When using the hs_restclient to update metadata for Generic and Composite resources (I haven't tested other types), string values are updated as a unicode string inside an array (e.g. [u'some_string'] ).
Here's an example code snippet:
`
put_data = {"title": "New Title", "description": "New Description"}
client.updateScienceMetadata('f44c00556cd847b98dd47f3a6279014d', put_data)
`
Two resources that show this issue:


--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hs_core/views/resource_metadata_rest_api.py`
Content:
```
1 import logging
2
3 from rest_framework.response import Response
4 from rest_framework.exceptions import ValidationError
5 from rest_framework import status
6 from rest_framework import generics
7 from rest_framework import serializers
8
9 from hs_core import hydroshare
10 from hs_core.models import Contributor, CoreMetaData, Coverage, Creator, Date, \
11 ExternalProfileLink, Format, FundingAgency, Identifier, Subject, Source, Relation
12 from hs_core.views import utils as view_utils
13 from hs_core.views.utils import ACTION_TO_AUTHORIZE
14
15 logger = logging.getLogger(__name__)
16
17
18 class ExternalProfileLinkSerializer(serializers.Serializer):
19 type = serializers.CharField(required=False)
20 url = serializers.URLField(required=False)
21 object_id = serializers.IntegerField(required=False)
22 # content_type = models.ForeignKey(ContentType)
23 # content_object = GenericForeignKey('content_type', 'object_id')
24
25 class Meta:
26 model = ExternalProfileLink
27
28
29 class PartySerializer(serializers.Serializer):
30 name = serializers.CharField()
31 description = serializers.URLField(required=False)
32 organization = serializers.CharField(required=False)
33 email = serializers.EmailField(required=False)
34 address = serializers.CharField(required=False)
35 phone = serializers.CharField(required=False)
36 homepage = serializers.URLField(required=False)
37 external_links = serializers = ExternalProfileLinkSerializer(required=False, many=True)
38
39 class Meta:
40 model = Creator
41 fields = {'name', 'description', 'organization', 'email',
42 'address', 'phone', 'homepage', 'external_links'}
43
44
45 class CreatorSerializer(PartySerializer):
46 order = serializers.IntegerField(required=False)
47
48 class Meta:
49 model = Contributor
50
51
52 class DateSerializer(serializers.Serializer):
53 # term = 'Date'
54 type = serializers.CharField(required=False)
55 start_date = serializers.DateTimeField(required=False)
56 end_date = serializers.DateTimeField(required=False)
57
58 class Meta:
59 model = Date
60
61
62 class CoverageSerializer(serializers.Serializer):
63 type = serializers.CharField(required=False)
64 value = serializers.SerializerMethodField(required=False)
65
66 class Meta:
67 model = Coverage
68
69 def get_value(self, obj):
70 return obj.value
71
72
73 class FormatSerializer(serializers.Serializer):
74 value = serializers.CharField(required=False)
75
76 class Meta:
77 model = Format
78
79
80 class FundingAgencySerializer(serializers.Serializer):
81 agency_name = serializers.CharField()
82 award_title = serializers.CharField(required=False)
83 award_number = serializers.CharField(required=False)
84 agency_url = serializers.URLField(required=False)
85
86 class Meta:
87 model = FundingAgency
88
89
90 class IdentifierSerializer(serializers.Serializer):
91 name = serializers.CharField(required=False)
92 url = serializers.URLField(required=False)
93
94 class Meta:
95 model = Identifier
96
97
98 class SubjectSerializer(serializers.Serializer):
99 value = serializers.CharField(required=False)
100
101 class Meta:
102 model = Subject
103
104
105 class SourceSerializer(serializers.Serializer):
106 derived_from = serializers.CharField(required=False)
107
108 class Meta:
109 model = Source
110
111
112 class RelationSerializer(serializers.Serializer):
113 type = serializers.CharField(required=False)
114 value = serializers.CharField(required=False)
115
116 class Meta:
117 model = Relation
118
119
120 class CoreMetaDataSerializer(serializers.Serializer):
121 title = serializers.CharField(required=False)
122 creators = CreatorSerializer(required=False, many=True)
123 contributors = PartySerializer(required=False, many=True)
124 coverages = CoverageSerializer(required=False, many=True)
125 dates = DateSerializer(required=False, many=True)
126 description = serializers.CharField(required=False)
127 formats = FormatSerializer(required=False, many=True)
128 funding_agencies = FundingAgencySerializer(required=False, many=True)
129 identifiers = IdentifierSerializer(required=False, many=True)
130 language = serializers.CharField(required=False)
131 rights = serializers.CharField(required=False)
132 type = serializers.CharField(required=False)
133 publisher = serializers.CharField(required=False)
134 sources = SourceSerializer(required=False, many=True)
135 subjects = SubjectSerializer(required=False, many=True)
136 relations = RelationSerializer(required=False, many=True)
137
138 class Meta:
139 model = CoreMetaData
140
141
142 class MetadataElementsRetrieveUpdate(generics.RetrieveUpdateDestroyAPIView):
143 """
144 Retrieve resource science (Dublin Core) metadata
145
146 REST URL: /hsapi/resource/{pk}/scimeta/elements/
147 HTTP method: GET
148
149 :type pk: str
150 :param pk: id of the resource
151 :return: resource science metadata as JSON document
152 :rtype: str
153 :raises:
154 NotFound: return json format: {'detail': 'No resource was found for resource id:pk'}
155 PermissionDenied: return json format: {'detail': 'You do not have permission to perform
156 this action.'}
157
158 REST URL: /hsapi/resource/{pk}/scimeta/elements/
159 HTTP method: PUT
160
161 :type pk: str
162 :param pk: id of the resource
163 :type request: JSON formatted string
164 :param request: resource metadata
165 :return: updated resource science metadata as JSON document
166 :rtype: str
167 :raises:
168 NotFound: return json format: {'detail': 'No resource was found for resource id':pk}
169 PermissionDenied: return json format: {'detail': 'You do not have permission to perform
170 this action.'}
171 ValidationError: return json format: {parameter-1': ['error message-1'],
172 'parameter-2': ['error message-2'], .. }
173 """
174 ACCEPT_FORMATS = ('application/json',)
175
176 allowed_methods = ('GET', 'PUT')
177
178 serializer_class = CoreMetaDataSerializer
179
180 def get(self, request, pk):
181 view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)
182 resource = hydroshare.get_resource_by_shortkey(shortkey=pk)
183 serializer = resource.metadata.serializer
184 return Response(data=serializer.data, status=status.HTTP_200_OK)
185
186 def put(self, request, pk):
187 # Update science metadata
188 resource, _, _ = view_utils.authorize(
189 request, pk,
190 needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
191
192 metadata = []
193 put_data = request.data.copy()
194
195 try:
196 resource.metadata.parse_for_bulk_update(put_data, metadata)
197 hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user)
198 except Exception as ex:
199 error_msg = {
200 'resource': "Resource metadata update failed: %s, %s"
201 % (ex.__class__, ex.message)
202 }
203 raise ValidationError(detail=error_msg)
204
205 resource = hydroshare.get_resource_by_shortkey(shortkey=pk)
206 serializer = resource.metadata.serializer
207 return Response(data=serializer.data, status=status.HTTP_202_ACCEPTED)
208
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hs_core/views/resource_metadata_rest_api.py b/hs_core/views/resource_metadata_rest_api.py
--- a/hs_core/views/resource_metadata_rest_api.py
+++ b/hs_core/views/resource_metadata_rest_api.py
@@ -1,5 +1,7 @@
import logging
+from django.http import QueryDict
+
from rest_framework.response import Response
from rest_framework.exceptions import ValidationError
from rest_framework import status
@@ -192,6 +194,10 @@
metadata = []
put_data = request.data.copy()
+ # convert the QueryDict to dict
+ if isinstance(put_data, QueryDict):
+ put_data = put_data.dict()
+
try:
resource.metadata.parse_for_bulk_update(put_data, metadata)
hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user)
| {"golden_diff": "diff --git a/hs_core/views/resource_metadata_rest_api.py b/hs_core/views/resource_metadata_rest_api.py\n--- a/hs_core/views/resource_metadata_rest_api.py\n+++ b/hs_core/views/resource_metadata_rest_api.py\n@@ -1,5 +1,7 @@\n import logging\n \n+from django.http import QueryDict\n+\n from rest_framework.response import Response\n from rest_framework.exceptions import ValidationError\n from rest_framework import status\n@@ -192,6 +194,10 @@\n metadata = []\n put_data = request.data.copy()\n \n+ # convert the QueryDict to dict\n+ if isinstance(put_data, QueryDict):\n+ put_data = put_data.dict()\n+\n try:\n resource.metadata.parse_for_bulk_update(put_data, metadata)\n hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user)\n", "issue": "Metadata strings updated through hs_restclient are parsed incorrectly\nWhen using the hs_restclient to update metadata for Generic and Composite resources (I haven't tested other types), string values are updated as a unicode string inside an array (e.g. [u'some_string'] ). \r\n\r\nHere's an example code snippet:\r\n`\r\nput_data = {\"title\": \"New Title\", \"description\": \"New Description\"}\r\nclient.updateScienceMetadata('f44c00556cd847b98dd47f3a6279014d', put_data)\r\n`\r\n\r\nTwo resources that show this issue:\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework import status\nfrom rest_framework import generics\nfrom rest_framework import serializers\n\nfrom hs_core import hydroshare\nfrom hs_core.models import Contributor, CoreMetaData, Coverage, Creator, Date, \\\n ExternalProfileLink, Format, FundingAgency, Identifier, Subject, Source, Relation\nfrom hs_core.views import utils as view_utils\nfrom hs_core.views.utils import ACTION_TO_AUTHORIZE\n\nlogger = logging.getLogger(__name__)\n\n\nclass ExternalProfileLinkSerializer(serializers.Serializer):\n type = serializers.CharField(required=False)\n url = serializers.URLField(required=False)\n object_id = serializers.IntegerField(required=False)\n # content_type = models.ForeignKey(ContentType)\n # content_object = GenericForeignKey('content_type', 'object_id')\n\n class Meta:\n model = ExternalProfileLink\n\n\nclass PartySerializer(serializers.Serializer):\n name = serializers.CharField()\n description = serializers.URLField(required=False)\n organization = serializers.CharField(required=False)\n email = serializers.EmailField(required=False)\n address = serializers.CharField(required=False)\n phone = serializers.CharField(required=False)\n homepage = serializers.URLField(required=False)\n external_links = serializers = ExternalProfileLinkSerializer(required=False, many=True)\n\n class Meta:\n model = Creator\n fields = {'name', 'description', 'organization', 'email',\n 'address', 'phone', 'homepage', 'external_links'}\n\n\nclass CreatorSerializer(PartySerializer):\n order = serializers.IntegerField(required=False)\n\n class Meta:\n model = Contributor\n\n\nclass DateSerializer(serializers.Serializer):\n # term = 'Date'\n type = serializers.CharField(required=False)\n start_date = serializers.DateTimeField(required=False)\n end_date = serializers.DateTimeField(required=False)\n\n class Meta:\n model = Date\n\n\nclass CoverageSerializer(serializers.Serializer):\n type = serializers.CharField(required=False)\n value = serializers.SerializerMethodField(required=False)\n\n class Meta:\n model = Coverage\n\n def get_value(self, 
obj):\n return obj.value\n\n\nclass FormatSerializer(serializers.Serializer):\n value = serializers.CharField(required=False)\n\n class Meta:\n model = Format\n\n\nclass FundingAgencySerializer(serializers.Serializer):\n agency_name = serializers.CharField()\n award_title = serializers.CharField(required=False)\n award_number = serializers.CharField(required=False)\n agency_url = serializers.URLField(required=False)\n\n class Meta:\n model = FundingAgency\n\n\nclass IdentifierSerializer(serializers.Serializer):\n name = serializers.CharField(required=False)\n url = serializers.URLField(required=False)\n\n class Meta:\n model = Identifier\n\n\nclass SubjectSerializer(serializers.Serializer):\n value = serializers.CharField(required=False)\n\n class Meta:\n model = Subject\n\n\nclass SourceSerializer(serializers.Serializer):\n derived_from = serializers.CharField(required=False)\n\n class Meta:\n model = Source\n\n\nclass RelationSerializer(serializers.Serializer):\n type = serializers.CharField(required=False)\n value = serializers.CharField(required=False)\n\n class Meta:\n model = Relation\n\n\nclass CoreMetaDataSerializer(serializers.Serializer):\n title = serializers.CharField(required=False)\n creators = CreatorSerializer(required=False, many=True)\n contributors = PartySerializer(required=False, many=True)\n coverages = CoverageSerializer(required=False, many=True)\n dates = DateSerializer(required=False, many=True)\n description = serializers.CharField(required=False)\n formats = FormatSerializer(required=False, many=True)\n funding_agencies = FundingAgencySerializer(required=False, many=True)\n identifiers = IdentifierSerializer(required=False, many=True)\n language = serializers.CharField(required=False)\n rights = serializers.CharField(required=False)\n type = serializers.CharField(required=False)\n publisher = serializers.CharField(required=False)\n sources = SourceSerializer(required=False, many=True)\n subjects = SubjectSerializer(required=False, many=True)\n relations = RelationSerializer(required=False, many=True)\n\n class Meta:\n model = CoreMetaData\n\n\nclass MetadataElementsRetrieveUpdate(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n Retrieve resource science (Dublin Core) metadata\n\n REST URL: /hsapi/resource/{pk}/scimeta/elements/\n HTTP method: GET\n\n :type pk: str\n :param pk: id of the resource\n :return: resource science metadata as JSON document\n :rtype: str\n :raises:\n NotFound: return json format: {'detail': 'No resource was found for resource id:pk'}\n PermissionDenied: return json format: {'detail': 'You do not have permission to perform\n this action.'}\n\n REST URL: /hsapi/resource/{pk}/scimeta/elements/\n HTTP method: PUT\n\n :type pk: str\n :param pk: id of the resource\n :type request: JSON formatted string\n :param request: resource metadata\n :return: updated resource science metadata as JSON document\n :rtype: str\n :raises:\n NotFound: return json format: {'detail': 'No resource was found for resource id':pk}\n PermissionDenied: return json format: {'detail': 'You do not have permission to perform\n this action.'}\n ValidationError: return json format: {parameter-1': ['error message-1'],\n 'parameter-2': ['error message-2'], .. 
}\n \"\"\"\n ACCEPT_FORMATS = ('application/json',)\n\n allowed_methods = ('GET', 'PUT')\n\n serializer_class = CoreMetaDataSerializer\n\n def get(self, request, pk):\n view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)\n resource = hydroshare.get_resource_by_shortkey(shortkey=pk)\n serializer = resource.metadata.serializer\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n def put(self, request, pk):\n # Update science metadata\n resource, _, _ = view_utils.authorize(\n request, pk,\n needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)\n\n metadata = []\n put_data = request.data.copy()\n\n try:\n resource.metadata.parse_for_bulk_update(put_data, metadata)\n hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user)\n except Exception as ex:\n error_msg = {\n 'resource': \"Resource metadata update failed: %s, %s\"\n % (ex.__class__, ex.message)\n }\n raise ValidationError(detail=error_msg)\n\n resource = hydroshare.get_resource_by_shortkey(shortkey=pk)\n serializer = resource.metadata.serializer\n return Response(data=serializer.data, status=status.HTTP_202_ACCEPTED)\n", "path": "hs_core/views/resource_metadata_rest_api.py"}], "after_files": [{"content": "import logging\n\nfrom django.http import QueryDict\n\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework import status\nfrom rest_framework import generics\nfrom rest_framework import serializers\n\nfrom hs_core import hydroshare\nfrom hs_core.models import Contributor, CoreMetaData, Coverage, Creator, Date, \\\n ExternalProfileLink, Format, FundingAgency, Identifier, Subject, Source, Relation\nfrom hs_core.views import utils as view_utils\nfrom hs_core.views.utils import ACTION_TO_AUTHORIZE\n\nlogger = logging.getLogger(__name__)\n\n\nclass ExternalProfileLinkSerializer(serializers.Serializer):\n type = serializers.CharField(required=False)\n url = serializers.URLField(required=False)\n object_id = serializers.IntegerField(required=False)\n # content_type = models.ForeignKey(ContentType)\n # content_object = GenericForeignKey('content_type', 'object_id')\n\n class Meta:\n model = ExternalProfileLink\n\n\nclass PartySerializer(serializers.Serializer):\n name = serializers.CharField()\n description = serializers.URLField(required=False)\n organization = serializers.CharField(required=False)\n email = serializers.EmailField(required=False)\n address = serializers.CharField(required=False)\n phone = serializers.CharField(required=False)\n homepage = serializers.URLField(required=False)\n external_links = serializers = ExternalProfileLinkSerializer(required=False, many=True)\n\n class Meta:\n model = Creator\n fields = {'name', 'description', 'organization', 'email',\n 'address', 'phone', 'homepage', 'external_links'}\n\n\nclass CreatorSerializer(PartySerializer):\n order = serializers.IntegerField(required=False)\n\n class Meta:\n model = Contributor\n\n\nclass DateSerializer(serializers.Serializer):\n # term = 'Date'\n type = serializers.CharField(required=False)\n start_date = serializers.DateTimeField(required=False)\n end_date = serializers.DateTimeField(required=False)\n\n class Meta:\n model = Date\n\n\nclass CoverageSerializer(serializers.Serializer):\n type = serializers.CharField(required=False)\n value = serializers.SerializerMethodField(required=False)\n\n class Meta:\n model = Coverage\n\n def get_value(self, obj):\n return obj.value\n\n\nclass 
FormatSerializer(serializers.Serializer):\n value = serializers.CharField(required=False)\n\n class Meta:\n model = Format\n\n\nclass FundingAgencySerializer(serializers.Serializer):\n agency_name = serializers.CharField()\n award_title = serializers.CharField(required=False)\n award_number = serializers.CharField(required=False)\n agency_url = serializers.URLField(required=False)\n\n class Meta:\n model = FundingAgency\n\n\nclass IdentifierSerializer(serializers.Serializer):\n name = serializers.CharField(required=False)\n url = serializers.URLField(required=False)\n\n class Meta:\n model = Identifier\n\n\nclass SubjectSerializer(serializers.Serializer):\n value = serializers.CharField(required=False)\n\n class Meta:\n model = Subject\n\n\nclass SourceSerializer(serializers.Serializer):\n derived_from = serializers.CharField(required=False)\n\n class Meta:\n model = Source\n\n\nclass RelationSerializer(serializers.Serializer):\n type = serializers.CharField(required=False)\n value = serializers.CharField(required=False)\n\n class Meta:\n model = Relation\n\n\nclass CoreMetaDataSerializer(serializers.Serializer):\n title = serializers.CharField(required=False)\n creators = CreatorSerializer(required=False, many=True)\n contributors = PartySerializer(required=False, many=True)\n coverages = CoverageSerializer(required=False, many=True)\n dates = DateSerializer(required=False, many=True)\n description = serializers.CharField(required=False)\n formats = FormatSerializer(required=False, many=True)\n funding_agencies = FundingAgencySerializer(required=False, many=True)\n identifiers = IdentifierSerializer(required=False, many=True)\n language = serializers.CharField(required=False)\n rights = serializers.CharField(required=False)\n type = serializers.CharField(required=False)\n publisher = serializers.CharField(required=False)\n sources = SourceSerializer(required=False, many=True)\n subjects = SubjectSerializer(required=False, many=True)\n relations = RelationSerializer(required=False, many=True)\n\n class Meta:\n model = CoreMetaData\n\n\nclass MetadataElementsRetrieveUpdate(generics.RetrieveUpdateDestroyAPIView):\n \"\"\"\n Retrieve resource science (Dublin Core) metadata\n\n REST URL: /hsapi/resource/{pk}/scimeta/elements/\n HTTP method: GET\n\n :type pk: str\n :param pk: id of the resource\n :return: resource science metadata as JSON document\n :rtype: str\n :raises:\n NotFound: return json format: {'detail': 'No resource was found for resource id:pk'}\n PermissionDenied: return json format: {'detail': 'You do not have permission to perform\n this action.'}\n\n REST URL: /hsapi/resource/{pk}/scimeta/elements/\n HTTP method: PUT\n\n :type pk: str\n :param pk: id of the resource\n :type request: JSON formatted string\n :param request: resource metadata\n :return: updated resource science metadata as JSON document\n :rtype: str\n :raises:\n NotFound: return json format: {'detail': 'No resource was found for resource id':pk}\n PermissionDenied: return json format: {'detail': 'You do not have permission to perform\n this action.'}\n ValidationError: return json format: {parameter-1': ['error message-1'],\n 'parameter-2': ['error message-2'], .. 
}\n \"\"\"\n ACCEPT_FORMATS = ('application/json',)\n\n allowed_methods = ('GET', 'PUT')\n\n serializer_class = CoreMetaDataSerializer\n\n def get(self, request, pk):\n view_utils.authorize(request, pk, needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)\n resource = hydroshare.get_resource_by_shortkey(shortkey=pk)\n serializer = resource.metadata.serializer\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n def put(self, request, pk):\n # Update science metadata\n resource, _, _ = view_utils.authorize(\n request, pk,\n needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)\n\n metadata = []\n put_data = request.data.copy()\n\n # convert the QueryDict to dict\n if isinstance(put_data, QueryDict):\n put_data = put_data.dict()\n\n try:\n resource.metadata.parse_for_bulk_update(put_data, metadata)\n hydroshare.update_science_metadata(pk=pk, metadata=metadata, user=request.user)\n except Exception as ex:\n error_msg = {\n 'resource': \"Resource metadata update failed: %s, %s\"\n % (ex.__class__, ex.message)\n }\n raise ValidationError(detail=error_msg)\n\n resource = hydroshare.get_resource_by_shortkey(shortkey=pk)\n serializer = resource.metadata.serializer\n return Response(data=serializer.data, status=status.HTTP_202_ACCEPTED)\n", "path": "hs_core/views/resource_metadata_rest_api.py"}]} | 2,424 | 183 |
gh_patches_debug_4452 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-3517 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken link
Hello,
I found that the link at [https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/core/google/cloud/client.py#L33][1] is broken.
https://google-cloud-python.readthedocs.io/en/latest/google-cloud-auth.html
[1]: https://github.com/GoogleCloudPlatform/google-cloud-python/blob/661816540f1387bcc6e08b0fd722f4abae585b37/core/google/cloud/client.py#L33
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/google/cloud/client.py`
Content:
```
1 # Copyright 2015 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Base classes for client used to interact with Google Cloud APIs."""
16
17 import io
18 import json
19 from pickle import PicklingError
20
21 import google.auth.credentials
22 from google.oauth2 import service_account
23 import google_auth_httplib2
24 import six
25
26 from google.cloud._helpers import _determine_default_project
27 from google.cloud.credentials import get_credentials
28
29
30 _GOOGLE_AUTH_CREDENTIALS_HELP = (
31 'This library only supports credentials from google-auth-library-python. '
32 'See https://google-cloud-python.readthedocs.io/en/latest/'
33 'google-cloud-auth.html for help on authentication with this library.'
34 )
35
36
37 class _ClientFactoryMixin(object):
38 """Mixin to allow factories that create credentials.
39
40 .. note::
41
42 This class is virtual.
43 """
44
45 _SET_PROJECT = False
46
47 @classmethod
48 def from_service_account_json(cls, json_credentials_path, *args, **kwargs):
49 """Factory to retrieve JSON credentials while creating client.
50
51 :type json_credentials_path: str
52 :param json_credentials_path: The path to a private key file (this file
53 was given to you when you created the
54 service account). This file must contain
55 a JSON object with a private key and
56 other credentials information (downloaded
57 from the Google APIs console).
58
59 :type args: tuple
60 :param args: Remaining positional arguments to pass to constructor.
61
62 :type kwargs: dict
63 :param kwargs: Remaining keyword arguments to pass to constructor.
64
65 :rtype: :class:`_ClientFactoryMixin`
66 :returns: The client created with the retrieved JSON credentials.
67 :raises: :class:`TypeError` if there is a conflict with the kwargs
68 and the credentials created by the factory.
69 """
70 if 'credentials' in kwargs:
71 raise TypeError('credentials must not be in keyword arguments')
72 with io.open(json_credentials_path, 'r', encoding='utf-8') as json_fi:
73 credentials_info = json.load(json_fi)
74 credentials = service_account.Credentials.from_service_account_info(
75 credentials_info)
76 if cls._SET_PROJECT:
77 if 'project' not in kwargs:
78 kwargs['project'] = credentials_info.get('project_id')
79
80 kwargs['credentials'] = credentials
81 return cls(*args, **kwargs)
82
83
84 class Client(_ClientFactoryMixin):
85 """Client to bundle configuration needed for API requests.
86
87 Stores ``credentials`` and an HTTP object so that subclasses
88 can pass them along to a connection class.
89
90 If no value is passed in for ``_http``, a :class:`httplib2.Http` object
91 will be created and authorized with the ``credentials``. If not, the
92 ``credentials`` and ``_http`` need not be related.
93
94 Callers and subclasses may seek to use the private key from
95 ``credentials`` to sign data.
96
97 A custom (non-``httplib2``) HTTP object must have a ``request`` method
98 which accepts the following arguments:
99
100 * ``uri``
101 * ``method``
102 * ``body``
103 * ``headers``
104
105 In addition, ``redirections`` and ``connection_type`` may be used.
106
107 A custom ``_http`` object will also need to be able to add a bearer token
108 to API requests and handle token refresh on 401 errors.
109
110 :type credentials: :class:`~google.auth.credentials.Credentials`
111 :param credentials: (Optional) The OAuth2 Credentials to use for this
112 client. If not passed (and if no ``_http`` object is
113 passed), falls back to the default inferred from the
114 environment.
115
116 :type _http: :class:`~httplib2.Http`
117 :param _http: (Optional) HTTP object to make requests. Can be any object
118 that defines ``request()`` with the same interface as
119 :meth:`~httplib2.Http.request`. If not passed, an
120 ``_http`` object is created that is bound to the
121 ``credentials`` for the current object.
122 This parameter should be considered private, and could
123 change in the future.
124 """
125
126 SCOPE = None
127 """The scopes required for authenticating with a service.
128
129 Needs to be set by subclasses.
130 """
131
132 def __init__(self, credentials=None, _http=None):
133 if (credentials is not None and
134 not isinstance(
135 credentials, google.auth.credentials.Credentials)):
136 raise ValueError(_GOOGLE_AUTH_CREDENTIALS_HELP)
137 if credentials is None and _http is None:
138 credentials = get_credentials()
139 self._credentials = google.auth.credentials.with_scopes_if_required(
140 credentials, self.SCOPE)
141 self._http_internal = _http
142
143 def __getstate__(self):
144 """Explicitly state that clients are not pickleable."""
145 raise PicklingError('\n'.join([
146 'Pickling client objects is explicitly not supported.',
147 'Clients have non-trivial state that is local and unpickleable.',
148 ]))
149
150 @property
151 def _http(self):
152 """Getter for object used for HTTP transport.
153
154 :rtype: :class:`~httplib2.Http`
155 :returns: An HTTP object.
156 """
157 if self._http_internal is None:
158 self._http_internal = google_auth_httplib2.AuthorizedHttp(
159 self._credentials)
160 return self._http_internal
161
162
163 class _ClientProjectMixin(object):
164 """Mixin to allow setting the project on the client.
165
166 :type project: str
167 :param project: the project which the client acts on behalf of. If not
168 passed falls back to the default inferred from the
169 environment.
170
171 :raises: :class:`EnvironmentError` if the project is neither passed in nor
172 set in the environment. :class:`ValueError` if the project value
173 is invalid.
174 """
175
176 def __init__(self, project=None):
177 project = self._determine_default(project)
178 if project is None:
179 raise EnvironmentError('Project was not passed and could not be '
180 'determined from the environment.')
181 if isinstance(project, six.binary_type):
182 project = project.decode('utf-8')
183 if not isinstance(project, six.string_types):
184 raise ValueError('Project must be a string.')
185 self.project = project
186
187 @staticmethod
188 def _determine_default(project):
189 """Helper: use default project detection."""
190 return _determine_default_project(project)
191
192
193 class ClientWithProject(Client, _ClientProjectMixin):
194 """Client that also stores a project.
195
196 :type project: str
197 :param project: the project which the client acts on behalf of. If not
198 passed falls back to the default inferred from the
199 environment.
200
201 :type credentials: :class:`~google.auth.credentials.Credentials`
202 :param credentials: (Optional) The OAuth2 Credentials to use for this
203 client. If not passed (and if no ``_http`` object is
204 passed), falls back to the default inferred from the
205 environment.
206
207 :type _http: :class:`~httplib2.Http`
208 :param _http: (Optional) HTTP object to make requests. Can be any object
209 that defines ``request()`` with the same interface as
210 :meth:`~httplib2.Http.request`. If not passed, an
211 ``_http`` object is created that is bound to the
212 ``credentials`` for the current object.
213 This parameter should be considered private, and could
214 change in the future.
215
216 :raises: :class:`ValueError` if the project is neither passed in nor
217 set in the environment.
218 """
219
220 _SET_PROJECT = True # Used by from_service_account_json()
221
222 def __init__(self, project=None, credentials=None, _http=None):
223 _ClientProjectMixin.__init__(self, project=project)
224 Client.__init__(self, credentials=credentials, _http=_http)
225
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/google/cloud/client.py b/core/google/cloud/client.py
--- a/core/google/cloud/client.py
+++ b/core/google/cloud/client.py
@@ -29,8 +29,8 @@
_GOOGLE_AUTH_CREDENTIALS_HELP = (
'This library only supports credentials from google-auth-library-python. '
- 'See https://google-cloud-python.readthedocs.io/en/latest/'
- 'google-cloud-auth.html for help on authentication with this library.'
+ 'See https://google-cloud-python.readthedocs.io/en/latest/core/auth.html '
+ 'for help on authentication with this library.'
)
| {"golden_diff": "diff --git a/core/google/cloud/client.py b/core/google/cloud/client.py\n--- a/core/google/cloud/client.py\n+++ b/core/google/cloud/client.py\n@@ -29,8 +29,8 @@\n \n _GOOGLE_AUTH_CREDENTIALS_HELP = (\n 'This library only supports credentials from google-auth-library-python. '\n- 'See https://google-cloud-python.readthedocs.io/en/latest/'\n- 'google-cloud-auth.html for help on authentication with this library.'\n+ 'See https://google-cloud-python.readthedocs.io/en/latest/core/auth.html '\n+ 'for help on authentication with this library.'\n )\n", "issue": "Broken link\nHello,\r\n\r\nI found that the link at [https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/core/google/cloud/client.py#L33][1] is broken.\r\n\r\nhttps://google-cloud-python.readthedocs.io/en/latest/google-cloud-auth.html\r\n\r\n[1]: https://github.com/GoogleCloudPlatform/google-cloud-python/blob/661816540f1387bcc6e08b0fd722f4abae585b37/core/google/cloud/client.py#L33\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base classes for client used to interact with Google Cloud APIs.\"\"\"\n\nimport io\nimport json\nfrom pickle import PicklingError\n\nimport google.auth.credentials\nfrom google.oauth2 import service_account\nimport google_auth_httplib2\nimport six\n\nfrom google.cloud._helpers import _determine_default_project\nfrom google.cloud.credentials import get_credentials\n\n\n_GOOGLE_AUTH_CREDENTIALS_HELP = (\n 'This library only supports credentials from google-auth-library-python. '\n 'See https://google-cloud-python.readthedocs.io/en/latest/'\n 'google-cloud-auth.html for help on authentication with this library.'\n)\n\n\nclass _ClientFactoryMixin(object):\n \"\"\"Mixin to allow factories that create credentials.\n\n .. note::\n\n This class is virtual.\n \"\"\"\n\n _SET_PROJECT = False\n\n @classmethod\n def from_service_account_json(cls, json_credentials_path, *args, **kwargs):\n \"\"\"Factory to retrieve JSON credentials while creating client.\n\n :type json_credentials_path: str\n :param json_credentials_path: The path to a private key file (this file\n was given to you when you created the\n service account). 
This file must contain\n a JSON object with a private key and\n other credentials information (downloaded\n from the Google APIs console).\n\n :type args: tuple\n :param args: Remaining positional arguments to pass to constructor.\n\n :type kwargs: dict\n :param kwargs: Remaining keyword arguments to pass to constructor.\n\n :rtype: :class:`_ClientFactoryMixin`\n :returns: The client created with the retrieved JSON credentials.\n :raises: :class:`TypeError` if there is a conflict with the kwargs\n and the credentials created by the factory.\n \"\"\"\n if 'credentials' in kwargs:\n raise TypeError('credentials must not be in keyword arguments')\n with io.open(json_credentials_path, 'r', encoding='utf-8') as json_fi:\n credentials_info = json.load(json_fi)\n credentials = service_account.Credentials.from_service_account_info(\n credentials_info)\n if cls._SET_PROJECT:\n if 'project' not in kwargs:\n kwargs['project'] = credentials_info.get('project_id')\n\n kwargs['credentials'] = credentials\n return cls(*args, **kwargs)\n\n\nclass Client(_ClientFactoryMixin):\n \"\"\"Client to bundle configuration needed for API requests.\n\n Stores ``credentials`` and an HTTP object so that subclasses\n can pass them along to a connection class.\n\n If no value is passed in for ``_http``, a :class:`httplib2.Http` object\n will be created and authorized with the ``credentials``. If not, the\n ``credentials`` and ``_http`` need not be related.\n\n Callers and subclasses may seek to use the private key from\n ``credentials`` to sign data.\n\n A custom (non-``httplib2``) HTTP object must have a ``request`` method\n which accepts the following arguments:\n\n * ``uri``\n * ``method``\n * ``body``\n * ``headers``\n\n In addition, ``redirections`` and ``connection_type`` may be used.\n\n A custom ``_http`` object will also need to be able to add a bearer token\n to API requests and handle token refresh on 401 errors.\n\n :type credentials: :class:`~google.auth.credentials.Credentials`\n :param credentials: (Optional) The OAuth2 Credentials to use for this\n client. If not passed (and if no ``_http`` object is\n passed), falls back to the default inferred from the\n environment.\n\n :type _http: :class:`~httplib2.Http`\n :param _http: (Optional) HTTP object to make requests. Can be any object\n that defines ``request()`` with the same interface as\n :meth:`~httplib2.Http.request`. 
If not passed, an\n ``_http`` object is created that is bound to the\n ``credentials`` for the current object.\n This parameter should be considered private, and could\n change in the future.\n \"\"\"\n\n SCOPE = None\n \"\"\"The scopes required for authenticating with a service.\n\n Needs to be set by subclasses.\n \"\"\"\n\n def __init__(self, credentials=None, _http=None):\n if (credentials is not None and\n not isinstance(\n credentials, google.auth.credentials.Credentials)):\n raise ValueError(_GOOGLE_AUTH_CREDENTIALS_HELP)\n if credentials is None and _http is None:\n credentials = get_credentials()\n self._credentials = google.auth.credentials.with_scopes_if_required(\n credentials, self.SCOPE)\n self._http_internal = _http\n\n def __getstate__(self):\n \"\"\"Explicitly state that clients are not pickleable.\"\"\"\n raise PicklingError('\\n'.join([\n 'Pickling client objects is explicitly not supported.',\n 'Clients have non-trivial state that is local and unpickleable.',\n ]))\n\n @property\n def _http(self):\n \"\"\"Getter for object used for HTTP transport.\n\n :rtype: :class:`~httplib2.Http`\n :returns: An HTTP object.\n \"\"\"\n if self._http_internal is None:\n self._http_internal = google_auth_httplib2.AuthorizedHttp(\n self._credentials)\n return self._http_internal\n\n\nclass _ClientProjectMixin(object):\n \"\"\"Mixin to allow setting the project on the client.\n\n :type project: str\n :param project: the project which the client acts on behalf of. If not\n passed falls back to the default inferred from the\n environment.\n\n :raises: :class:`EnvironmentError` if the project is neither passed in nor\n set in the environment. :class:`ValueError` if the project value\n is invalid.\n \"\"\"\n\n def __init__(self, project=None):\n project = self._determine_default(project)\n if project is None:\n raise EnvironmentError('Project was not passed and could not be '\n 'determined from the environment.')\n if isinstance(project, six.binary_type):\n project = project.decode('utf-8')\n if not isinstance(project, six.string_types):\n raise ValueError('Project must be a string.')\n self.project = project\n\n @staticmethod\n def _determine_default(project):\n \"\"\"Helper: use default project detection.\"\"\"\n return _determine_default_project(project)\n\n\nclass ClientWithProject(Client, _ClientProjectMixin):\n \"\"\"Client that also stores a project.\n\n :type project: str\n :param project: the project which the client acts on behalf of. If not\n passed falls back to the default inferred from the\n environment.\n\n :type credentials: :class:`~google.auth.credentials.Credentials`\n :param credentials: (Optional) The OAuth2 Credentials to use for this\n client. If not passed (and if no ``_http`` object is\n passed), falls back to the default inferred from the\n environment.\n\n :type _http: :class:`~httplib2.Http`\n :param _http: (Optional) HTTP object to make requests. Can be any object\n that defines ``request()`` with the same interface as\n :meth:`~httplib2.Http.request`. 
If not passed, an\n ``_http`` object is created that is bound to the\n ``credentials`` for the current object.\n This parameter should be considered private, and could\n change in the future.\n\n :raises: :class:`ValueError` if the project is neither passed in nor\n set in the environment.\n \"\"\"\n\n _SET_PROJECT = True # Used by from_service_account_json()\n\n def __init__(self, project=None, credentials=None, _http=None):\n _ClientProjectMixin.__init__(self, project=project)\n Client.__init__(self, credentials=credentials, _http=_http)\n", "path": "core/google/cloud/client.py"}], "after_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base classes for client used to interact with Google Cloud APIs.\"\"\"\n\nimport io\nimport json\nfrom pickle import PicklingError\n\nimport google.auth.credentials\nfrom google.oauth2 import service_account\nimport google_auth_httplib2\nimport six\n\nfrom google.cloud._helpers import _determine_default_project\nfrom google.cloud.credentials import get_credentials\n\n\n_GOOGLE_AUTH_CREDENTIALS_HELP = (\n 'This library only supports credentials from google-auth-library-python. '\n 'See https://google-cloud-python.readthedocs.io/en/latest/core/auth.html '\n 'for help on authentication with this library.'\n)\n\n\nclass _ClientFactoryMixin(object):\n \"\"\"Mixin to allow factories that create credentials.\n\n .. note::\n\n This class is virtual.\n \"\"\"\n\n _SET_PROJECT = False\n\n @classmethod\n def from_service_account_json(cls, json_credentials_path, *args, **kwargs):\n \"\"\"Factory to retrieve JSON credentials while creating client.\n\n :type json_credentials_path: str\n :param json_credentials_path: The path to a private key file (this file\n was given to you when you created the\n service account). 
This file must contain\n a JSON object with a private key and\n other credentials information (downloaded\n from the Google APIs console).\n\n :type args: tuple\n :param args: Remaining positional arguments to pass to constructor.\n\n :type kwargs: dict\n :param kwargs: Remaining keyword arguments to pass to constructor.\n\n :rtype: :class:`_ClientFactoryMixin`\n :returns: The client created with the retrieved JSON credentials.\n :raises: :class:`TypeError` if there is a conflict with the kwargs\n and the credentials created by the factory.\n \"\"\"\n if 'credentials' in kwargs:\n raise TypeError('credentials must not be in keyword arguments')\n with io.open(json_credentials_path, 'r', encoding='utf-8') as json_fi:\n credentials_info = json.load(json_fi)\n credentials = service_account.Credentials.from_service_account_info(\n credentials_info)\n if cls._SET_PROJECT:\n if 'project' not in kwargs:\n kwargs['project'] = credentials_info.get('project_id')\n\n kwargs['credentials'] = credentials\n return cls(*args, **kwargs)\n\n\nclass Client(_ClientFactoryMixin):\n \"\"\"Client to bundle configuration needed for API requests.\n\n Stores ``credentials`` and an HTTP object so that subclasses\n can pass them along to a connection class.\n\n If no value is passed in for ``_http``, a :class:`httplib2.Http` object\n will be created and authorized with the ``credentials``. If not, the\n ``credentials`` and ``_http`` need not be related.\n\n Callers and subclasses may seek to use the private key from\n ``credentials`` to sign data.\n\n A custom (non-``httplib2``) HTTP object must have a ``request`` method\n which accepts the following arguments:\n\n * ``uri``\n * ``method``\n * ``body``\n * ``headers``\n\n In addition, ``redirections`` and ``connection_type`` may be used.\n\n A custom ``_http`` object will also need to be able to add a bearer token\n to API requests and handle token refresh on 401 errors.\n\n :type credentials: :class:`~google.auth.credentials.Credentials`\n :param credentials: (Optional) The OAuth2 Credentials to use for this\n client. If not passed (and if no ``_http`` object is\n passed), falls back to the default inferred from the\n environment.\n\n :type _http: :class:`~httplib2.Http`\n :param _http: (Optional) HTTP object to make requests. Can be any object\n that defines ``request()`` with the same interface as\n :meth:`~httplib2.Http.request`. 
If not passed, an\n ``_http`` object is created that is bound to the\n ``credentials`` for the current object.\n This parameter should be considered private, and could\n change in the future.\n \"\"\"\n\n SCOPE = None\n \"\"\"The scopes required for authenticating with a service.\n\n Needs to be set by subclasses.\n \"\"\"\n\n def __init__(self, credentials=None, _http=None):\n if (credentials is not None and\n not isinstance(\n credentials, google.auth.credentials.Credentials)):\n raise ValueError(_GOOGLE_AUTH_CREDENTIALS_HELP)\n if credentials is None and _http is None:\n credentials = get_credentials()\n self._credentials = google.auth.credentials.with_scopes_if_required(\n credentials, self.SCOPE)\n self._http_internal = _http\n\n def __getstate__(self):\n \"\"\"Explicitly state that clients are not pickleable.\"\"\"\n raise PicklingError('\\n'.join([\n 'Pickling client objects is explicitly not supported.',\n 'Clients have non-trivial state that is local and unpickleable.',\n ]))\n\n @property\n def _http(self):\n \"\"\"Getter for object used for HTTP transport.\n\n :rtype: :class:`~httplib2.Http`\n :returns: An HTTP object.\n \"\"\"\n if self._http_internal is None:\n self._http_internal = google_auth_httplib2.AuthorizedHttp(\n self._credentials)\n return self._http_internal\n\n\nclass _ClientProjectMixin(object):\n \"\"\"Mixin to allow setting the project on the client.\n\n :type project: str\n :param project: the project which the client acts on behalf of. If not\n passed falls back to the default inferred from the\n environment.\n\n :raises: :class:`EnvironmentError` if the project is neither passed in nor\n set in the environment. :class:`ValueError` if the project value\n is invalid.\n \"\"\"\n\n def __init__(self, project=None):\n project = self._determine_default(project)\n if project is None:\n raise EnvironmentError('Project was not passed and could not be '\n 'determined from the environment.')\n if isinstance(project, six.binary_type):\n project = project.decode('utf-8')\n if not isinstance(project, six.string_types):\n raise ValueError('Project must be a string.')\n self.project = project\n\n @staticmethod\n def _determine_default(project):\n \"\"\"Helper: use default project detection.\"\"\"\n return _determine_default_project(project)\n\n\nclass ClientWithProject(Client, _ClientProjectMixin):\n \"\"\"Client that also stores a project.\n\n :type project: str\n :param project: the project which the client acts on behalf of. If not\n passed falls back to the default inferred from the\n environment.\n\n :type credentials: :class:`~google.auth.credentials.Credentials`\n :param credentials: (Optional) The OAuth2 Credentials to use for this\n client. If not passed (and if no ``_http`` object is\n passed), falls back to the default inferred from the\n environment.\n\n :type _http: :class:`~httplib2.Http`\n :param _http: (Optional) HTTP object to make requests. Can be any object\n that defines ``request()`` with the same interface as\n :meth:`~httplib2.Http.request`. 
If not passed, an\n ``_http`` object is created that is bound to the\n ``credentials`` for the current object.\n This parameter should be considered private, and could\n change in the future.\n\n :raises: :class:`ValueError` if the project is neither passed in nor\n set in the environment.\n \"\"\"\n\n _SET_PROJECT = True # Used by from_service_account_json()\n\n def __init__(self, project=None, credentials=None, _http=None):\n _ClientProjectMixin.__init__(self, project=project)\n Client.__init__(self, credentials=credentials, _http=_http)\n", "path": "core/google/cloud/client.py"}]} | 2,778 | 131 |
gh_patches_debug_42452 | rasdani/github-patches | git_diff | freedomofpress__securedrop-6681 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
runtime `i18n` configuration manipulates global state
I guess we already do this for LOCALES, but I think continuing the pattern should be accompanied by a TODO that manipulating/relying on global state is not desirable.
_Originally posted by @legoktm in https://github.com/freedomofpress/securedrop/pull/6406#discussion_r863080227_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/i18n.py`
Content:
```
1 #
2 # SecureDrop whistleblower submission system
3 # Copyright (C) 2017 Loic Dachary <[email protected]>
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU Affero General Public License for more details.
14 #
15 # You should have received a copy of the GNU Affero General Public License
16 # along with this program. If not, see <http://www.gnu.org/licenses/>.
17 #
18 import collections
19 from typing import Dict, List, Set
20
21 from babel.core import (
22 Locale,
23 UnknownLocaleError,
24 get_locale_identifier,
25 negotiate_locale,
26 parse_locale,
27 )
28 from flask import Flask, g, request, session
29 from flask_babel import Babel
30 from sdconfig import FALLBACK_LOCALE, SDConfig
31
32
33 class RequestLocaleInfo:
34 """
35 Convenience wrapper around a babel.core.Locale.
36 """
37
38 def __init__(self, locale: str):
39 self.locale = Locale.parse(locale)
40
41 # This attribute can be set to `True` to differentiate multiple
42 # locales currently available (supported) for the same language.
43 self.use_display_name = False
44
45 def __str__(self) -> str:
46 """
47 The Babel string representation of the locale.
48 """
49 return str(self.locale)
50
51 @property
52 def display_name(self) -> str:
53 """
54 Give callers (i.e., templates) the `Locale` object's display name when
55 such resolution is warranted, otherwise the language name---as
56 determined by `map_locale_display_names()`.
57 """
58 if self.use_display_name:
59 return self.locale.display_name
60 return self.locale.language_name
61
62 @property
63 def text_direction(self) -> str:
64 """
65 The Babel text direction: ltr or rtl.
66
67 Used primarily to set text direction in HTML via the "dir"
68 attribute.
69 """
70 return self.locale.text_direction
71
72 @property
73 def language(self) -> str:
74 """
75 The Babel language name.
76
77 Just the language, without subtag info like region or script.
78 """
79 return self.locale.language
80
81 @property
82 def id(self) -> str:
83 """
84 The Babel string representation of the locale.
85
86 This should match the name of the directory containing its
87 translations.
88 """
89 return str(self.locale)
90
91 @property
92 def language_tag(self) -> str:
93 """
94 Returns a BCP47/RFC5646 language tag for the locale.
95
96 Language tags are used in HTTP headers and the HTML lang
97 attribute.
98 """
99 return get_locale_identifier(parse_locale(str(self.locale)), sep="-")
100
101
102 def configure_babel(config: SDConfig, app: Flask) -> Babel:
103 """
104 Set up Flask-Babel according to the SecureDrop configuration.
105 """
106 # Tell Babel where to find our translations.
107 translations_directory = str(config.TRANSLATION_DIRS.absolute())
108 app.config["BABEL_TRANSLATION_DIRECTORIES"] = translations_directory
109
110 # Create the app's Babel instance. Passing the app to the
111 # constructor causes the instance to attach itself to the app.
112 babel = Babel(app)
113
114 # verify that Babel is only using the translations we told it about
115 if list(babel.translation_directories) != [translations_directory]:
116 raise ValueError(
117 "Babel translation directories ({}) do not match SecureDrop configuration ({})".format(
118 babel.translation_directories, [translations_directory]
119 )
120 )
121
122 # register the function used to determine the locale of a request
123 babel.localeselector(lambda: get_locale(config))
124 return babel
125
126
127 def parse_locale_set(codes: List[str]) -> Set[Locale]:
128 return {Locale.parse(code) for code in codes}
129
130
131 def validate_locale_configuration(config: SDConfig, babel: Babel) -> None:
132 """
133 Check that configured locales are available in the filesystem and therefore usable by
134 Babel. Warn about configured locales that are not usable, unless we're left with
135 no usable default or fallback locale, in which case raise an exception.
136 """
137 # These locales are available and loadable from the filesystem.
138 available = set(babel.list_translations())
139 available.add(Locale.parse(FALLBACK_LOCALE))
140
141 # These locales were configured via "securedrop-admin sdconfig", meaning
142 # they were present on the Admin Workstation at "securedrop-admin" runtime.
143 configured = parse_locale_set(config.SUPPORTED_LOCALES)
144
145 # The intersection of these sets is the set of locales usable by Babel.
146 usable = available & configured
147
148 missing = configured - usable
149 if missing:
150 babel.app.logger.error(
151 f"Configured locales {missing} are not in the set of usable locales {usable}"
152 )
153
154 defaults = parse_locale_set([config.DEFAULT_LOCALE, FALLBACK_LOCALE])
155 if not defaults & usable:
156 raise ValueError(
157 f"None of the default locales {defaults} are in the set of usable locales {usable}"
158 )
159
160 global USABLE_LOCALES
161 USABLE_LOCALES = usable
162
163
164 # TODO(#6420): avoid relying on and manipulating on this global state
165 LOCALES = collections.OrderedDict() # type: collections.OrderedDict[str, RequestLocaleInfo]
166 USABLE_LOCALES = set() # type: Set[Locale]
167
168
169 def map_locale_display_names(config: SDConfig) -> None:
170 """
171 Create a map of locale identifiers to names for display.
172
173 For most of our supported languages, we only provide one
174 translation, so including the full display name is not necessary
175 to distinguish them. For languages with more than one translation,
176 like Chinese, we do need the additional detail.
177 """
178
179 language_locale_counts = collections.defaultdict(int) # type: Dict[str, int]
180 for l in sorted(config.SUPPORTED_LOCALES):
181 locale = RequestLocaleInfo(l)
182 language_locale_counts[locale.language] += 1
183
184 locale_map = collections.OrderedDict()
185 for l in sorted(config.SUPPORTED_LOCALES):
186 if Locale.parse(l) not in USABLE_LOCALES:
187 continue
188
189 locale = RequestLocaleInfo(l)
190 if language_locale_counts[locale.language] > 1:
191 # Disambiguate translations for this language.
192 locale.use_display_name = True
193
194 locale_map[str(locale)] = locale
195
196 global LOCALES
197 LOCALES = locale_map
198
199
200 def configure(config: SDConfig, app: Flask) -> None:
201 babel = configure_babel(config, app)
202 validate_locale_configuration(config, babel)
203 map_locale_display_names(config)
204
205
206 def get_locale(config: SDConfig) -> str:
207 """
208 Return the best supported locale for a request.
209
210 Get the locale as follows, by order of precedence:
211 - l request argument or session['locale']
212 - browser suggested locale, from the Accept-Languages header
213 - config.DEFAULT_LOCALE
214 - config.FALLBACK_LOCALE
215 """
216 preferences = []
217 if session and session.get("locale"):
218 preferences.append(session.get("locale"))
219 if request.args.get("l"):
220 preferences.insert(0, request.args.get("l"))
221 if not preferences:
222 preferences.extend(get_accepted_languages())
223 preferences.append(config.DEFAULT_LOCALE)
224 preferences.append(FALLBACK_LOCALE)
225
226 negotiated = negotiate_locale(preferences, LOCALES.keys())
227
228 if not negotiated:
229 raise ValueError("No usable locale")
230
231 return negotiated
232
233
234 def get_accepted_languages() -> List[str]:
235 """
236 Convert a request's list of accepted languages into locale identifiers.
237 """
238 accept_languages = []
239 for l in request.accept_languages.values():
240 try:
241 parsed = Locale.parse(l, "-")
242 accept_languages.append(str(parsed))
243
244 # We only have two Chinese translations, simplified
245 # and traditional, based on script and not
246 # region. Browsers tend to send identifiers with
247 # region, e.g. zh-CN or zh-TW. Babel can generally
248 # infer the script from those, so we can fabricate a
249 # fallback entry without region, in the hope that it
250 # will match one of our translations and the site will
251 # at least be more legible at first contact than the
252 # probable default locale of English.
253 if parsed.language == "zh" and parsed.script:
254 accept_languages.append(str(Locale(language=parsed.language, script=parsed.script)))
255 except (ValueError, UnknownLocaleError):
256 pass
257 return accept_languages
258
259
260 def set_locale(config: SDConfig) -> None:
261 """
262 Update locale info in request and session.
263 """
264 locale = get_locale(config)
265 g.localeinfo = RequestLocaleInfo(locale) # pylint: disable=assigning-non-slot
266 session["locale"] = locale
267 g.locales = LOCALES # pylint: disable=assigning-non-slot
268
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/i18n.py b/securedrop/i18n.py
--- a/securedrop/i18n.py
+++ b/securedrop/i18n.py
@@ -16,7 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import collections
-from typing import Dict, List, Set
+from typing import Dict, List, OrderedDict, Set
from babel.core import (
Locale,
@@ -25,7 +25,7 @@
negotiate_locale,
parse_locale,
)
-from flask import Flask, g, request, session
+from flask import Flask, current_app, g, request, session
from flask_babel import Babel
from sdconfig import FALLBACK_LOCALE, SDConfig
@@ -128,7 +128,7 @@
return {Locale.parse(code) for code in codes}
-def validate_locale_configuration(config: SDConfig, babel: Babel) -> None:
+def validate_locale_configuration(config: SDConfig, babel: Babel) -> Set[Locale]:
"""
Check that configured locales are available in the filesystem and therefore usable by
Babel. Warn about configured locales that are not usable, unless we're left with
@@ -157,16 +157,12 @@
f"None of the default locales {defaults} are in the set of usable locales {usable}"
)
- global USABLE_LOCALES
- USABLE_LOCALES = usable
+ return usable
-# TODO(#6420): avoid relying on and manipulating on this global state
-LOCALES = collections.OrderedDict() # type: collections.OrderedDict[str, RequestLocaleInfo]
-USABLE_LOCALES = set() # type: Set[Locale]
-
-
-def map_locale_display_names(config: SDConfig) -> None:
+def map_locale_display_names(
+ config: SDConfig, usable_locales: Set[Locale]
+) -> OrderedDict[str, RequestLocaleInfo]:
"""
Create a map of locale identifiers to names for display.
@@ -183,7 +179,7 @@
locale_map = collections.OrderedDict()
for l in sorted(config.SUPPORTED_LOCALES):
- if Locale.parse(l) not in USABLE_LOCALES:
+ if Locale.parse(l) not in usable_locales:
continue
locale = RequestLocaleInfo(l)
@@ -193,14 +189,13 @@
locale_map[str(locale)] = locale
- global LOCALES
- LOCALES = locale_map
+ return locale_map
def configure(config: SDConfig, app: Flask) -> None:
babel = configure_babel(config, app)
- validate_locale_configuration(config, babel)
- map_locale_display_names(config)
+ usable_locales = validate_locale_configuration(config, babel)
+ app.config["LOCALES"] = map_locale_display_names(config, usable_locales)
def get_locale(config: SDConfig) -> str:
@@ -223,7 +218,8 @@
preferences.append(config.DEFAULT_LOCALE)
preferences.append(FALLBACK_LOCALE)
- negotiated = negotiate_locale(preferences, LOCALES.keys())
+ locales = current_app.config["LOCALES"]
+ negotiated = negotiate_locale(preferences, locales.keys())
if not negotiated:
raise ValueError("No usable locale")
@@ -264,4 +260,4 @@
locale = get_locale(config)
g.localeinfo = RequestLocaleInfo(locale) # pylint: disable=assigning-non-slot
session["locale"] = locale
- g.locales = LOCALES # pylint: disable=assigning-non-slot
+ g.locales = current_app.config["LOCALES"] # pylint: disable=assigning-non-slot
| {"golden_diff": "diff --git a/securedrop/i18n.py b/securedrop/i18n.py\n--- a/securedrop/i18n.py\n+++ b/securedrop/i18n.py\n@@ -16,7 +16,7 @@\n # along with this program. If not, see <http://www.gnu.org/licenses/>.\n #\n import collections\n-from typing import Dict, List, Set\n+from typing import Dict, List, OrderedDict, Set\n \n from babel.core import (\n Locale,\n@@ -25,7 +25,7 @@\n negotiate_locale,\n parse_locale,\n )\n-from flask import Flask, g, request, session\n+from flask import Flask, current_app, g, request, session\n from flask_babel import Babel\n from sdconfig import FALLBACK_LOCALE, SDConfig\n \n@@ -128,7 +128,7 @@\n return {Locale.parse(code) for code in codes}\n \n \n-def validate_locale_configuration(config: SDConfig, babel: Babel) -> None:\n+def validate_locale_configuration(config: SDConfig, babel: Babel) -> Set[Locale]:\n \"\"\"\n Check that configured locales are available in the filesystem and therefore usable by\n Babel. Warn about configured locales that are not usable, unless we're left with\n@@ -157,16 +157,12 @@\n f\"None of the default locales {defaults} are in the set of usable locales {usable}\"\n )\n \n- global USABLE_LOCALES\n- USABLE_LOCALES = usable\n+ return usable\n \n \n-# TODO(#6420): avoid relying on and manipulating on this global state\n-LOCALES = collections.OrderedDict() # type: collections.OrderedDict[str, RequestLocaleInfo]\n-USABLE_LOCALES = set() # type: Set[Locale]\n-\n-\n-def map_locale_display_names(config: SDConfig) -> None:\n+def map_locale_display_names(\n+ config: SDConfig, usable_locales: Set[Locale]\n+) -> OrderedDict[str, RequestLocaleInfo]:\n \"\"\"\n Create a map of locale identifiers to names for display.\n \n@@ -183,7 +179,7 @@\n \n locale_map = collections.OrderedDict()\n for l in sorted(config.SUPPORTED_LOCALES):\n- if Locale.parse(l) not in USABLE_LOCALES:\n+ if Locale.parse(l) not in usable_locales:\n continue\n \n locale = RequestLocaleInfo(l)\n@@ -193,14 +189,13 @@\n \n locale_map[str(locale)] = locale\n \n- global LOCALES\n- LOCALES = locale_map\n+ return locale_map\n \n \n def configure(config: SDConfig, app: Flask) -> None:\n babel = configure_babel(config, app)\n- validate_locale_configuration(config, babel)\n- map_locale_display_names(config)\n+ usable_locales = validate_locale_configuration(config, babel)\n+ app.config[\"LOCALES\"] = map_locale_display_names(config, usable_locales)\n \n \n def get_locale(config: SDConfig) -> str:\n@@ -223,7 +218,8 @@\n preferences.append(config.DEFAULT_LOCALE)\n preferences.append(FALLBACK_LOCALE)\n \n- negotiated = negotiate_locale(preferences, LOCALES.keys())\n+ locales = current_app.config[\"LOCALES\"]\n+ negotiated = negotiate_locale(preferences, locales.keys())\n \n if not negotiated:\n raise ValueError(\"No usable locale\")\n@@ -264,4 +260,4 @@\n locale = get_locale(config)\n g.localeinfo = RequestLocaleInfo(locale) # pylint: disable=assigning-non-slot\n session[\"locale\"] = locale\n- g.locales = LOCALES # pylint: disable=assigning-non-slot\n+ g.locales = current_app.config[\"LOCALES\"] # pylint: disable=assigning-non-slot\n", "issue": "runtime `i18n` configuration manipulates global state\nI guess we already do this for LOCALES, but I think continuing the pattern should be accompanied by a TODO that manipulating/relying on global state is not desirable.\r\n\r\n_Originally posted by @legoktm in https://github.com/freedomofpress/securedrop/pull/6406#discussion_r863080227_\n", "before_files": [{"content": "#\n# SecureDrop whistleblower submission system\n# Copyright (C) 2017 
Loic Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\nimport collections\nfrom typing import Dict, List, Set\n\nfrom babel.core import (\n Locale,\n UnknownLocaleError,\n get_locale_identifier,\n negotiate_locale,\n parse_locale,\n)\nfrom flask import Flask, g, request, session\nfrom flask_babel import Babel\nfrom sdconfig import FALLBACK_LOCALE, SDConfig\n\n\nclass RequestLocaleInfo:\n \"\"\"\n Convenience wrapper around a babel.core.Locale.\n \"\"\"\n\n def __init__(self, locale: str):\n self.locale = Locale.parse(locale)\n\n # This attribute can be set to `True` to differentiate multiple\n # locales currently available (supported) for the same language.\n self.use_display_name = False\n\n def __str__(self) -> str:\n \"\"\"\n The Babel string representation of the locale.\n \"\"\"\n return str(self.locale)\n\n @property\n def display_name(self) -> str:\n \"\"\"\n Give callers (i.e., templates) the `Locale` object's display name when\n such resolution is warranted, otherwise the language name---as\n determined by `map_locale_display_names()`.\n \"\"\"\n if self.use_display_name:\n return self.locale.display_name\n return self.locale.language_name\n\n @property\n def text_direction(self) -> str:\n \"\"\"\n The Babel text direction: ltr or rtl.\n\n Used primarily to set text direction in HTML via the \"dir\"\n attribute.\n \"\"\"\n return self.locale.text_direction\n\n @property\n def language(self) -> str:\n \"\"\"\n The Babel language name.\n\n Just the language, without subtag info like region or script.\n \"\"\"\n return self.locale.language\n\n @property\n def id(self) -> str:\n \"\"\"\n The Babel string representation of the locale.\n\n This should match the name of the directory containing its\n translations.\n \"\"\"\n return str(self.locale)\n\n @property\n def language_tag(self) -> str:\n \"\"\"\n Returns a BCP47/RFC5646 language tag for the locale.\n\n Language tags are used in HTTP headers and the HTML lang\n attribute.\n \"\"\"\n return get_locale_identifier(parse_locale(str(self.locale)), sep=\"-\")\n\n\ndef configure_babel(config: SDConfig, app: Flask) -> Babel:\n \"\"\"\n Set up Flask-Babel according to the SecureDrop configuration.\n \"\"\"\n # Tell Babel where to find our translations.\n translations_directory = str(config.TRANSLATION_DIRS.absolute())\n app.config[\"BABEL_TRANSLATION_DIRECTORIES\"] = translations_directory\n\n # Create the app's Babel instance. 
Passing the app to the\n # constructor causes the instance to attach itself to the app.\n babel = Babel(app)\n\n # verify that Babel is only using the translations we told it about\n if list(babel.translation_directories) != [translations_directory]:\n raise ValueError(\n \"Babel translation directories ({}) do not match SecureDrop configuration ({})\".format(\n babel.translation_directories, [translations_directory]\n )\n )\n\n # register the function used to determine the locale of a request\n babel.localeselector(lambda: get_locale(config))\n return babel\n\n\ndef parse_locale_set(codes: List[str]) -> Set[Locale]:\n return {Locale.parse(code) for code in codes}\n\n\ndef validate_locale_configuration(config: SDConfig, babel: Babel) -> None:\n \"\"\"\n Check that configured locales are available in the filesystem and therefore usable by\n Babel. Warn about configured locales that are not usable, unless we're left with\n no usable default or fallback locale, in which case raise an exception.\n \"\"\"\n # These locales are available and loadable from the filesystem.\n available = set(babel.list_translations())\n available.add(Locale.parse(FALLBACK_LOCALE))\n\n # These locales were configured via \"securedrop-admin sdconfig\", meaning\n # they were present on the Admin Workstation at \"securedrop-admin\" runtime.\n configured = parse_locale_set(config.SUPPORTED_LOCALES)\n\n # The intersection of these sets is the set of locales usable by Babel.\n usable = available & configured\n\n missing = configured - usable\n if missing:\n babel.app.logger.error(\n f\"Configured locales {missing} are not in the set of usable locales {usable}\"\n )\n\n defaults = parse_locale_set([config.DEFAULT_LOCALE, FALLBACK_LOCALE])\n if not defaults & usable:\n raise ValueError(\n f\"None of the default locales {defaults} are in the set of usable locales {usable}\"\n )\n\n global USABLE_LOCALES\n USABLE_LOCALES = usable\n\n\n# TODO(#6420): avoid relying on and manipulating on this global state\nLOCALES = collections.OrderedDict() # type: collections.OrderedDict[str, RequestLocaleInfo]\nUSABLE_LOCALES = set() # type: Set[Locale]\n\n\ndef map_locale_display_names(config: SDConfig) -> None:\n \"\"\"\n Create a map of locale identifiers to names for display.\n\n For most of our supported languages, we only provide one\n translation, so including the full display name is not necessary\n to distinguish them. 
For languages with more than one translation,\n like Chinese, we do need the additional detail.\n \"\"\"\n\n language_locale_counts = collections.defaultdict(int) # type: Dict[str, int]\n for l in sorted(config.SUPPORTED_LOCALES):\n locale = RequestLocaleInfo(l)\n language_locale_counts[locale.language] += 1\n\n locale_map = collections.OrderedDict()\n for l in sorted(config.SUPPORTED_LOCALES):\n if Locale.parse(l) not in USABLE_LOCALES:\n continue\n\n locale = RequestLocaleInfo(l)\n if language_locale_counts[locale.language] > 1:\n # Disambiguate translations for this language.\n locale.use_display_name = True\n\n locale_map[str(locale)] = locale\n\n global LOCALES\n LOCALES = locale_map\n\n\ndef configure(config: SDConfig, app: Flask) -> None:\n babel = configure_babel(config, app)\n validate_locale_configuration(config, babel)\n map_locale_display_names(config)\n\n\ndef get_locale(config: SDConfig) -> str:\n \"\"\"\n Return the best supported locale for a request.\n\n Get the locale as follows, by order of precedence:\n - l request argument or session['locale']\n - browser suggested locale, from the Accept-Languages header\n - config.DEFAULT_LOCALE\n - config.FALLBACK_LOCALE\n \"\"\"\n preferences = []\n if session and session.get(\"locale\"):\n preferences.append(session.get(\"locale\"))\n if request.args.get(\"l\"):\n preferences.insert(0, request.args.get(\"l\"))\n if not preferences:\n preferences.extend(get_accepted_languages())\n preferences.append(config.DEFAULT_LOCALE)\n preferences.append(FALLBACK_LOCALE)\n\n negotiated = negotiate_locale(preferences, LOCALES.keys())\n\n if not negotiated:\n raise ValueError(\"No usable locale\")\n\n return negotiated\n\n\ndef get_accepted_languages() -> List[str]:\n \"\"\"\n Convert a request's list of accepted languages into locale identifiers.\n \"\"\"\n accept_languages = []\n for l in request.accept_languages.values():\n try:\n parsed = Locale.parse(l, \"-\")\n accept_languages.append(str(parsed))\n\n # We only have two Chinese translations, simplified\n # and traditional, based on script and not\n # region. Browsers tend to send identifiers with\n # region, e.g. zh-CN or zh-TW. 
Babel can generally\n # infer the script from those, so we can fabricate a\n # fallback entry without region, in the hope that it\n # will match one of our translations and the site will\n # at least be more legible at first contact than the\n # probable default locale of English.\n if parsed.language == \"zh\" and parsed.script:\n accept_languages.append(str(Locale(language=parsed.language, script=parsed.script)))\n except (ValueError, UnknownLocaleError):\n pass\n return accept_languages\n\n\ndef set_locale(config: SDConfig) -> None:\n \"\"\"\n Update locale info in request and session.\n \"\"\"\n locale = get_locale(config)\n g.localeinfo = RequestLocaleInfo(locale) # pylint: disable=assigning-non-slot\n session[\"locale\"] = locale\n g.locales = LOCALES # pylint: disable=assigning-non-slot\n", "path": "securedrop/i18n.py"}], "after_files": [{"content": "#\n# SecureDrop whistleblower submission system\n# Copyright (C) 2017 Loic Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\nimport collections\nfrom typing import Dict, List, OrderedDict, Set\n\nfrom babel.core import (\n Locale,\n UnknownLocaleError,\n get_locale_identifier,\n negotiate_locale,\n parse_locale,\n)\nfrom flask import Flask, current_app, g, request, session\nfrom flask_babel import Babel\nfrom sdconfig import FALLBACK_LOCALE, SDConfig\n\n\nclass RequestLocaleInfo:\n \"\"\"\n Convenience wrapper around a babel.core.Locale.\n \"\"\"\n\n def __init__(self, locale: str):\n self.locale = Locale.parse(locale)\n\n # This attribute can be set to `True` to differentiate multiple\n # locales currently available (supported) for the same language.\n self.use_display_name = False\n\n def __str__(self) -> str:\n \"\"\"\n The Babel string representation of the locale.\n \"\"\"\n return str(self.locale)\n\n @property\n def display_name(self) -> str:\n \"\"\"\n Give callers (i.e., templates) the `Locale` object's display name when\n such resolution is warranted, otherwise the language name---as\n determined by `map_locale_display_names()`.\n \"\"\"\n if self.use_display_name:\n return self.locale.display_name\n return self.locale.language_name\n\n @property\n def text_direction(self) -> str:\n \"\"\"\n The Babel text direction: ltr or rtl.\n\n Used primarily to set text direction in HTML via the \"dir\"\n attribute.\n \"\"\"\n return self.locale.text_direction\n\n @property\n def language(self) -> str:\n \"\"\"\n The Babel language name.\n\n Just the language, without subtag info like region or script.\n \"\"\"\n return self.locale.language\n\n @property\n def id(self) -> str:\n \"\"\"\n The Babel string representation of the locale.\n\n This should match the name of the directory containing its\n translations.\n \"\"\"\n return str(self.locale)\n\n @property\n def language_tag(self) -> str:\n \"\"\"\n Returns a BCP47/RFC5646 language tag for the locale.\n\n Language tags are used in HTTP headers and 
the HTML lang\n attribute.\n \"\"\"\n return get_locale_identifier(parse_locale(str(self.locale)), sep=\"-\")\n\n\ndef configure_babel(config: SDConfig, app: Flask) -> Babel:\n \"\"\"\n Set up Flask-Babel according to the SecureDrop configuration.\n \"\"\"\n # Tell Babel where to find our translations.\n translations_directory = str(config.TRANSLATION_DIRS.absolute())\n app.config[\"BABEL_TRANSLATION_DIRECTORIES\"] = translations_directory\n\n # Create the app's Babel instance. Passing the app to the\n # constructor causes the instance to attach itself to the app.\n babel = Babel(app)\n\n # verify that Babel is only using the translations we told it about\n if list(babel.translation_directories) != [translations_directory]:\n raise ValueError(\n \"Babel translation directories ({}) do not match SecureDrop configuration ({})\".format(\n babel.translation_directories, [translations_directory]\n )\n )\n\n # register the function used to determine the locale of a request\n babel.localeselector(lambda: get_locale(config))\n return babel\n\n\ndef parse_locale_set(codes: List[str]) -> Set[Locale]:\n return {Locale.parse(code) for code in codes}\n\n\ndef validate_locale_configuration(config: SDConfig, babel: Babel) -> Set[Locale]:\n \"\"\"\n Check that configured locales are available in the filesystem and therefore usable by\n Babel. Warn about configured locales that are not usable, unless we're left with\n no usable default or fallback locale, in which case raise an exception.\n \"\"\"\n # These locales are available and loadable from the filesystem.\n available = set(babel.list_translations())\n available.add(Locale.parse(FALLBACK_LOCALE))\n\n # These locales were configured via \"securedrop-admin sdconfig\", meaning\n # they were present on the Admin Workstation at \"securedrop-admin\" runtime.\n configured = parse_locale_set(config.SUPPORTED_LOCALES)\n\n # The intersection of these sets is the set of locales usable by Babel.\n usable = available & configured\n\n missing = configured - usable\n if missing:\n babel.app.logger.error(\n f\"Configured locales {missing} are not in the set of usable locales {usable}\"\n )\n\n defaults = parse_locale_set([config.DEFAULT_LOCALE, FALLBACK_LOCALE])\n if not defaults & usable:\n raise ValueError(\n f\"None of the default locales {defaults} are in the set of usable locales {usable}\"\n )\n\n return usable\n\n\ndef map_locale_display_names(\n config: SDConfig, usable_locales: Set[Locale]\n) -> OrderedDict[str, RequestLocaleInfo]:\n \"\"\"\n Create a map of locale identifiers to names for display.\n\n For most of our supported languages, we only provide one\n translation, so including the full display name is not necessary\n to distinguish them. 
For languages with more than one translation,\n like Chinese, we do need the additional detail.\n \"\"\"\n\n language_locale_counts = collections.defaultdict(int) # type: Dict[str, int]\n for l in sorted(config.SUPPORTED_LOCALES):\n locale = RequestLocaleInfo(l)\n language_locale_counts[locale.language] += 1\n\n locale_map = collections.OrderedDict()\n for l in sorted(config.SUPPORTED_LOCALES):\n if Locale.parse(l) not in usable_locales:\n continue\n\n locale = RequestLocaleInfo(l)\n if language_locale_counts[locale.language] > 1:\n # Disambiguate translations for this language.\n locale.use_display_name = True\n\n locale_map[str(locale)] = locale\n\n return locale_map\n\n\ndef configure(config: SDConfig, app: Flask) -> None:\n babel = configure_babel(config, app)\n usable_locales = validate_locale_configuration(config, babel)\n app.config[\"LOCALES\"] = map_locale_display_names(config, usable_locales)\n\n\ndef get_locale(config: SDConfig) -> str:\n \"\"\"\n Return the best supported locale for a request.\n\n Get the locale as follows, by order of precedence:\n - l request argument or session['locale']\n - browser suggested locale, from the Accept-Languages header\n - config.DEFAULT_LOCALE\n - config.FALLBACK_LOCALE\n \"\"\"\n preferences = []\n if session and session.get(\"locale\"):\n preferences.append(session.get(\"locale\"))\n if request.args.get(\"l\"):\n preferences.insert(0, request.args.get(\"l\"))\n if not preferences:\n preferences.extend(get_accepted_languages())\n preferences.append(config.DEFAULT_LOCALE)\n preferences.append(FALLBACK_LOCALE)\n\n locales = current_app.config[\"LOCALES\"]\n negotiated = negotiate_locale(preferences, locales.keys())\n\n if not negotiated:\n raise ValueError(\"No usable locale\")\n\n return negotiated\n\n\ndef get_accepted_languages() -> List[str]:\n \"\"\"\n Convert a request's list of accepted languages into locale identifiers.\n \"\"\"\n accept_languages = []\n for l in request.accept_languages.values():\n try:\n parsed = Locale.parse(l, \"-\")\n accept_languages.append(str(parsed))\n\n # We only have two Chinese translations, simplified\n # and traditional, based on script and not\n # region. Browsers tend to send identifiers with\n # region, e.g. zh-CN or zh-TW. Babel can generally\n # infer the script from those, so we can fabricate a\n # fallback entry without region, in the hope that it\n # will match one of our translations and the site will\n # at least be more legible at first contact than the\n # probable default locale of English.\n if parsed.language == \"zh\" and parsed.script:\n accept_languages.append(str(Locale(language=parsed.language, script=parsed.script)))\n except (ValueError, UnknownLocaleError):\n pass\n return accept_languages\n\n\ndef set_locale(config: SDConfig) -> None:\n \"\"\"\n Update locale info in request and session.\n \"\"\"\n locale = get_locale(config)\n g.localeinfo = RequestLocaleInfo(locale) # pylint: disable=assigning-non-slot\n session[\"locale\"] = locale\n g.locales = current_app.config[\"LOCALES\"] # pylint: disable=assigning-non-slot\n", "path": "securedrop/i18n.py"}]} | 3,051 | 850 |
gh_patches_debug_30297 | rasdani/github-patches | git_diff | nltk__nltk-926 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BLEU score brevity penalty does not follow definition
Papineni et al. (2002), if I understand correctly, define the BLEU brevity penalty as follows:
- let _c_ be the length of the candidate
- let _r_ be the length of the reference which is closest in length to the candidate
Then, BP = 1 if c > r, and BP = exp(1 - r / c) otherwise.
(There are some details about doing this at the corpus level, but they're not relevant to the point at hand.)
But this is not what `nltk.align.bleu_score._brevity_penalty` (called by `nltk.align.bleu_score`) computes. Rather, it computes _r_ as follows:
```
c = len(candidate)
r = min(abs(len(r) - c) for r in references)
```
So if _c_ is 12 and _r_ under Papineni et al.'s definition is 28, then this function will set `r` to 16, not 28.
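For concreteness, a minimal sketch of that difference (the reference lengths 28 and 30 are just made-up values):

```
candidate = ['w'] * 12
references = [['w'] * 28, ['w'] * 30]

c = len(candidate)

# what the current code computes: the smallest *difference* in length
r_current = min(abs(len(r) - c) for r in references)  # 16

# what the paper seems to call for: the closest reference *length* itself
ref_lens = [len(ref) for ref in references]
r_paper = min(ref_lens, key=lambda ref_len: abs(ref_len - c))  # 28
```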
However, it is possible I have misunderstood the original paper. And it would be ideal to test this against a canonical implementation. But if I have understood correctly, the fix would be something like:
```
c = len(candidate)
ref_lens = [len(ref) for ref in references]
r = min(ref_lens, key=lambda ref_len: abs(ref_len - c))
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nltk/align/bleu_score.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Natural Language Toolkit: BLEU Score
3 #
4 # Copyright (C) 2001-2015 NLTK Project
5 # Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim
6 # Contributors: Dmitrijs Milajevs
7 # URL: <http://nltk.org/>
8 # For license information, see LICENSE.TXT
9 """BLEU score implementation."""
10
11 from __future__ import division
12
13 import math
14
15 from nltk.tokenize import word_tokenize
16 from nltk.compat import Counter
17 from nltk.util import ngrams
18
19
20 def bleu(candidate, references, weights):
21 """Calculate BLEU score (Bilingual Evaluation Understudy)
22
23 :param candidate: a candidate sentence
24 :type candidate: list(str)
25 :param references: reference sentences
26 :type references: list(list(str))
27 :param weights: weights for unigrams, bigrams, trigrams and so on
28 :type weights: list(float)
29
30 >>> weights = [0.25, 0.25, 0.25, 0.25]
31 >>> candidate1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
32 ... 'ensures', 'that', 'the', 'military', 'always',
33 ... 'obeys', 'the', 'commands', 'of', 'the', 'party']
34
35 >>> candidate2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
36 ... 'forever', 'hearing', 'the', 'activity', 'guidebook',
37 ... 'that', 'party', 'direct']
38
39 >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
40 ... 'ensures', 'that', 'the', 'military', 'will', 'forever',
41 ... 'heed', 'Party', 'commands']
42
43 >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
44 ... 'guarantees', 'the', 'military', 'forces', 'always',
45 ... 'being', 'under', 'the', 'command', 'of', 'the',
46 ... 'Party']
47
48 >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
49 ... 'army', 'always', 'to', 'heed', 'the', 'directions',
50 ... 'of', 'the', 'party']
51
52 >>> bleu(candidate1, [reference1, reference2, reference3], weights)
53 0.504...
54
55 >>> bleu(candidate2, [reference1, reference2, reference3], weights)
56 0
57
58 Papineni, Kishore, et al. "BLEU: A method for automatic evaluation of
59 machine translation." Proceedings of the 40th annual meeting on association for
60 computational linguistics. Association for Computational Linguistics, 2002.
61 http://www.aclweb.org/anthology/P02-1040.pdf
62
63 """
64 p_ns = (
65 _modified_precision(candidate, references, i)
66 for i, _ in enumerate(weights, start=1)
67 )
68
69 try:
70 s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns))
71 except ValueError:
72 # some p_ns is 0
73 return 0
74
75 bp = _brevity_penalty(candidate, references)
76 return bp * math.exp(s)
77
78
79 def _modified_precision(candidate, references, n):
80 """Calculate modified ngram precision.
81
82 The normal precision method may lead to some wrong translations with
83 high-precision, e.g., the translation, in which a word of reference
84 repeats several times, has very high precision. So in the modified
85 n-gram precision, a reference word will be considered exhausted after
86 a matching candidate word is identified.
87
88 Paper examples:
89
90 >>> _modified_precision(
91 ... 'the the the the the the the'.split(),
92 ... ['the cat is on the mat'.split(), 'there is a cat on the mat'.split()],
93 ... n=1,
94 ... )
95 0.28...
96
97 >>> _modified_precision(
98 ... 'the the the the the the the'.split(),
99 ... ['the cat is on the mat'.split(), 'there is a cat on the mat'.split()],
100 ... n=2,
101 ... )
102 0.0
103
104 >>> _modified_precision(
105 ... 'of the'.split(),
106 ... [
107 ... 'It is a guide to action that ensures that the military will forever heed Party commands.'.split(),
108 ... 'It is the guiding principle which guarantees the military forces always being under the command of the Party.'.split(),
109 ... 'It is the practical guide for the army always to heed the directions of the party'.split(),
110 ... ],
111 ... n=1,
112 ... )
113 1.0
114
115 >>> _modified_precision(
116 ... 'of the'.split(),
117 ... [
118 ... 'It is a guide to action that ensures that the military will forever heed Party commands.'.split(),
119 ... 'It is the guiding principle which guarantees the military forces always being under the command of the Party.'.split(),
120 ... 'It is the practical guide for the army always to heed the directions of the party'.split(),
121 ... ],
122 ... n=2,
123 ... )
124 1.0
125
126 More examples:
127
128 >>> weights = [0.25, 0.25, 0.25, 0.25]
129 >>> candidate1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
130 ... 'ensures', 'that', 'the', 'military', 'always',
131 ... 'obeys', 'the', 'commands', 'of', 'the', 'party']
132
133 >>> candidate2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
134 ... 'forever', 'hearing', 'the', 'activity', 'guidebook',
135 ... 'that', 'party', 'direct']
136
137 >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
138 ... 'ensures', 'that', 'the', 'military', 'will', 'forever',
139 ... 'heed', 'Party', 'commands']
140
141 >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
142 ... 'guarantees', 'the', 'military', 'forces', 'always',
143 ... 'being', 'under', 'the', 'command', 'of', 'the',
144 ... 'Party']
145
146 >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
147 ... 'army', 'always', 'to', 'heed', 'the', 'directions',
148 ... 'of', 'the', 'party']
149
150 Unigrams:
151
152 >>> _modified_precision(
153 ... candidate1,
154 ... [reference1, reference2, reference3],
155 ... n=1,
156 ... )
157 0.94...
158
159 >>> _modified_precision(
160 ... candidate2,
161 ... [reference1, reference2, reference3],
162 ... n=1,
163 ... )
164 0.57...
165
166 Bigrams:
167
168 >>> _modified_precision(
169 ... candidate1,
170 ... [reference1, reference2, reference3],
171 ... n=2,
172 ... )
173 0.58...
174
175 >>> _modified_precision(
176 ... candidate2,
177 ... [reference1, reference2, reference3],
178 ... n=2,
179 ... )
180 0.07...
181
182 """
183 counts = Counter(ngrams(candidate, n))
184
185 if not counts:
186 return 0
187
188 max_counts = {}
189 for reference in references:
190 reference_counts = Counter(ngrams(reference, n))
191 for ngram in counts:
192 max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])
193
194 clipped_counts = dict((ngram, min(count, max_counts[ngram])) for ngram, count in counts.items())
195
196 return sum(clipped_counts.values()) / sum(counts.values())
197
198
199 def _brevity_penalty(candidate, references):
200 """Calculate brevity penalty.
201
202 As the modified n-gram precision still has the problem from the short
203 length sentence, brevity penalty is used to modify the overall BLEU
204 score according to length.
205
206 """
207 c = len(candidate)
208 r = min(abs(len(r) - c) for r in references)
209
210 if c > r:
211 return 1
212 else:
213 return math.exp(1 - r / c)
214
215
216 # run doctests
217 if __name__ == "__main__":
218 import doctest
219 doctest.testmod(optionflags=doctest.ELLIPSIS)
220
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nltk/align/bleu_score.py b/nltk/align/bleu_score.py
--- a/nltk/align/bleu_score.py
+++ b/nltk/align/bleu_score.py
@@ -203,9 +203,57 @@
length sentence, brevity penalty is used to modify the overall BLEU
score according to length.
+ An example from the paper. There are three references with length 12, 15
+ and 17. And a terse candidate of the length 12. The brevity penalty is 1.
+
+ >>> references = [['a'] * 12, ['a'] * 15, ['a'] * 17]
+ >>> candidate = ['a'] * 12
+ >>> _brevity_penalty(candidate, references)
+ 1.0
+
+ In case a candidate translation is shorter than the references, penalty is
+ applied.
+
+ >>> references = [['a'] * 28, ['a'] * 28]
+ >>> candidate = ['a'] * 12
+ >>> _brevity_penalty(candidate, references)
+ 0.2635...
+
+ The length of the closest reference is used to compute the penalty. If the
+ length of a candidate is 12, and the reference lengths are 13 and 2, the
+ penalty is applied because the candidate length (12) is less then the
+ closest reference length (13).
+
+ >>> references = [['a'] * 13, ['a'] * 2]
+ >>> candidate = ['a'] * 12
+ >>> _brevity_penalty(candidate, references)
+ 0.92...
+
+ The brevity penalty doesn't depend on reference order. More importantly,
+ when two reference sentences are at the same distance, the shortest
+ reference sentence length is used.
+
+ >>> references = [['a'] * 13, ['a'] * 11]
+ >>> candidate = ['a'] * 12
+ >>> _brevity_penalty(candidate, references) == _brevity_penalty(candidate, reversed(references)) == 1
+ True
+
+ A test example from mteval-v13a.pl (starting from the line 705):
+
+ >>> references = [['a'] * 11, ['a'] * 8]
+ >>> candidate = ['a'] * 7
+ >>> _brevity_penalty(candidate, references)
+ 0.86...
+
+ >>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]
+ >>> candidate = ['a'] * 7
+ >>> _brevity_penalty(candidate, references)
+ 1.0
+
"""
c = len(candidate)
- r = min(abs(len(r) - c) for r in references)
+ ref_lens = (len(reference) for reference in references)
+ r = min(ref_lens, key=lambda ref_len: (abs(ref_len - c), ref_len))
if c > r:
return 1
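
As a quick sanity check of the closest-reference selection used in the patch, here is a small standalone sketch (plain Python, no NLTK required) of the same `min` key:

```python
def closest_ref_length(candidate_len, ref_lens):
    # Closest reference length wins; ties are broken in favour of the shorter reference.
    return min(ref_lens, key=lambda ref_len: (abs(ref_len - candidate_len), ref_len))

print(closest_ref_length(12, [13, 2]))   # 13: distance 1 beats distance 10
print(closest_ref_length(12, [13, 11]))  # 11: equal distance, the shorter reference is chosen
```

With c = 12 and r = 11, `c > r` holds, so the brevity penalty is 1, which matches the doctest added above.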
| {"golden_diff": "diff --git a/nltk/align/bleu_score.py b/nltk/align/bleu_score.py\n--- a/nltk/align/bleu_score.py\n+++ b/nltk/align/bleu_score.py\n@@ -203,9 +203,57 @@\n length sentence, brevity penalty is used to modify the overall BLEU\n score according to length.\n \n+ An example from the paper. There are three references with length 12, 15\n+ and 17. And a terse candidate of the length 12. The brevity penalty is 1.\n+\n+ >>> references = [['a'] * 12, ['a'] * 15, ['a'] * 17]\n+ >>> candidate = ['a'] * 12\n+ >>> _brevity_penalty(candidate, references)\n+ 1.0\n+\n+ In case a candidate translation is shorter than the references, penalty is\n+ applied.\n+\n+ >>> references = [['a'] * 28, ['a'] * 28]\n+ >>> candidate = ['a'] * 12\n+ >>> _brevity_penalty(candidate, references)\n+ 0.2635...\n+\n+ The length of the closest reference is used to compute the penalty. If the\n+ length of a candidate is 12, and the reference lengths are 13 and 2, the\n+ penalty is applied because the candidate length (12) is less then the\n+ closest reference length (13).\n+\n+ >>> references = [['a'] * 13, ['a'] * 2]\n+ >>> candidate = ['a'] * 12\n+ >>> _brevity_penalty(candidate, references)\n+ 0.92...\n+\n+ The brevity penalty doesn't depend on reference order. More importantly,\n+ when two reference sentences are at the same distance, the shortest\n+ reference sentence length is used.\n+\n+ >>> references = [['a'] * 13, ['a'] * 11]\n+ >>> candidate = ['a'] * 12\n+ >>> _brevity_penalty(candidate, references) == _brevity_penalty(candidate, reversed(references)) == 1\n+ True\n+\n+ A test example from mteval-v13a.pl (starting from the line 705):\n+\n+ >>> references = [['a'] * 11, ['a'] * 8]\n+ >>> candidate = ['a'] * 7\n+ >>> _brevity_penalty(candidate, references)\n+ 0.86...\n+\n+ >>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]\n+ >>> candidate = ['a'] * 7\n+ >>> _brevity_penalty(candidate, references)\n+ 1.0\n+\n \"\"\"\n c = len(candidate)\n- r = min(abs(len(r) - c) for r in references)\n+ ref_lens = (len(reference) for reference in references)\n+ r = min(ref_lens, key=lambda ref_len: (abs(ref_len - c), ref_len))\n \n if c > r:\n return 1\n", "issue": "BLEU score brevity penalty does not follow definition\nPapineni et al. (2002), if I understand correctly, define the BLEU brevity penalty as follows:\n- let _c_ be the length of the candidate\n- let _r_ be the length of the reference which is closest in length to the candidate\n\nThen, BP = 1 if c > r, and BP = exp(1 - r / c) otherwise.\n\n(There are some details about doing this at the corpus level, but they're not relevant to the point at hand.) \n\nBut this not what `nltk.align.bleu_score._brevity_penalty` (called by `nltk.align.bleu_score`) computes. Rather, it computes _r_ as follows:\n\n```\nc = len(candidate)\nr = min(abs(len(r) - c) for r in references)\n```\n\nSo if _c_ is 12 and _r_ under Papineni et al.'s definition is 28, then this function will set `r` to 16, not 28.\n\nHowever, it is possible I have misunderstood the original paper. And it would be ideal to test this against a canonical implementation. 
But if I have understood correctly, the fix would be something like:\n\n```\nc = len(candidate)\nref_lens = [len(ref) for ref in references]\nr = min(ref_lens, key=lambda ref_len: abs(ref_len - c))\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Natural Language Toolkit: BLEU Score\n#\n# Copyright (C) 2001-2015 NLTK Project\n# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim\n# Contributors: Dmitrijs Milajevs\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\"\"\"BLEU score implementation.\"\"\"\n\nfrom __future__ import division\n\nimport math\n\nfrom nltk.tokenize import word_tokenize\nfrom nltk.compat import Counter\nfrom nltk.util import ngrams\n\n\ndef bleu(candidate, references, weights):\n \"\"\"Calculate BLEU score (Bilingual Evaluation Understudy)\n\n :param candidate: a candidate sentence\n :type candidate: list(str)\n :param references: reference sentences\n :type references: list(list(str))\n :param weights: weights for unigrams, bigrams, trigrams and so on\n :type weights: list(float)\n\n >>> weights = [0.25, 0.25, 0.25, 0.25]\n >>> candidate1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'military', 'always',\n ... 'obeys', 'the', 'commands', 'of', 'the', 'party']\n\n >>> candidate2 = ['It', 'is', 'to', 'insure', 'the', 'troops',\n ... 'forever', 'hearing', 'the', 'activity', 'guidebook',\n ... 'that', 'party', 'direct']\n\n >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'military', 'will', 'forever',\n ... 'heed', 'Party', 'commands']\n\n >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'military', 'forces', 'always',\n ... 'being', 'under', 'the', 'command', 'of', 'the',\n ... 'Party']\n\n >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'party']\n\n >>> bleu(candidate1, [reference1, reference2, reference3], weights)\n 0.504...\n\n >>> bleu(candidate2, [reference1, reference2, reference3], weights)\n 0\n\n Papineni, Kishore, et al. \"BLEU: A method for automatic evaluation of\n machine translation.\" Proceedings of the 40th annual meeting on association for\n computational linguistics. Association for Computational Linguistics, 2002.\n http://www.aclweb.org/anthology/P02-1040.pdf\n\n \"\"\"\n p_ns = (\n _modified_precision(candidate, references, i)\n for i, _ in enumerate(weights, start=1)\n )\n\n try:\n s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns))\n except ValueError:\n # some p_ns is 0\n return 0\n\n bp = _brevity_penalty(candidate, references)\n return bp * math.exp(s)\n\n\ndef _modified_precision(candidate, references, n):\n \"\"\"Calculate modified ngram precision.\n\n The normal precision method may lead to some wrong translations with\n high-precision, e.g., the translation, in which a word of reference\n repeats several times, has very high precision. So in the modified\n n-gram precision, a reference word will be considered exhausted after\n a matching candidate word is identified.\n\n Paper examples:\n\n >>> _modified_precision(\n ... 'the the the the the the the'.split(),\n ... ['the cat is on the mat'.split(), 'there is a cat on the mat'.split()],\n ... n=1,\n ... )\n 0.28...\n\n >>> _modified_precision(\n ... 'the the the the the the the'.split(),\n ... ['the cat is on the mat'.split(), 'there is a cat on the mat'.split()],\n ... n=2,\n ... 
)\n 0.0\n\n >>> _modified_precision(\n ... 'of the'.split(),\n ... [\n ... 'It is a guide to action that ensures that the military will forever heed Party commands.'.split(),\n ... 'It is the guiding principle which guarantees the military forces always being under the command of the Party.'.split(),\n ... 'It is the practical guide for the army always to heed the directions of the party'.split(),\n ... ],\n ... n=1,\n ... )\n 1.0\n\n >>> _modified_precision(\n ... 'of the'.split(),\n ... [\n ... 'It is a guide to action that ensures that the military will forever heed Party commands.'.split(),\n ... 'It is the guiding principle which guarantees the military forces always being under the command of the Party.'.split(),\n ... 'It is the practical guide for the army always to heed the directions of the party'.split(),\n ... ],\n ... n=2,\n ... )\n 1.0\n\n More examples:\n\n >>> weights = [0.25, 0.25, 0.25, 0.25]\n >>> candidate1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'military', 'always',\n ... 'obeys', 'the', 'commands', 'of', 'the', 'party']\n\n >>> candidate2 = ['It', 'is', 'to', 'insure', 'the', 'troops',\n ... 'forever', 'hearing', 'the', 'activity', 'guidebook',\n ... 'that', 'party', 'direct']\n\n >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'military', 'will', 'forever',\n ... 'heed', 'Party', 'commands']\n\n >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'military', 'forces', 'always',\n ... 'being', 'under', 'the', 'command', 'of', 'the',\n ... 'Party']\n\n >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'party']\n\n Unigrams:\n\n >>> _modified_precision(\n ... candidate1,\n ... [reference1, reference2, reference3],\n ... n=1,\n ... )\n 0.94...\n\n >>> _modified_precision(\n ... candidate2,\n ... [reference1, reference2, reference3],\n ... n=1,\n ... )\n 0.57...\n\n Bigrams:\n\n >>> _modified_precision(\n ... candidate1,\n ... [reference1, reference2, reference3],\n ... n=2,\n ... )\n 0.58...\n\n >>> _modified_precision(\n ... candidate2,\n ... [reference1, reference2, reference3],\n ... n=2,\n ... 
)\n 0.07...\n\n \"\"\"\n counts = Counter(ngrams(candidate, n))\n\n if not counts:\n return 0\n\n max_counts = {}\n for reference in references:\n reference_counts = Counter(ngrams(reference, n))\n for ngram in counts:\n max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])\n\n clipped_counts = dict((ngram, min(count, max_counts[ngram])) for ngram, count in counts.items())\n\n return sum(clipped_counts.values()) / sum(counts.values())\n\n\ndef _brevity_penalty(candidate, references):\n \"\"\"Calculate brevity penalty.\n\n As the modified n-gram precision still has the problem from the short\n length sentence, brevity penalty is used to modify the overall BLEU\n score according to length.\n\n \"\"\"\n c = len(candidate)\n r = min(abs(len(r) - c) for r in references)\n\n if c > r:\n return 1\n else:\n return math.exp(1 - r / c)\n\n\n# run doctests\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod(optionflags=doctest.ELLIPSIS)\n", "path": "nltk/align/bleu_score.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Natural Language Toolkit: BLEU Score\n#\n# Copyright (C) 2001-2015 NLTK Project\n# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim\n# Contributors: Dmitrijs Milajevs\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\"\"\"BLEU score implementation.\"\"\"\n\nfrom __future__ import division\n\nimport math\n\nfrom nltk.tokenize import word_tokenize\nfrom nltk.compat import Counter\nfrom nltk.util import ngrams\n\n\ndef bleu(candidate, references, weights):\n \"\"\"Calculate BLEU score (Bilingual Evaluation Understudy)\n\n :param candidate: a candidate sentence\n :type candidate: list(str)\n :param references: reference sentences\n :type references: list(list(str))\n :param weights: weights for unigrams, bigrams, trigrams and so on\n :type weights: list(float)\n\n >>> weights = [0.25, 0.25, 0.25, 0.25]\n >>> candidate1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'military', 'always',\n ... 'obeys', 'the', 'commands', 'of', 'the', 'party']\n\n >>> candidate2 = ['It', 'is', 'to', 'insure', 'the', 'troops',\n ... 'forever', 'hearing', 'the', 'activity', 'guidebook',\n ... 'that', 'party', 'direct']\n\n >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'military', 'will', 'forever',\n ... 'heed', 'Party', 'commands']\n\n >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'military', 'forces', 'always',\n ... 'being', 'under', 'the', 'command', 'of', 'the',\n ... 'Party']\n\n >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'party']\n\n >>> bleu(candidate1, [reference1, reference2, reference3], weights)\n 0.504...\n\n >>> bleu(candidate2, [reference1, reference2, reference3], weights)\n 0\n\n Papineni, Kishore, et al. \"BLEU: A method for automatic evaluation of\n machine translation.\" Proceedings of the 40th annual meeting on association for\n computational linguistics. 
Association for Computational Linguistics, 2002.\n http://www.aclweb.org/anthology/P02-1040.pdf\n\n \"\"\"\n p_ns = (\n _modified_precision(candidate, references, i)\n for i, _ in enumerate(weights, start=1)\n )\n\n try:\n s = math.fsum(w * math.log(p_n) for w, p_n in zip(weights, p_ns))\n except ValueError:\n # some p_ns is 0\n return 0\n\n bp = _brevity_penalty(candidate, references)\n return bp * math.exp(s)\n\n\ndef _modified_precision(candidate, references, n):\n \"\"\"Calculate modified ngram precision.\n\n The normal precision method may lead to some wrong translations with\n high-precision, e.g., the translation, in which a word of reference\n repeats several times, has very high precision. So in the modified\n n-gram precision, a reference word will be considered exhausted after\n a matching candidate word is identified.\n\n Paper examples:\n\n >>> _modified_precision(\n ... 'the the the the the the the'.split(),\n ... ['the cat is on the mat'.split(), 'there is a cat on the mat'.split()],\n ... n=1,\n ... )\n 0.28...\n\n >>> _modified_precision(\n ... 'the the the the the the the'.split(),\n ... ['the cat is on the mat'.split(), 'there is a cat on the mat'.split()],\n ... n=2,\n ... )\n 0.0\n\n >>> _modified_precision(\n ... 'of the'.split(),\n ... [\n ... 'It is a guide to action that ensures that the military will forever heed Party commands.'.split(),\n ... 'It is the guiding principle which guarantees the military forces always being under the command of the Party.'.split(),\n ... 'It is the practical guide for the army always to heed the directions of the party'.split(),\n ... ],\n ... n=1,\n ... )\n 1.0\n\n >>> _modified_precision(\n ... 'of the'.split(),\n ... [\n ... 'It is a guide to action that ensures that the military will forever heed Party commands.'.split(),\n ... 'It is the guiding principle which guarantees the military forces always being under the command of the Party.'.split(),\n ... 'It is the practical guide for the army always to heed the directions of the party'.split(),\n ... ],\n ... n=2,\n ... )\n 1.0\n\n More examples:\n\n >>> weights = [0.25, 0.25, 0.25, 0.25]\n >>> candidate1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'military', 'always',\n ... 'obeys', 'the', 'commands', 'of', 'the', 'party']\n\n >>> candidate2 = ['It', 'is', 'to', 'insure', 'the', 'troops',\n ... 'forever', 'hearing', 'the', 'activity', 'guidebook',\n ... 'that', 'party', 'direct']\n\n >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'military', 'will', 'forever',\n ... 'heed', 'Party', 'commands']\n\n >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'military', 'forces', 'always',\n ... 'being', 'under', 'the', 'command', 'of', 'the',\n ... 'Party']\n\n >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'army', 'always', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'party']\n\n Unigrams:\n\n >>> _modified_precision(\n ... candidate1,\n ... [reference1, reference2, reference3],\n ... n=1,\n ... )\n 0.94...\n\n >>> _modified_precision(\n ... candidate2,\n ... [reference1, reference2, reference3],\n ... n=1,\n ... )\n 0.57...\n\n Bigrams:\n\n >>> _modified_precision(\n ... candidate1,\n ... [reference1, reference2, reference3],\n ... n=2,\n ... )\n 0.58...\n\n >>> _modified_precision(\n ... candidate2,\n ... [reference1, reference2, reference3],\n ... n=2,\n ... 
)\n 0.07...\n\n \"\"\"\n counts = Counter(ngrams(candidate, n))\n\n if not counts:\n return 0\n\n max_counts = {}\n for reference in references:\n reference_counts = Counter(ngrams(reference, n))\n for ngram in counts:\n max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])\n\n clipped_counts = dict((ngram, min(count, max_counts[ngram])) for ngram, count in counts.items())\n\n return sum(clipped_counts.values()) / sum(counts.values())\n\n\ndef _brevity_penalty(candidate, references):\n \"\"\"Calculate brevity penalty.\n\n As the modified n-gram precision still has the problem from the short\n length sentence, brevity penalty is used to modify the overall BLEU\n score according to length.\n\n An example from the paper. There are three references with length 12, 15\n and 17. And a terse candidate of the length 12. The brevity penalty is 1.\n\n >>> references = [['a'] * 12, ['a'] * 15, ['a'] * 17]\n >>> candidate = ['a'] * 12\n >>> _brevity_penalty(candidate, references)\n 1.0\n\n In case a candidate translation is shorter than the references, penalty is\n applied.\n\n >>> references = [['a'] * 28, ['a'] * 28]\n >>> candidate = ['a'] * 12\n >>> _brevity_penalty(candidate, references)\n 0.2635...\n\n The length of the closest reference is used to compute the penalty. If the\n length of a candidate is 12, and the reference lengths are 13 and 2, the\n penalty is applied because the candidate length (12) is less then the\n closest reference length (13).\n\n >>> references = [['a'] * 13, ['a'] * 2]\n >>> candidate = ['a'] * 12\n >>> _brevity_penalty(candidate, references)\n 0.92...\n\n The brevity penalty doesn't depend on reference order. More importantly,\n when two reference sentences are at the same distance, the shortest\n reference sentence length is used.\n\n >>> references = [['a'] * 13, ['a'] * 11]\n >>> candidate = ['a'] * 12\n >>> _brevity_penalty(candidate, references) == _brevity_penalty(candidate, reversed(references)) == 1\n True\n\n A test example from mteval-v13a.pl (starting from the line 705):\n\n >>> references = [['a'] * 11, ['a'] * 8]\n >>> candidate = ['a'] * 7\n >>> _brevity_penalty(candidate, references)\n 0.86...\n\n >>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]\n >>> candidate = ['a'] * 7\n >>> _brevity_penalty(candidate, references)\n 1.0\n\n \"\"\"\n c = len(candidate)\n ref_lens = (len(reference) for reference in references)\n r = min(ref_lens, key=lambda ref_len: (abs(ref_len - c), ref_len))\n\n if c > r:\n return 1\n else:\n return math.exp(1 - r / c)\n\n\n# run doctests\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod(optionflags=doctest.ELLIPSIS)\n", "path": "nltk/align/bleu_score.py"}]} | 3,180 | 728 |
gh_patches_debug_64325 | rasdani/github-patches | git_diff | pex-tool__pex-1725 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.80
On the docket:
+ [x] Support booting via `/bin/sh` with `--sh-boot`. (#1721)
+ [x] Fix more pathologic lock creation slowness. (#1723)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.79"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.79"
+__version__ = "2.1.80"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.79\"\n+__version__ = \"2.1.80\"\n", "issue": "Release 2.1.80\nOn the docket:\r\n+ [x] Support booting via `/bin/sh` with `--sh-boot`. (#1721)\r\n+ [x] Fix more pathologic lock creation slowness. (#1723)\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.79\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.80\"\n", "path": "pex/version.py"}]} | 365 | 96 |
gh_patches_debug_43869 | rasdani/github-patches | git_diff | aws__aws-cli-3331 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
aws configure get and aws configure set with multiword profile names are inconsistent
It seems that `aws configure set --profile "two words"` will add single quotes around the profile name, but `aws configure get --profile "two words"` will search for a profile name that does not have single quotes around the profile name.
These two methods should behave in a similar manner.
To reproduce:
```
$ aws --version
aws-cli/1.15.10 Python/3.6.5 Darwin/17.4.0 botocore/1.10.10
$ aws configure set aws_access_key_id test --profile "test profile"
$ aws configure get aws_access_key_id --profile "test profile"
The config profile (test profile) could not be found
$ aws configure get aws_access_key_id --profile "'test profile'"
test
```
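
The mismatch is easy to reproduce with a plain `configparser` round trip; the sketch below assumes the writer stores a multiword profile under a quoted section header while the reader looks it up unquoted:

```python
import configparser

config = configparser.ConfigParser()

# What the writer effectively produces for a multiword profile name (assumption):
config.read_string("[profile 'test profile']\naws_access_key_id = test\n")

print(config.has_section("profile 'test profile'"))  # True
print(config.has_section("profile test profile"))    # False, so the unquoted lookup fails
```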
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/configure/set.py`
Content:
```
1 # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import os
14
15 from awscli.customizations.commands import BasicCommand
16 from awscli.customizations.configure.writer import ConfigFileWriter
17
18 from . import PREDEFINED_SECTION_NAMES, profile_to_section
19
20
21 class ConfigureSetCommand(BasicCommand):
22 NAME = 'set'
23 DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set',
24 '_description.rst')
25 SYNOPSIS = 'aws configure set varname value [--profile profile-name]'
26 EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst')
27 ARG_TABLE = [
28 {'name': 'varname',
29 'help_text': 'The name of the config value to set.',
30 'action': 'store',
31 'cli_type_name': 'string', 'positional_arg': True},
32 {'name': 'value',
33 'help_text': 'The value to set.',
34 'action': 'store',
35 'no_paramfile': True, # To disable the default paramfile behavior
36 'cli_type_name': 'string', 'positional_arg': True},
37 ]
38 # Any variables specified in this list will be written to
39 # the ~/.aws/credentials file instead of ~/.aws/config.
40 _WRITE_TO_CREDS_FILE = ['aws_access_key_id', 'aws_secret_access_key',
41 'aws_session_token']
42
43 def __init__(self, session, config_writer=None):
44 super(ConfigureSetCommand, self).__init__(session)
45 if config_writer is None:
46 config_writer = ConfigFileWriter()
47 self._config_writer = config_writer
48
49 def _run_main(self, args, parsed_globals):
50 varname = args.varname
51 value = args.value
52 section = 'default'
53 # Before handing things off to the config writer,
54 # we need to find out three things:
55 # 1. What section we're writing to (section).
56 # 2. The name of the config key (varname)
57 # 3. The actual value (value).
58 if '.' not in varname:
59 # unqualified name, scope it to the current
60 # profile (or leave it as the 'default' section if
61 # no profile is set).
62 if self._session.profile is not None:
63 section = profile_to_section(self._session.profile)
64 else:
65 # First figure out if it's been scoped to a profile.
66 parts = varname.split('.')
67 if parts[0] in ('default', 'profile'):
68 # Then we know we're scoped to a profile.
69 if parts[0] == 'default':
70 section = 'default'
71 remaining = parts[1:]
72 else:
73 # [profile, profile_name, ...]
74 section = profile_to_section(parts[1])
75 remaining = parts[2:]
76 varname = remaining[0]
77 if len(remaining) == 2:
78 value = {remaining[1]: value}
79 elif parts[0] not in PREDEFINED_SECTION_NAMES:
80 if self._session.profile is not None:
81 section = profile_to_section(self._session.profile)
82 else:
83 profile_name = self._session.get_config_variable('profile')
84 if profile_name is not None:
85 section = profile_name
86 varname = parts[0]
87 if len(parts) == 2:
88 value = {parts[1]: value}
89 elif len(parts) == 2:
90 # Otherwise it's something like "set preview.service true"
91 # of something in the [plugin] section.
92 section, varname = parts
93 config_filename = os.path.expanduser(
94 self._session.get_config_variable('config_file'))
95 updated_config = {'__section__': section, varname: value}
96 if varname in self._WRITE_TO_CREDS_FILE:
97 config_filename = os.path.expanduser(
98 self._session.get_config_variable('credentials_file'))
99 section_name = updated_config['__section__']
100 if section_name.startswith('profile '):
101 updated_config['__section__'] = section_name[8:]
102 self._config_writer.update_config(updated_config, config_filename)
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/customizations/configure/set.py b/awscli/customizations/configure/set.py
--- a/awscli/customizations/configure/set.py
+++ b/awscli/customizations/configure/set.py
@@ -46,13 +46,17 @@
config_writer = ConfigFileWriter()
self._config_writer = config_writer
+ def _get_config_file(self, path):
+ config_path = self._session.get_config_variable(path)
+ return os.path.expanduser(config_path)
+
def _run_main(self, args, parsed_globals):
varname = args.varname
value = args.value
- section = 'default'
+ profile = 'default'
# Before handing things off to the config writer,
# we need to find out three things:
- # 1. What section we're writing to (section).
+ # 1. What section we're writing to (profile).
# 2. The name of the config key (varname)
# 3. The actual value (value).
if '.' not in varname:
@@ -60,43 +64,44 @@
# profile (or leave it as the 'default' section if
# no profile is set).
if self._session.profile is not None:
- section = profile_to_section(self._session.profile)
+ profile = self._session.profile
else:
# First figure out if it's been scoped to a profile.
parts = varname.split('.')
if parts[0] in ('default', 'profile'):
# Then we know we're scoped to a profile.
if parts[0] == 'default':
- section = 'default'
+ profile = 'default'
remaining = parts[1:]
else:
# [profile, profile_name, ...]
- section = profile_to_section(parts[1])
+ profile = parts[1]
remaining = parts[2:]
varname = remaining[0]
if len(remaining) == 2:
value = {remaining[1]: value}
elif parts[0] not in PREDEFINED_SECTION_NAMES:
if self._session.profile is not None:
- section = profile_to_section(self._session.profile)
+ profile = self._session.profile
else:
profile_name = self._session.get_config_variable('profile')
if profile_name is not None:
- section = profile_name
+ profile = profile_name
varname = parts[0]
if len(parts) == 2:
value = {parts[1]: value}
elif len(parts) == 2:
# Otherwise it's something like "set preview.service true"
# of something in the [plugin] section.
- section, varname = parts
- config_filename = os.path.expanduser(
- self._session.get_config_variable('config_file'))
- updated_config = {'__section__': section, varname: value}
+ profile, varname = parts
+ config_filename = self._get_config_file('config_file')
if varname in self._WRITE_TO_CREDS_FILE:
- config_filename = os.path.expanduser(
- self._session.get_config_variable('credentials_file'))
- section_name = updated_config['__section__']
- if section_name.startswith('profile '):
- updated_config['__section__'] = section_name[8:]
+ # When writing to the creds file, the section is just the profile
+ section = profile
+ config_filename = self._get_config_file('credentials_file')
+ elif profile in PREDEFINED_SECTION_NAMES or profile == 'default':
+ section = profile
+ else:
+ section = profile_to_section(profile)
+ updated_config = {'__section__': section, varname: value}
self._config_writer.update_config(updated_config, config_filename)
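
For illustration, a hypothetical stand-in for `profile_to_section` (the real helper lives in `awscli.customizations.configure`; the quoting rule shown here is an assumption) makes the intended section naming concrete:

```python
from shlex import quote

def profile_to_section(profile_name):
    # Hypothetical sketch: quote only when the name needs it, then prefix with 'profile '.
    return 'profile %s' % quote(profile_name)

print(profile_to_section('dev'))           # profile dev
print(profile_to_section('test profile'))  # profile 'test profile'
```

After the patch, this mapping is applied only when writing the config file; the credentials file section is the bare profile name, which is what `configure get` looks up according to the issue.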
| {"golden_diff": "diff --git a/awscli/customizations/configure/set.py b/awscli/customizations/configure/set.py\n--- a/awscli/customizations/configure/set.py\n+++ b/awscli/customizations/configure/set.py\n@@ -46,13 +46,17 @@\n config_writer = ConfigFileWriter()\n self._config_writer = config_writer\n \n+ def _get_config_file(self, path):\n+ config_path = self._session.get_config_variable(path)\n+ return os.path.expanduser(config_path)\n+\n def _run_main(self, args, parsed_globals):\n varname = args.varname\n value = args.value\n- section = 'default'\n+ profile = 'default'\n # Before handing things off to the config writer,\n # we need to find out three things:\n- # 1. What section we're writing to (section).\n+ # 1. What section we're writing to (profile).\n # 2. The name of the config key (varname)\n # 3. The actual value (value).\n if '.' not in varname:\n@@ -60,43 +64,44 @@\n # profile (or leave it as the 'default' section if\n # no profile is set).\n if self._session.profile is not None:\n- section = profile_to_section(self._session.profile)\n+ profile = self._session.profile\n else:\n # First figure out if it's been scoped to a profile.\n parts = varname.split('.')\n if parts[0] in ('default', 'profile'):\n # Then we know we're scoped to a profile.\n if parts[0] == 'default':\n- section = 'default'\n+ profile = 'default'\n remaining = parts[1:]\n else:\n # [profile, profile_name, ...]\n- section = profile_to_section(parts[1])\n+ profile = parts[1]\n remaining = parts[2:]\n varname = remaining[0]\n if len(remaining) == 2:\n value = {remaining[1]: value}\n elif parts[0] not in PREDEFINED_SECTION_NAMES:\n if self._session.profile is not None:\n- section = profile_to_section(self._session.profile)\n+ profile = self._session.profile\n else:\n profile_name = self._session.get_config_variable('profile')\n if profile_name is not None:\n- section = profile_name\n+ profile = profile_name\n varname = parts[0]\n if len(parts) == 2:\n value = {parts[1]: value}\n elif len(parts) == 2:\n # Otherwise it's something like \"set preview.service true\"\n # of something in the [plugin] section.\n- section, varname = parts\n- config_filename = os.path.expanduser(\n- self._session.get_config_variable('config_file'))\n- updated_config = {'__section__': section, varname: value}\n+ profile, varname = parts\n+ config_filename = self._get_config_file('config_file')\n if varname in self._WRITE_TO_CREDS_FILE:\n- config_filename = os.path.expanduser(\n- self._session.get_config_variable('credentials_file'))\n- section_name = updated_config['__section__']\n- if section_name.startswith('profile '):\n- updated_config['__section__'] = section_name[8:]\n+ # When writing to the creds file, the section is just the profile\n+ section = profile\n+ config_filename = self._get_config_file('credentials_file')\n+ elif profile in PREDEFINED_SECTION_NAMES or profile == 'default':\n+ section = profile\n+ else:\n+ section = profile_to_section(profile)\n+ updated_config = {'__section__': section, varname: value}\n self._config_writer.update_config(updated_config, config_filename)\n", "issue": "aws configure get and aws configure set with multiword profile names are inconsistent\nIt seems that `aws configure set --profile \"two words\"` will add single quotes around the profile name, but `aws configure get --profile \"two words\"` will search for a profile name that does not have single quotes around the profile name.\r\n\r\nThese two methods should behave in a similar manner.\r\n\r\nTo reproduce:\r\n\r\n```\r\n$ aws 
--version\r\naws-cli/1.15.10 Python/3.6.5 Darwin/17.4.0 botocore/1.10.10\r\n$ aws configure set aws_access_key_id test --profile \"test profile\"\r\n$ aws configure get aws_access_key_id --profile \"test profile\"\r\nThe config profile (test profile) could not be found\r\n$ aws configure get aws_access_key_id --profile \"'test profile'\"\r\ntest\r\n```\n", "before_files": [{"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport os\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.configure.writer import ConfigFileWriter\n\nfrom . import PREDEFINED_SECTION_NAMES, profile_to_section\n\n\nclass ConfigureSetCommand(BasicCommand):\n NAME = 'set'\n DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set',\n '_description.rst')\n SYNOPSIS = 'aws configure set varname value [--profile profile-name]'\n EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst')\n ARG_TABLE = [\n {'name': 'varname',\n 'help_text': 'The name of the config value to set.',\n 'action': 'store',\n 'cli_type_name': 'string', 'positional_arg': True},\n {'name': 'value',\n 'help_text': 'The value to set.',\n 'action': 'store',\n 'no_paramfile': True, # To disable the default paramfile behavior\n 'cli_type_name': 'string', 'positional_arg': True},\n ]\n # Any variables specified in this list will be written to\n # the ~/.aws/credentials file instead of ~/.aws/config.\n _WRITE_TO_CREDS_FILE = ['aws_access_key_id', 'aws_secret_access_key',\n 'aws_session_token']\n\n def __init__(self, session, config_writer=None):\n super(ConfigureSetCommand, self).__init__(session)\n if config_writer is None:\n config_writer = ConfigFileWriter()\n self._config_writer = config_writer\n\n def _run_main(self, args, parsed_globals):\n varname = args.varname\n value = args.value\n section = 'default'\n # Before handing things off to the config writer,\n # we need to find out three things:\n # 1. What section we're writing to (section).\n # 2. The name of the config key (varname)\n # 3. The actual value (value).\n if '.' 
not in varname:\n # unqualified name, scope it to the current\n # profile (or leave it as the 'default' section if\n # no profile is set).\n if self._session.profile is not None:\n section = profile_to_section(self._session.profile)\n else:\n # First figure out if it's been scoped to a profile.\n parts = varname.split('.')\n if parts[0] in ('default', 'profile'):\n # Then we know we're scoped to a profile.\n if parts[0] == 'default':\n section = 'default'\n remaining = parts[1:]\n else:\n # [profile, profile_name, ...]\n section = profile_to_section(parts[1])\n remaining = parts[2:]\n varname = remaining[0]\n if len(remaining) == 2:\n value = {remaining[1]: value}\n elif parts[0] not in PREDEFINED_SECTION_NAMES:\n if self._session.profile is not None:\n section = profile_to_section(self._session.profile)\n else:\n profile_name = self._session.get_config_variable('profile')\n if profile_name is not None:\n section = profile_name\n varname = parts[0]\n if len(parts) == 2:\n value = {parts[1]: value}\n elif len(parts) == 2:\n # Otherwise it's something like \"set preview.service true\"\n # of something in the [plugin] section.\n section, varname = parts\n config_filename = os.path.expanduser(\n self._session.get_config_variable('config_file'))\n updated_config = {'__section__': section, varname: value}\n if varname in self._WRITE_TO_CREDS_FILE:\n config_filename = os.path.expanduser(\n self._session.get_config_variable('credentials_file'))\n section_name = updated_config['__section__']\n if section_name.startswith('profile '):\n updated_config['__section__'] = section_name[8:]\n self._config_writer.update_config(updated_config, config_filename)\n", "path": "awscli/customizations/configure/set.py"}], "after_files": [{"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport os\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.configure.writer import ConfigFileWriter\n\nfrom . 
import PREDEFINED_SECTION_NAMES, profile_to_section\n\n\nclass ConfigureSetCommand(BasicCommand):\n NAME = 'set'\n DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set',\n '_description.rst')\n SYNOPSIS = 'aws configure set varname value [--profile profile-name]'\n EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst')\n ARG_TABLE = [\n {'name': 'varname',\n 'help_text': 'The name of the config value to set.',\n 'action': 'store',\n 'cli_type_name': 'string', 'positional_arg': True},\n {'name': 'value',\n 'help_text': 'The value to set.',\n 'action': 'store',\n 'no_paramfile': True, # To disable the default paramfile behavior\n 'cli_type_name': 'string', 'positional_arg': True},\n ]\n # Any variables specified in this list will be written to\n # the ~/.aws/credentials file instead of ~/.aws/config.\n _WRITE_TO_CREDS_FILE = ['aws_access_key_id', 'aws_secret_access_key',\n 'aws_session_token']\n\n def __init__(self, session, config_writer=None):\n super(ConfigureSetCommand, self).__init__(session)\n if config_writer is None:\n config_writer = ConfigFileWriter()\n self._config_writer = config_writer\n\n def _get_config_file(self, path):\n config_path = self._session.get_config_variable(path)\n return os.path.expanduser(config_path)\n\n def _run_main(self, args, parsed_globals):\n varname = args.varname\n value = args.value\n profile = 'default'\n # Before handing things off to the config writer,\n # we need to find out three things:\n # 1. What section we're writing to (profile).\n # 2. The name of the config key (varname)\n # 3. The actual value (value).\n if '.' not in varname:\n # unqualified name, scope it to the current\n # profile (or leave it as the 'default' section if\n # no profile is set).\n if self._session.profile is not None:\n profile = self._session.profile\n else:\n # First figure out if it's been scoped to a profile.\n parts = varname.split('.')\n if parts[0] in ('default', 'profile'):\n # Then we know we're scoped to a profile.\n if parts[0] == 'default':\n profile = 'default'\n remaining = parts[1:]\n else:\n # [profile, profile_name, ...]\n profile = parts[1]\n remaining = parts[2:]\n varname = remaining[0]\n if len(remaining) == 2:\n value = {remaining[1]: value}\n elif parts[0] not in PREDEFINED_SECTION_NAMES:\n if self._session.profile is not None:\n profile = self._session.profile\n else:\n profile_name = self._session.get_config_variable('profile')\n if profile_name is not None:\n profile = profile_name\n varname = parts[0]\n if len(parts) == 2:\n value = {parts[1]: value}\n elif len(parts) == 2:\n # Otherwise it's something like \"set preview.service true\"\n # of something in the [plugin] section.\n profile, varname = parts\n config_filename = self._get_config_file('config_file')\n if varname in self._WRITE_TO_CREDS_FILE:\n # When writing to the creds file, the section is just the profile\n section = profile\n config_filename = self._get_config_file('credentials_file')\n elif profile in PREDEFINED_SECTION_NAMES or profile == 'default':\n section = profile\n else:\n section = profile_to_section(profile)\n updated_config = {'__section__': section, varname: value}\n self._config_writer.update_config(updated_config, config_filename)\n", "path": "awscli/customizations/configure/set.py"}]} | 1,641 | 833 |
gh_patches_debug_41410 | rasdani/github-patches | git_diff | qtile__qtile-1943 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Popups and child-windows are drawn behind parent windows in certain situations
# Issue description
In some situations I noticed that popups or child-windows are drawn (or moved) behind their respective parents; for child-windows this can be quite a problem in case the child is blocking, since then the software might be completely unresponsive.
Places where I have noticed this (as far as I remember):
1. Virtualbox (fullscreen) (the main window, not a client): Context menus on VM-list open in the background.
2. Veracrypt (floating) "mount" child-window: The window either spawns in the back or is moved there once it loses focus.
There have been some other cases, but due to testing PR #1870, which I initially expected to be the cause for this, I haven't taken note of all of them.
Some software definitely has working popup-windows, so Virtualbox might be doing something weird.
# Qtile version
https://github.com/qtile/qtile/commit/c6b80e9444e9b4e5dc2f52ca1954dc502c45f2eb
(newer versions not tested yet)
# Configuration
I'll only include one snippet here, since everything else (setting up Keys and Widgets) most likely has nothing to do with the problem.
```python
@hook.subscribe.client_new
def modify_window(client):
if (client.window.get_wm_transient_for() or client.window.get_wm_type() in floating_types):
client.floating = True
```
On top of this I am currently using fake screens instead of normal screens.
It would be nice if somebody could confirm this, since I am not entirely sure this isn't all caused by my own PR.
Related to #1870, which could fix at least the described issue with child-windows.
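
As a possible stopgap, the hook above could be extended to also raise transients explicitly. This is an untested sketch: it assumes the window object passed to `client_new` exposes `cmd_bring_to_front` (the floating layout below calls it on clients) and reuses the `floating_types` list from the snippet above.

```python
from libqtile import hook

@hook.subscribe.client_new
def raise_transient(client):
    # Sketch: float transient windows and try to keep them above their parent.
    if client.window.get_wm_transient_for() or client.window.get_wm_type() in floating_types:
        client.floating = True
        client.cmd_bring_to_front()
```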
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/layout/floating.py`
Content:
```
1 # Copyright (c) 2010 matt
2 # Copyright (c) 2010-2011 Paul Colomiets
3 # Copyright (c) 2011 Mounier Florian
4 # Copyright (c) 2012 Craig Barnes
5 # Copyright (c) 2012, 2014-2015 Tycho Andersen
6 # Copyright (c) 2013 Tao Sauvage
7 # Copyright (c) 2013 Julien Iguchi-Cartigny
8 # Copyright (c) 2014 ramnes
9 # Copyright (c) 2014 Sean Vig
10 # Copyright (c) 2014 dequis
11 # Copyright (c) 2018 Nazar Mokrynskyi
12 #
13 # Permission is hereby granted, free of charge, to any person obtaining a copy
14 # of this software and associated documentation files (the "Software"), to deal
15 # in the Software without restriction, including without limitation the rights
16 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
17 # copies of the Software, and to permit persons to whom the Software is
18 # furnished to do so, subject to the following conditions:
19 #
20 # The above copyright notice and this permission notice shall be included in
21 # all copies or substantial portions of the Software.
22 #
23 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 # SOFTWARE.
30
31 import warnings
32
33 from libqtile.config import Match
34 from libqtile.layout.base import Layout
35 from libqtile.log_utils import logger
36
37
38 class Floating(Layout):
39 """
40 Floating layout, which does nothing with windows but handles focus order
41 """
42 defaults = [
43 ("border_focus", "#0000ff", "Border colour for the focused window."),
44 ("border_normal", "#000000", "Border colour for un-focused windows."),
45 ("border_width", 1, "Border width."),
46 ("max_border_width", 0, "Border width for maximize."),
47 ("fullscreen_border_width", 0, "Border width for fullscreen."),
48 ("name", "floating", "Name of this layout."),
49 ]
50
51 def __init__(self, float_rules=None, no_reposition_rules=None, **config):
52 """
53 If you have certain apps that you always want to float you can provide
54 ``float_rules`` to do so. ``float_rules`` are a list of
55 Match objects::
56
57 from libqtile.config import Match
58 Match(title=WM_NAME, wm_class=WM_CLASS, role=WM_WINDOW_ROLE)
59
60 When a new window is opened its ``match`` method is called with each of
61 these rules. If one matches, the window will float. The following
62 will float GIMP and Skype::
63
64 from libqtile.config import Match
65 float_rules=[Match(wm_class="skype"), Match(wm_class="gimp")]
66
67 Specify these in the ``floating_layout`` in your config.
68
69 Floating layout will try to center most of floating windows by default,
70 but if you don't want this to happen for certain windows that are
71 centered by mistake, you can use ``no_reposition_rules`` option to
72 specify them and layout will rely on windows to position themselves in
73 correct location on the screen.
74 """
75 Layout.__init__(self, **config)
76 self.clients = []
77 self.focused = None
78 self.group = None
79 self.float_rules = float_rules or []
80
81 warned = False
82 for index, rule in enumerate(self.float_rules):
83 if isinstance(rule, Match):
84 continue
85
86 if not warned:
87 message = "Non-config.Match objects in float_rules are " \
88 "deprecated"
89 warnings.warn(message, DeprecationWarning)
90 logger.warning(message)
91 warned = True
92
93 match = Match(
94 title=rule.get("wname"), wm_class=rule.get("wmclass"),
95 role=rule.get("role"), wm_type=rule.get("wm_type"),
96 wm_instance_class=rule.get("wm_instance_class"),
97 net_wm_pid=rule.get("net_wm_pid"))
98
99 self.float_rules[index] = match
100
101 self.no_reposition_rules = no_reposition_rules or []
102 self.add_defaults(Floating.defaults)
103
104 def match(self, win):
105 """Used to default float some windows"""
106 return any(win.match(rule) for rule in self.float_rules)
107
108 def find_clients(self, group):
109 """Find all clients belonging to a given group"""
110 return [c for c in self.clients if c.group is group]
111
112 def to_screen(self, group, new_screen):
113 """Adjust offsets of clients within current screen"""
114 for win in self.find_clients(group):
115 if win.maximized:
116 win.maximized = True
117 elif win.fullscreen:
118 win.fullscreen = True
119 else:
120 # catch if the client hasn't been configured
121 try:
122 # By default, place window at same offset from top corner
123 new_x = new_screen.x + win.float_x
124 new_y = new_screen.y + win.float_y
125 except AttributeError:
126 # this will be handled in .configure()
127 pass
128 else:
129 # make sure window isn't off screen left/right...
130 new_x = min(new_x, new_screen.x + new_screen.width - win.width)
131 new_x = max(new_x, new_screen.x)
132 # and up/down
133 new_y = min(new_y, new_screen.y + new_screen.height - win.height)
134 new_y = max(new_y, new_screen.y)
135
136 win.x = new_x
137 win.y = new_y
138 win.group = new_screen.group
139
140 def focus_first(self, group=None):
141 if group is None:
142 clients = self.clients
143 else:
144 clients = self.find_clients(group)
145
146 if clients:
147 return clients[0]
148
149 def focus_next(self, win):
150 if win not in self.clients or win.group is None:
151 return
152
153 clients = self.find_clients(win.group)
154 idx = clients.index(win)
155 if len(clients) > idx + 1:
156 return clients[idx + 1]
157
158 def focus_last(self, group=None):
159 if group is None:
160 clients = self.clients
161 else:
162 clients = self.find_clients(group)
163
164 if clients:
165 return clients[-1]
166
167 def focus_previous(self, win):
168 if win not in self.clients or win.group is None:
169 return
170
171 clients = self.find_clients(win.group)
172 idx = clients.index(win)
173 if idx > 0:
174 return clients[idx - 1]
175
176 def focus(self, client):
177 self.focused = client
178
179 def blur(self):
180 self.focused = None
181
182 def compute_client_position(self, client, screen_rect):
183 """ recompute client.x and client.y, returning whether or not to place
184 this client above other windows or not """
185 above = False
186 transient_for = client.window.get_wm_transient_for()
187 win = client.group.qtile.windows_map.get(transient_for)
188 if win is not None:
189 # if transient for a window, place in the center of the window
190 center_x = win.x + win.width / 2
191 center_y = win.y + win.height / 2
192 else:
193 center_x = screen_rect.x + screen_rect.width / 2
194 center_y = screen_rect.y + screen_rect.height / 2
195 above = True
196
197 x = center_x - client.width / 2
198 y = center_y - client.height / 2
199
200 # don't go off the right...
201 x = min(x, screen_rect.x + screen_rect.width)
202 # or left...
203 x = max(x, screen_rect.x)
204 # or bottom...
205 y = min(y, screen_rect.y + screen_rect.height)
206 # or top
207 y = max(y, screen_rect.y)
208
209 client.x = int(round(x))
210 client.y = int(round(y))
211 return above
212
213 def configure(self, client, screen_rect):
214 if client.has_focus:
215 bc = self.border_focus
216 else:
217 bc = self.border_normal
218
219 if client.maximized:
220 bw = self.max_border_width
221 elif client.fullscreen:
222 bw = self.fullscreen_border_width
223 else:
224 bw = self.border_width
225
226 # 'sun-awt-X11-XWindowPeer' is a dropdown used in Java application,
227 # don't reposition it anywhere, let Java app to control it
228 cls = client.window.get_wm_class() or ''
229 is_java_dropdown = 'sun-awt-X11-XWindowPeer' in cls
230 if is_java_dropdown:
231 client.paint_borders(bc, bw)
232 client.cmd_bring_to_front()
233
234 # similar to above but the X11 version, the client may have already
235 # placed itself. let's respect that
236 elif client.has_user_set_position():
237 client.paint_borders(bc, bw)
238 client.cmd_bring_to_front()
239
240 # ok, it's not java and the window itself didn't position it, but users
241 # may still have asked us not to mess with it
242 elif any(m.compare(client) for m in self.no_reposition_rules):
243 client.paint_borders(bc, bw)
244 client.cmd_bring_to_front()
245
246 else:
247 above = False
248
249 # We definitely have a screen here, so let's be sure we'll float on screen
250 try:
251 client.float_x
252 client.float_y
253 except AttributeError:
254 # this window hasn't been placed before, let's put it in a sensible spot
255 above = self.compute_client_position(client, screen_rect)
256
257 client.place(
258 client.x,
259 client.y,
260 client.width,
261 client.height,
262 bw,
263 bc,
264 above,
265 )
266 client.unhide()
267
268 def add(self, client):
269 self.clients.append(client)
270 self.focused = client
271
272 def remove(self, client):
273 if client not in self.clients:
274 return
275
276 next_focus = self.focus_next(client)
277 if client is self.focused:
278 self.blur()
279 self.clients.remove(client)
280 return next_focus
281
282 def info(self):
283 d = Layout.info(self)
284 d["clients"] = [c.name for c in self.clients]
285 return d
286
287 def cmd_next(self):
288 # This can't ever be called, but implement the abstract method
289 pass
290
291 def cmd_previous(self):
292 # This can't ever be called, but implement the abstract method
293 pass
294
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libqtile/layout/floating.py b/libqtile/layout/floating.py
--- a/libqtile/layout/floating.py
+++ b/libqtile/layout/floating.py
@@ -179,35 +179,53 @@
def blur(self):
self.focused = None
+ def on_screen(self, client, screen_rect):
+ if client.x < screen_rect.x: # client's left edge
+ return False
+ if screen_rect.x + screen_rect.width < client.x + client.width: # right
+ return False
+ if client.y < screen_rect.y: # top
+ return False
+ if screen_rect.y + screen_rect.width < client.y + client.height: # bottom
+ return False
+ return True
+
def compute_client_position(self, client, screen_rect):
""" recompute client.x and client.y, returning whether or not to place
this client above other windows or not """
- above = False
- transient_for = client.window.get_wm_transient_for()
- win = client.group.qtile.windows_map.get(transient_for)
- if win is not None:
- # if transient for a window, place in the center of the window
- center_x = win.x + win.width / 2
- center_y = win.y + win.height / 2
- else:
- center_x = screen_rect.x + screen_rect.width / 2
- center_y = screen_rect.y + screen_rect.height / 2
- above = True
-
- x = center_x - client.width / 2
- y = center_y - client.height / 2
-
- # don't go off the right...
- x = min(x, screen_rect.x + screen_rect.width)
- # or left...
- x = max(x, screen_rect.x)
- # or bottom...
- y = min(y, screen_rect.y + screen_rect.height)
- # or top
- y = max(y, screen_rect.y)
-
- client.x = int(round(x))
- client.y = int(round(y))
+ above = True
+
+ if client.has_user_set_position() and not self.on_screen(client, screen_rect):
+ # move to screen
+ client.x = screen_rect.x + client.x
+ client.y = screen_rect.y + client.y
+ if not client.has_user_set_position() or not self.on_screen(client, screen_rect):
+ # client has not been properly placed before or it is off screen
+ transient_for = client.window.get_wm_transient_for()
+ win = client.group.qtile.windows_map.get(transient_for)
+ if win is not None:
+ # if transient for a window, place in the center of the window
+ center_x = win.x + win.width / 2
+ center_y = win.y + win.height / 2
+ above = False
+ else:
+ center_x = screen_rect.x + screen_rect.width / 2
+ center_y = screen_rect.y + screen_rect.height / 2
+
+ x = center_x - client.width / 2
+ y = center_y - client.height / 2
+
+ # don't go off the right...
+ x = min(x, screen_rect.x + screen_rect.width - client.width)
+ # or left...
+ x = max(x, screen_rect.x)
+ # or bottom...
+ y = min(y, screen_rect.y + screen_rect.height - client.height)
+ # or top
+ y = max(y, screen_rect.y)
+
+ client.x = int(round(x))
+ client.y = int(round(y))
return above
def configure(self, client, screen_rect):
@@ -231,14 +249,7 @@
client.paint_borders(bc, bw)
client.cmd_bring_to_front()
- # similar to above but the X11 version, the client may have already
- # placed itself. let's respect that
- elif client.has_user_set_position():
- client.paint_borders(bc, bw)
- client.cmd_bring_to_front()
-
- # ok, it's not java and the window itself didn't position it, but users
- # may still have asked us not to mess with it
+ # alternatively, users may have asked us explicitly to leave the client alone
elif any(m.compare(client) for m in self.no_reposition_rules):
client.paint_borders(bc, bw)
client.cmd_bring_to_front()
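As a quick illustration of the clamping this patch introduces (made-up numbers, not code from the repository): a window that would overflow the right edge of the screen is pulled back so its right edge lines up with the screen edge, because the patch subtracts the client's width/height before taking the minimum.

```python
# Illustration only: the clamping idea from the patch, with made-up numbers.
screen_x, screen_width = 0, 1920
client_width = 800

desired_x = 1500  # 1500 + 800 = 2300, which would overflow a 1920-wide screen
x = min(desired_x, screen_x + screen_width - client_width)  # -> 1120
x = max(x, screen_x)                                        # still 1120
print(x)  # 1120, so the window's right edge sits exactly at 1920
```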
| {"golden_diff": "diff --git a/libqtile/layout/floating.py b/libqtile/layout/floating.py\n--- a/libqtile/layout/floating.py\n+++ b/libqtile/layout/floating.py\n@@ -179,35 +179,53 @@\n def blur(self):\n self.focused = None\n \n+ def on_screen(self, client, screen_rect):\n+ if client.x < screen_rect.x: # client's left edge\n+ return False\n+ if screen_rect.x + screen_rect.width < client.x + client.width: # right\n+ return False\n+ if client.y < screen_rect.y: # top\n+ return False\n+ if screen_rect.y + screen_rect.width < client.y + client.height: # bottom\n+ return False\n+ return True\n+\n def compute_client_position(self, client, screen_rect):\n \"\"\" recompute client.x and client.y, returning whether or not to place\n this client above other windows or not \"\"\"\n- above = False\n- transient_for = client.window.get_wm_transient_for()\n- win = client.group.qtile.windows_map.get(transient_for)\n- if win is not None:\n- # if transient for a window, place in the center of the window\n- center_x = win.x + win.width / 2\n- center_y = win.y + win.height / 2\n- else:\n- center_x = screen_rect.x + screen_rect.width / 2\n- center_y = screen_rect.y + screen_rect.height / 2\n- above = True\n-\n- x = center_x - client.width / 2\n- y = center_y - client.height / 2\n-\n- # don't go off the right...\n- x = min(x, screen_rect.x + screen_rect.width)\n- # or left...\n- x = max(x, screen_rect.x)\n- # or bottom...\n- y = min(y, screen_rect.y + screen_rect.height)\n- # or top\n- y = max(y, screen_rect.y)\n-\n- client.x = int(round(x))\n- client.y = int(round(y))\n+ above = True\n+\n+ if client.has_user_set_position() and not self.on_screen(client, screen_rect):\n+ # move to screen\n+ client.x = screen_rect.x + client.x\n+ client.y = screen_rect.y + client.y\n+ if not client.has_user_set_position() or not self.on_screen(client, screen_rect):\n+ # client has not been properly placed before or it is off screen\n+ transient_for = client.window.get_wm_transient_for()\n+ win = client.group.qtile.windows_map.get(transient_for)\n+ if win is not None:\n+ # if transient for a window, place in the center of the window\n+ center_x = win.x + win.width / 2\n+ center_y = win.y + win.height / 2\n+ above = False\n+ else:\n+ center_x = screen_rect.x + screen_rect.width / 2\n+ center_y = screen_rect.y + screen_rect.height / 2\n+\n+ x = center_x - client.width / 2\n+ y = center_y - client.height / 2\n+\n+ # don't go off the right...\n+ x = min(x, screen_rect.x + screen_rect.width - client.width)\n+ # or left...\n+ x = max(x, screen_rect.x)\n+ # or bottom...\n+ y = min(y, screen_rect.y + screen_rect.height - client.height)\n+ # or top\n+ y = max(y, screen_rect.y)\n+\n+ client.x = int(round(x))\n+ client.y = int(round(y))\n return above\n \n def configure(self, client, screen_rect):\n@@ -231,14 +249,7 @@\n client.paint_borders(bc, bw)\n client.cmd_bring_to_front()\n \n- # similar to above but the X11 version, the client may have already\n- # placed itself. 
let's respect that\n- elif client.has_user_set_position():\n- client.paint_borders(bc, bw)\n- client.cmd_bring_to_front()\n-\n- # ok, it's not java and the window itself didn't position it, but users\n- # may still have asked us not to mess with it\n+ # alternatively, users may have asked us explicitly to leave the client alone\n elif any(m.compare(client) for m in self.no_reposition_rules):\n client.paint_borders(bc, bw)\n client.cmd_bring_to_front()\n", "issue": "Popups and child-windows are drawn behind parent windows in certain situations\n# Issue description\r\nIn some situations I noticed that popups or child-windows are drawn (or moved) behind their respective parents; for child-windows this can be quite a problem in case the child is blocking, since then the software might be completely unresponsive.\r\n\r\nPlaces where I have noticed this and that I remember:\r\n1. Virtualbox (fullscreen) (the main window, not a client): Context menus on VM-list open in the background.\r\n2. Veracrypt (floating) \"mount\" child-window: The window either spawns in the back or is moved there once it loses focus.\r\n\r\nThere have been some other cases, but due to testing PR #1870, which I initially expected to be the cause for this, I haven't taken note of all of them.\r\nSome software definitely has working popup-windows, so Virtualbox might be doing something weird.\r\n\r\n# Qtile version\r\n\r\nhttps://github.com/qtile/qtile/commit/c6b80e9444e9b4e5dc2f52ca1954dc502c45f2eb\r\n(newer versions not tested yet)\r\n\r\n# Configuration\r\n\r\nI'll only include one snippet here, since verything else (setting up Keys and Widgets) most likely has nothing to do with the problem.\r\n\r\n```python\r\[email protected]_new\r\ndef modify_window(client):\r\n if (client.window.get_wm_transient_for() or client.window.get_wm_type() in floating_types):\r\n client.floating = True\r\n```\r\n\r\nOn top of this I am currently using fake screens instead of normal screens.\r\n\r\nIt would be nice if somebody could confirm this, since I am not entirely sure this isn't all caused by my own PR.\r\n\r\nRelated to #1870, which could fix at least the described issue with child-windows.\n", "before_files": [{"content": "# Copyright (c) 2010 matt\n# Copyright (c) 2010-2011 Paul Colomiets\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2012 Craig Barnes\n# Copyright (c) 2012, 2014-2015 Tycho Andersen\n# Copyright (c) 2013 Tao Sauvage\n# Copyright (c) 2013 Julien Iguchi-Cartigny\n# Copyright (c) 2014 ramnes\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 dequis\n# Copyright (c) 2018 Nazar Mokrynskyi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport warnings\n\nfrom libqtile.config import Match\nfrom libqtile.layout.base import Layout\nfrom libqtile.log_utils import logger\n\n\nclass Floating(Layout):\n \"\"\"\n Floating layout, which does nothing with windows but handles focus order\n \"\"\"\n defaults = [\n (\"border_focus\", \"#0000ff\", \"Border colour for the focused window.\"),\n (\"border_normal\", \"#000000\", \"Border colour for un-focused windows.\"),\n (\"border_width\", 1, \"Border width.\"),\n (\"max_border_width\", 0, \"Border width for maximize.\"),\n (\"fullscreen_border_width\", 0, \"Border width for fullscreen.\"),\n (\"name\", \"floating\", \"Name of this layout.\"),\n ]\n\n def __init__(self, float_rules=None, no_reposition_rules=None, **config):\n \"\"\"\n If you have certain apps that you always want to float you can provide\n ``float_rules`` to do so. ``float_rules`` are a list of\n Match objects::\n\n from libqtile.config import Match\n Match(title=WM_NAME, wm_class=WM_CLASS, role=WM_WINDOW_ROLE)\n\n When a new window is opened its ``match`` method is called with each of\n these rules. If one matches, the window will float. The following\n will float GIMP and Skype::\n\n from libqtile.config import Match\n float_rules=[Match(wm_class=\"skype\"), Match(wm_class=\"gimp\")]\n\n Specify these in the ``floating_layout`` in your config.\n\n Floating layout will try to center most of floating windows by default,\n but if you don't want this to happen for certain windows that are\n centered by mistake, you can use ``no_reposition_rules`` option to\n specify them and layout will rely on windows to position themselves in\n correct location on the screen.\n \"\"\"\n Layout.__init__(self, **config)\n self.clients = []\n self.focused = None\n self.group = None\n self.float_rules = float_rules or []\n\n warned = False\n for index, rule in enumerate(self.float_rules):\n if isinstance(rule, Match):\n continue\n\n if not warned:\n message = \"Non-config.Match objects in float_rules are \" \\\n \"deprecated\"\n warnings.warn(message, DeprecationWarning)\n logger.warning(message)\n warned = True\n\n match = Match(\n title=rule.get(\"wname\"), wm_class=rule.get(\"wmclass\"),\n role=rule.get(\"role\"), wm_type=rule.get(\"wm_type\"),\n wm_instance_class=rule.get(\"wm_instance_class\"),\n net_wm_pid=rule.get(\"net_wm_pid\"))\n\n self.float_rules[index] = match\n\n self.no_reposition_rules = no_reposition_rules or []\n self.add_defaults(Floating.defaults)\n\n def match(self, win):\n \"\"\"Used to default float some windows\"\"\"\n return any(win.match(rule) for rule in self.float_rules)\n\n def find_clients(self, group):\n \"\"\"Find all clients belonging to a given group\"\"\"\n return [c for c in self.clients if c.group is group]\n\n def to_screen(self, group, new_screen):\n \"\"\"Adjust offsets of clients within current screen\"\"\"\n for win in self.find_clients(group):\n if win.maximized:\n win.maximized = True\n elif win.fullscreen:\n win.fullscreen = True\n else:\n # catch if the client hasn't been configured\n try:\n # By default, place window at same offset from top corner\n new_x = new_screen.x + win.float_x\n new_y = new_screen.y + win.float_y\n except AttributeError:\n # this will be handled in .configure()\n pass\n else:\n # make sure window 
isn't off screen left/right...\n new_x = min(new_x, new_screen.x + new_screen.width - win.width)\n new_x = max(new_x, new_screen.x)\n # and up/down\n new_y = min(new_y, new_screen.y + new_screen.height - win.height)\n new_y = max(new_y, new_screen.y)\n\n win.x = new_x\n win.y = new_y\n win.group = new_screen.group\n\n def focus_first(self, group=None):\n if group is None:\n clients = self.clients\n else:\n clients = self.find_clients(group)\n\n if clients:\n return clients[0]\n\n def focus_next(self, win):\n if win not in self.clients or win.group is None:\n return\n\n clients = self.find_clients(win.group)\n idx = clients.index(win)\n if len(clients) > idx + 1:\n return clients[idx + 1]\n\n def focus_last(self, group=None):\n if group is None:\n clients = self.clients\n else:\n clients = self.find_clients(group)\n\n if clients:\n return clients[-1]\n\n def focus_previous(self, win):\n if win not in self.clients or win.group is None:\n return\n\n clients = self.find_clients(win.group)\n idx = clients.index(win)\n if idx > 0:\n return clients[idx - 1]\n\n def focus(self, client):\n self.focused = client\n\n def blur(self):\n self.focused = None\n\n def compute_client_position(self, client, screen_rect):\n \"\"\" recompute client.x and client.y, returning whether or not to place\n this client above other windows or not \"\"\"\n above = False\n transient_for = client.window.get_wm_transient_for()\n win = client.group.qtile.windows_map.get(transient_for)\n if win is not None:\n # if transient for a window, place in the center of the window\n center_x = win.x + win.width / 2\n center_y = win.y + win.height / 2\n else:\n center_x = screen_rect.x + screen_rect.width / 2\n center_y = screen_rect.y + screen_rect.height / 2\n above = True\n\n x = center_x - client.width / 2\n y = center_y - client.height / 2\n\n # don't go off the right...\n x = min(x, screen_rect.x + screen_rect.width)\n # or left...\n x = max(x, screen_rect.x)\n # or bottom...\n y = min(y, screen_rect.y + screen_rect.height)\n # or top\n y = max(y, screen_rect.y)\n\n client.x = int(round(x))\n client.y = int(round(y))\n return above\n\n def configure(self, client, screen_rect):\n if client.has_focus:\n bc = self.border_focus\n else:\n bc = self.border_normal\n\n if client.maximized:\n bw = self.max_border_width\n elif client.fullscreen:\n bw = self.fullscreen_border_width\n else:\n bw = self.border_width\n\n # 'sun-awt-X11-XWindowPeer' is a dropdown used in Java application,\n # don't reposition it anywhere, let Java app to control it\n cls = client.window.get_wm_class() or ''\n is_java_dropdown = 'sun-awt-X11-XWindowPeer' in cls\n if is_java_dropdown:\n client.paint_borders(bc, bw)\n client.cmd_bring_to_front()\n\n # similar to above but the X11 version, the client may have already\n # placed itself. 
let's respect that\n elif client.has_user_set_position():\n client.paint_borders(bc, bw)\n client.cmd_bring_to_front()\n\n # ok, it's not java and the window itself didn't position it, but users\n # may still have asked us not to mess with it\n elif any(m.compare(client) for m in self.no_reposition_rules):\n client.paint_borders(bc, bw)\n client.cmd_bring_to_front()\n\n else:\n above = False\n\n # We definitely have a screen here, so let's be sure we'll float on screen\n try:\n client.float_x\n client.float_y\n except AttributeError:\n # this window hasn't been placed before, let's put it in a sensible spot\n above = self.compute_client_position(client, screen_rect)\n\n client.place(\n client.x,\n client.y,\n client.width,\n client.height,\n bw,\n bc,\n above,\n )\n client.unhide()\n\n def add(self, client):\n self.clients.append(client)\n self.focused = client\n\n def remove(self, client):\n if client not in self.clients:\n return\n\n next_focus = self.focus_next(client)\n if client is self.focused:\n self.blur()\n self.clients.remove(client)\n return next_focus\n\n def info(self):\n d = Layout.info(self)\n d[\"clients\"] = [c.name for c in self.clients]\n return d\n\n def cmd_next(self):\n # This can't ever be called, but implement the abstract method\n pass\n\n def cmd_previous(self):\n # This can't ever be called, but implement the abstract method\n pass\n", "path": "libqtile/layout/floating.py"}], "after_files": [{"content": "# Copyright (c) 2010 matt\n# Copyright (c) 2010-2011 Paul Colomiets\n# Copyright (c) 2011 Mounier Florian\n# Copyright (c) 2012 Craig Barnes\n# Copyright (c) 2012, 2014-2015 Tycho Andersen\n# Copyright (c) 2013 Tao Sauvage\n# Copyright (c) 2013 Julien Iguchi-Cartigny\n# Copyright (c) 2014 ramnes\n# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 dequis\n# Copyright (c) 2018 Nazar Mokrynskyi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport warnings\n\nfrom libqtile.config import Match\nfrom libqtile.layout.base import Layout\nfrom libqtile.log_utils import logger\n\n\nclass Floating(Layout):\n \"\"\"\n Floating layout, which does nothing with windows but handles focus order\n \"\"\"\n defaults = [\n (\"border_focus\", \"#0000ff\", \"Border colour for the focused window.\"),\n (\"border_normal\", \"#000000\", \"Border colour for un-focused windows.\"),\n (\"border_width\", 1, \"Border width.\"),\n (\"max_border_width\", 0, \"Border width for maximize.\"),\n (\"fullscreen_border_width\", 0, \"Border width for fullscreen.\"),\n (\"name\", \"floating\", \"Name of this layout.\"),\n ]\n\n def __init__(self, float_rules=None, no_reposition_rules=None, **config):\n \"\"\"\n If you have certain apps that you always want to float you can provide\n ``float_rules`` to do so. ``float_rules`` are a list of\n Match objects::\n\n from libqtile.config import Match\n Match(title=WM_NAME, wm_class=WM_CLASS, role=WM_WINDOW_ROLE)\n\n When a new window is opened its ``match`` method is called with each of\n these rules. If one matches, the window will float. The following\n will float GIMP and Skype::\n\n from libqtile.config import Match\n float_rules=[Match(wm_class=\"skype\"), Match(wm_class=\"gimp\")]\n\n Specify these in the ``floating_layout`` in your config.\n\n Floating layout will try to center most of floating windows by default,\n but if you don't want this to happen for certain windows that are\n centered by mistake, you can use ``no_reposition_rules`` option to\n specify them and layout will rely on windows to position themselves in\n correct location on the screen.\n \"\"\"\n Layout.__init__(self, **config)\n self.clients = []\n self.focused = None\n self.group = None\n self.float_rules = float_rules or []\n\n warned = False\n for index, rule in enumerate(self.float_rules):\n if isinstance(rule, Match):\n continue\n\n if not warned:\n message = \"Non-config.Match objects in float_rules are \" \\\n \"deprecated\"\n warnings.warn(message, DeprecationWarning)\n logger.warning(message)\n warned = True\n\n match = Match(\n title=rule.get(\"wname\"), wm_class=rule.get(\"wmclass\"),\n role=rule.get(\"role\"), wm_type=rule.get(\"wm_type\"),\n wm_instance_class=rule.get(\"wm_instance_class\"),\n net_wm_pid=rule.get(\"net_wm_pid\"))\n\n self.float_rules[index] = match\n\n self.no_reposition_rules = no_reposition_rules or []\n self.add_defaults(Floating.defaults)\n\n def match(self, win):\n \"\"\"Used to default float some windows\"\"\"\n return any(win.match(rule) for rule in self.float_rules)\n\n def find_clients(self, group):\n \"\"\"Find all clients belonging to a given group\"\"\"\n return [c for c in self.clients if c.group is group]\n\n def to_screen(self, group, new_screen):\n \"\"\"Adjust offsets of clients within current screen\"\"\"\n for win in self.find_clients(group):\n if win.maximized:\n win.maximized = True\n elif win.fullscreen:\n win.fullscreen = True\n else:\n # catch if the client hasn't been configured\n try:\n # By default, place window at same offset from top corner\n new_x = new_screen.x + win.float_x\n new_y = new_screen.y + win.float_y\n except AttributeError:\n # this will be handled in .configure()\n pass\n else:\n # make sure window 
isn't off screen left/right...\n new_x = min(new_x, new_screen.x + new_screen.width - win.width)\n new_x = max(new_x, new_screen.x)\n # and up/down\n new_y = min(new_y, new_screen.y + new_screen.height - win.height)\n new_y = max(new_y, new_screen.y)\n\n win.x = new_x\n win.y = new_y\n win.group = new_screen.group\n\n def focus_first(self, group=None):\n if group is None:\n clients = self.clients\n else:\n clients = self.find_clients(group)\n\n if clients:\n return clients[0]\n\n def focus_next(self, win):\n if win not in self.clients or win.group is None:\n return\n\n clients = self.find_clients(win.group)\n idx = clients.index(win)\n if len(clients) > idx + 1:\n return clients[idx + 1]\n\n def focus_last(self, group=None):\n if group is None:\n clients = self.clients\n else:\n clients = self.find_clients(group)\n\n if clients:\n return clients[-1]\n\n def focus_previous(self, win):\n if win not in self.clients or win.group is None:\n return\n\n clients = self.find_clients(win.group)\n idx = clients.index(win)\n if idx > 0:\n return clients[idx - 1]\n\n def focus(self, client):\n self.focused = client\n\n def blur(self):\n self.focused = None\n\n def on_screen(self, client, screen_rect):\n if client.x < screen_rect.x: # client's left edge\n return False\n if screen_rect.x + screen_rect.width < client.x + client.width: # right\n return False\n if client.y < screen_rect.y: # top\n return False\n if screen_rect.y + screen_rect.width < client.y + client.height: # bottom\n return False\n return True\n\n def compute_client_position(self, client, screen_rect):\n \"\"\" recompute client.x and client.y, returning whether or not to place\n this client above other windows or not \"\"\"\n above = True\n\n if client.has_user_set_position() and not self.on_screen(client, screen_rect):\n # move to screen\n client.x = screen_rect.x + client.x\n client.y = screen_rect.y + client.y\n if not client.has_user_set_position() or not self.on_screen(client, screen_rect):\n # client has not been properly placed before or it is off screen\n transient_for = client.window.get_wm_transient_for()\n win = client.group.qtile.windows_map.get(transient_for)\n if win is not None:\n # if transient for a window, place in the center of the window\n center_x = win.x + win.width / 2\n center_y = win.y + win.height / 2\n above = False\n else:\n center_x = screen_rect.x + screen_rect.width / 2\n center_y = screen_rect.y + screen_rect.height / 2\n\n x = center_x - client.width / 2\n y = center_y - client.height / 2\n\n # don't go off the right...\n x = min(x, screen_rect.x + screen_rect.width - client.width)\n # or left...\n x = max(x, screen_rect.x)\n # or bottom...\n y = min(y, screen_rect.y + screen_rect.height - client.height)\n # or top\n y = max(y, screen_rect.y)\n\n client.x = int(round(x))\n client.y = int(round(y))\n return above\n\n def configure(self, client, screen_rect):\n if client.has_focus:\n bc = self.border_focus\n else:\n bc = self.border_normal\n\n if client.maximized:\n bw = self.max_border_width\n elif client.fullscreen:\n bw = self.fullscreen_border_width\n else:\n bw = self.border_width\n\n # 'sun-awt-X11-XWindowPeer' is a dropdown used in Java application,\n # don't reposition it anywhere, let Java app to control it\n cls = client.window.get_wm_class() or ''\n is_java_dropdown = 'sun-awt-X11-XWindowPeer' in cls\n if is_java_dropdown:\n client.paint_borders(bc, bw)\n client.cmd_bring_to_front()\n\n # alternatively, users may have asked us explicitly to leave the client alone\n elif any(m.compare(client) for 
m in self.no_reposition_rules):\n client.paint_borders(bc, bw)\n client.cmd_bring_to_front()\n\n else:\n above = False\n\n # We definitely have a screen here, so let's be sure we'll float on screen\n try:\n client.float_x\n client.float_y\n except AttributeError:\n # this window hasn't been placed before, let's put it in a sensible spot\n above = self.compute_client_position(client, screen_rect)\n\n client.place(\n client.x,\n client.y,\n client.width,\n client.height,\n bw,\n bc,\n above,\n )\n client.unhide()\n\n def add(self, client):\n self.clients.append(client)\n self.focused = client\n\n def remove(self, client):\n if client not in self.clients:\n return\n\n next_focus = self.focus_next(client)\n if client is self.focused:\n self.blur()\n self.clients.remove(client)\n return next_focus\n\n def info(self):\n d = Layout.info(self)\n d[\"clients\"] = [c.name for c in self.clients]\n return d\n\n def cmd_next(self):\n # This can't ever be called, but implement the abstract method\n pass\n\n def cmd_previous(self):\n # This can't ever be called, but implement the abstract method\n pass\n", "path": "libqtile/layout/floating.py"}]} | 3,803 | 1,013 |
gh_patches_debug_36041 | rasdani/github-patches | git_diff | ManageIQ__integration_tests-2700 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove pytest references from usage framework
https://github.com/lehinevych/cfme_tests/blob/templates_rest/cfme/rest/__init__.py - can we remove the skips in here? This code should raise an Exception which should be caught in the test/fixture code.
--- END ISSUE ---
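As a rough sketch of the kind of change the issue asks for (illustrative only; the helper below is simplified and not the project's actual code), the shared REST helper raises an exception instead of skipping, so the decision to skip stays with the caller:

```python
# Illustrative sketch only; this is not the project's actual helper.
class OptionNotAvailable(Exception):
    """Raised when a REST collection does not support the requested action."""


def create_entities(collection, col_data):
    # Framework code raises a plain exception instead of calling pytest.skip.
    if "create" not in collection.action.all:
        raise OptionNotAvailable("create action is not implemented in this version")
    return collection.action.create(*col_data)
```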
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cfme/rest/__init__.py`
Content:
```
1 import fauxfactory
2 import pytest
3
4 from cfme.automate.service_dialogs import ServiceDialog
5 from cfme.services.catalogs.catalog_item import CatalogItem
6 from cfme.services.catalogs.service_catalogs import ServiceCatalogs
7 from cfme.services import requests
8 from utils.providers import setup_a_provider as _setup_a_provider
9 from utils.virtual_machines import deploy_template
10 from utils.wait import wait_for
11 from utils import version
12
13
14 def service_catalogs(request, rest_api):
15 name = fauxfactory.gen_alphanumeric()
16 scls_data = [{
17 "name": "name_{}_{}".format(name, index),
18 "description": "description_{}_{}".format(name, index),
19 "service_templates": []
20 } for index in range(1, 5)]
21
22 scls = rest_api.collections.service_catalogs.action.add(*scls_data)
23 for scl in scls:
24 wait_for(
25 lambda: rest_api.collections.service_catalogs.find_by(name=scl.name),
26 num_sec=180,
27 delay=10,
28 )
29
30 @request.addfinalizer
31 def _finished():
32 ids = [s.id for s in scls]
33 delete_scls = [s for s in rest_api.collections.service_catalogs if s.id in ids]
34 if len(delete_scls) != 0:
35 rest_api.collections.service_catalogs.action.delete(*delete_scls)
36
37 return scls
38
39
40 def categories(request, rest_api, num=1):
41 ctg_data = [{
42 'name': 'test_category_{}_{}'.format(fauxfactory.gen_alphanumeric().lower(), _index),
43 'description': 'test_category_{}_{}'.format(fauxfactory.gen_alphanumeric().lower(), _index)
44 } for _index in range(0, num)]
45 ctgs = rest_api.collections.categories.action.create(*ctg_data)
46 for ctg in ctgs:
47 wait_for(
48 lambda: rest_api.collections.categories.find_by(description=ctg.description),
49 num_sec=180,
50 delay=10,
51 )
52
53 @request.addfinalizer
54 def _finished():
55 ids = [ctg.id for ctg in ctgs]
56 delete_ctgs = [ctg for ctg in rest_api.collections.categories
57 if ctg.id in ids]
58 if len(delete_ctgs) != 0:
59 rest_api.collections.categories.action.delete(*delete_ctgs)
60
61 return ctgs
62
63
64 def tags(request, rest_api, categories):
65 # Category id, href or name needs to be specified for creating a new tag resource
66 tags = []
67 for ctg in categories:
68 data = {
69 'name': 'test_tag_{}'.format(fauxfactory.gen_alphanumeric().lower()),
70 'description': 'test_tag_{}'.format(fauxfactory.gen_alphanumeric().lower()),
71 'category': {'href': ctg.href}
72 }
73 tags.append(data)
74 tags = rest_api.collections.tags.action.create(*tags)
75 for tag in tags:
76 wait_for(
77 lambda: rest_api.collections.tags.find_by(name=tag.name),
78 num_sec=180,
79 delay=10,
80 )
81
82 @request.addfinalizer
83 def _finished():
84 ids = [tag.id for tag in tags]
85 delete_tags = [tag for tag in rest_api.collections.tags if tag.id in ids]
86 if len(delete_tags) != 0:
87 rest_api.collections.tags.action.delete(*delete_tags)
88
89 return tags
90
91
92 def dialog():
93 dialog = "dialog_{}".format(fauxfactory.gen_alphanumeric())
94 element_data = dict(
95 ele_label="ele_{}".format(fauxfactory.gen_alphanumeric()),
96 ele_name=fauxfactory.gen_alphanumeric(),
97 ele_desc="my ele desc",
98 choose_type="Text Box",
99 default_text_box="default value"
100 )
101 service_dialog = ServiceDialog(
102 label=dialog,
103 description="my dialog",
104 submit=True,
105 cancel=True,
106 tab_label="tab_{}".format(fauxfactory.gen_alphanumeric()),
107 tab_desc="my tab desc",
108 box_label="box_{}".format(fauxfactory.gen_alphanumeric()),
109 box_desc="my box desc")
110 service_dialog.create(element_data)
111 return service_dialog
112
113
114 def services(request, rest_api, a_provider, dialog, service_catalogs):
115 """
116 The attempt to add the service entities via web
117 """
118 template, host, datastore, iso_file, vlan, catalog_item_type = map(a_provider.data.get(
119 "provisioning").get,
120 ('template', 'host', 'datastore', 'iso_file', 'vlan', 'catalog_item_type'))
121
122 provisioning_data = {
123 'vm_name': 'test_rest_{}'.format(fauxfactory.gen_alphanumeric()),
124 'host_name': {'name': [host]},
125 'datastore_name': {'name': [datastore]}
126 }
127
128 if a_provider.type == 'rhevm':
129 provisioning_data['provision_type'] = 'Native Clone'
130 provisioning_data['vlan'] = vlan
131 catalog_item_type = version.pick({
132 version.LATEST: "RHEV",
133 '5.3': "RHEV",
134 '5.2': "Redhat"
135 })
136 elif a_provider.type == 'virtualcenter':
137 provisioning_data['provision_type'] = 'VMware'
138 catalog = service_catalogs[0].name
139 item_name = fauxfactory.gen_alphanumeric()
140 catalog_item = CatalogItem(item_type=catalog_item_type, name=item_name,
141 description="my catalog", display_in=True,
142 catalog=catalog,
143 dialog=dialog.label,
144 catalog_name=template,
145 provider=a_provider.name,
146 prov_data=provisioning_data)
147
148 catalog_item.create()
149 service_catalogs = ServiceCatalogs("service_name")
150 service_catalogs.order(catalog_item.catalog, catalog_item)
151 row_description = catalog_item.name
152 cells = {'Description': row_description}
153 row, __ = wait_for(requests.wait_for_request, [cells, True],
154 fail_func=requests.reload, num_sec=2000, delay=20)
155 assert row.last_message.text == 'Request complete'
156 try:
157 services = [_ for _ in rest_api.collections.services]
158 services[0]
159 except IndexError:
160 pytest.skip("There is no service to be taken")
161
162 @request.addfinalizer
163 def _finished():
164 services = [_ for _ in rest_api.collections.services]
165 if len(services) != 0:
166 rest_api.collections.services.action.delete(*services)
167
168 return services
169
170
171 def rates(request, rest_api):
172 chargeback = rest_api.collections.chargebacks.get(rate_type='Compute')
173 data = [{
174 'description': 'test_rate_{}_{}'.format(_index, fauxfactory.gen_alphanumeric()),
175 'rate': 1,
176 'group': 'cpu',
177 'per_time': 'daily',
178 'per_unit': 'megahertz',
179 'chargeback_rate_id': chargeback.id
180 } for _index in range(0, 3)]
181
182 rates = rest_api.collections.rates.action.create(*data)
183 for rate in data:
184 wait_for(
185 lambda: rest_api.collections.rates.find_by(description=rate.get('description')),
186 num_sec=180,
187 delay=10,
188 )
189
190 @request.addfinalizer
191 def _finished():
192 ids = [rate.id for rate in rates]
193 delete_rates = [rate for rate in rest_api.collections.rates if rate.id in ids]
194 if len(delete_rates) != 0:
195 rest_api.collections.rates.action.delete(*delete_rates)
196
197 return rates
198
199
200 def a_provider():
201 return _setup_a_provider("infra")
202
203
204 def vm(request, a_provider, rest_api):
205 if "refresh" not in rest_api.collections.providers.action.all:
206 pytest.skip("Refresh action is not implemented in this version")
207 provider_rest = rest_api.collections.providers.get(name=a_provider.name)
208 vm_name = deploy_template(
209 a_provider.key,
210 "test_rest_vm_{}".format(fauxfactory.gen_alphanumeric(length=4)))
211 request.addfinalizer(lambda: a_provider.mgmt.delete_vm(vm_name))
212 provider_rest.action.refresh()
213 wait_for(
214 lambda: len(rest_api.collections.vms.find_by(name=vm_name)) > 0,
215 num_sec=600, delay=5)
216 return vm_name
217
218
219 def service_templates(request, rest_api, dialog):
220 catalog_items = []
221 for index in range(1, 5):
222 catalog_items.append(
223 CatalogItem(
224 item_type="Generic",
225 name="item_{}_{}".format(index, fauxfactory.gen_alphanumeric()),
226 description="my catalog", display_in=True,
227 dialog=dialog.label)
228 )
229
230 for catalog_item in catalog_items:
231 catalog_item.create()
232
233 try:
234 s_tpls = [_ for _ in rest_api.collections.service_templates]
235 s_tpls[0]
236 except IndexError:
237 pytest.skip("There is no service template to be taken")
238
239 @request.addfinalizer
240 def _finished():
241 s_tpls = [_ for _ in rest_api.collections.service_templates]
242 if len(s_tpls) != 0:
243 rest_api.collections.service_templates.action.delete(*s_tpls)
244
245 return s_tpls
246
247
248 def automation_requests_data(vm):
249 return [{
250 "uri_parts": {
251 "namespace": "System",
252 "class": "Request",
253 "instance": "InspectME",
254 "message": "create",
255 },
256 "parameters": {
257 "vm_name": vm,
258 },
259 "requester": {
260 "auto_approve": True
261 }
262 } for index in range(1, 5)]
263
264
265 def groups(request, rest_api, role, tenant, num=1):
266 data = [{
267 "description": "group_description_{}".format(fauxfactory.gen_alphanumeric()),
268 "role": {"href": role.href},
269 "tenant": {"href": tenant.href}
270 } for index in range(0, num)]
271
272 groups = _creating_skeleton(request, rest_api, "groups", data)
273 if num == 1:
274 return groups.pop()
275 return groups
276
277
278 def roles(request, rest_api, num=1):
279 data = [{
280 "name": "role_name_{}".format(fauxfactory.gen_alphanumeric())
281 } for index in range(0, num)]
282
283 roles = _creating_skeleton(request, rest_api, "roles", data)
284 if num == 1:
285 return roles.pop()
286 return roles
287
288
289 def tenants(request, rest_api, num=1):
290 parent = rest_api.collections.tenants.get(name='My Company')
291 data = [{
292 'description': 'test_tenants_{}_{}'.format(_index, fauxfactory.gen_alphanumeric()),
293 'name': 'test_tenants_{}_{}'.format(_index, fauxfactory.gen_alphanumeric()),
294 'divisible': 'true',
295 'use_config_for_attributes': 'false',
296 'parent': {'href': parent.href}
297 } for _index in range(0, num)]
298
299 tenants = _creating_skeleton(request, rest_api, "tenants", data)
300 if num == 1:
301 return tenants.pop()
302 return tenants
303
304
305 def users(request, rest_api, num=1):
306 data = [{
307 "userid": "user_{}_{}".format(_index, fauxfactory.gen_alphanumeric(3)),
308 "name": "name_{}_{}".format(_index, fauxfactory.gen_alphanumeric()),
309 "password": "pass_{}_{}".format(_index, fauxfactory.gen_alphanumeric(3)),
310 "group": {"description": "EvmGroup-user"}
311 } for _index in range(0, num)]
312
313 users = _creating_skeleton(request, rest_api, "users", data)
314 if num == 1:
315 return users.pop()
316 return users
317
318
319 def _creating_skeleton(request, rest_api, col_name, col_data):
320 collection = getattr(rest_api.collections, col_name)
321 if "create" not in collection.action.all:
322 pytest.skip("Create action for {} is not implemented in this version".format(col_name))
323 entities = collection.action.create(*col_data)
324 for entity in col_data:
325 if entity.get('name', None):
326 wait_for(lambda: collection.find_by(name=entity.get('name')), num_sec=180, delay=10)
327 elif entity.get('description', None):
328 wait_for(lambda: collection.find_by(
329 description=entity.get('description')), num_sec=180, delay=10)
330 else:
331 raise NotImplementedError
332
333 @request.addfinalizer
334 def _finished():
335 collection.reload()
336 ids = [e.id for e in entities]
337 delete_entities = [e for e in collection if e.id in ids]
338 if len(delete_entities) != 0:
339 collection.action.delete(*delete_entities)
340
341 return entities
342
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cfme/rest/__init__.py b/cfme/rest/__init__.py
--- a/cfme/rest/__init__.py
+++ b/cfme/rest/__init__.py
@@ -1,7 +1,7 @@
import fauxfactory
-import pytest
from cfme.automate.service_dialogs import ServiceDialog
+from cfme.exceptions import OptionNotAvailable
from cfme.services.catalogs.catalog_item import CatalogItem
from cfme.services.catalogs.service_catalogs import ServiceCatalogs
from cfme.services import requests
@@ -157,7 +157,7 @@
services = [_ for _ in rest_api.collections.services]
services[0]
except IndexError:
- pytest.skip("There is no service to be taken")
+ raise Exception("No options are selected")
@request.addfinalizer
def _finished():
@@ -202,8 +202,6 @@
def vm(request, a_provider, rest_api):
- if "refresh" not in rest_api.collections.providers.action.all:
- pytest.skip("Refresh action is not implemented in this version")
provider_rest = rest_api.collections.providers.get(name=a_provider.name)
vm_name = deploy_template(
a_provider.key,
@@ -234,7 +232,7 @@
s_tpls = [_ for _ in rest_api.collections.service_templates]
s_tpls[0]
except IndexError:
- pytest.skip("There is no service template to be taken")
+ raise Exception("There is no service template to be taken")
@request.addfinalizer
def _finished():
@@ -319,7 +317,8 @@
def _creating_skeleton(request, rest_api, col_name, col_data):
collection = getattr(rest_api.collections, col_name)
if "create" not in collection.action.all:
- pytest.skip("Create action for {} is not implemented in this version".format(col_name))
+ raise OptionNotAvailable(
+ "Create action for {} is not implemented in this version".format(col_name))
entities = collection.action.create(*col_data)
for entity in col_data:
if entity.get('name', None):
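For completeness, one possible way for the calling test/fixture layer to turn the newly raised exception back into a skip. This caller-side handling is not part of the patch; the fixture below is only an assumed example, though the `OptionNotAvailable` import path matches the one the patch adds and `users` is the helper defined in `cfme/rest/__init__.py`.

```python
# Assumed caller-side handling; not part of the patch itself.
import pytest

from cfme.exceptions import OptionNotAvailable  # import added by the patch
from cfme.rest import users as rest_users       # hypothetical caller


@pytest.fixture
def user(request, rest_api):
    try:
        return rest_users(request, rest_api, num=1)
    except OptionNotAvailable as err:
        # The skip decision now lives in the fixture, not in cfme.rest.
        pytest.skip(str(err))
```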
| {"golden_diff": "diff --git a/cfme/rest/__init__.py b/cfme/rest/__init__.py\n--- a/cfme/rest/__init__.py\n+++ b/cfme/rest/__init__.py\n@@ -1,7 +1,7 @@\n import fauxfactory\n-import pytest\n \n from cfme.automate.service_dialogs import ServiceDialog\n+from cfme.exceptions import OptionNotAvailable\n from cfme.services.catalogs.catalog_item import CatalogItem\n from cfme.services.catalogs.service_catalogs import ServiceCatalogs\n from cfme.services import requests\n@@ -157,7 +157,7 @@\n services = [_ for _ in rest_api.collections.services]\n services[0]\n except IndexError:\n- pytest.skip(\"There is no service to be taken\")\n+ raise Exception(\"No options are selected\")\n \n @request.addfinalizer\n def _finished():\n@@ -202,8 +202,6 @@\n \n \n def vm(request, a_provider, rest_api):\n- if \"refresh\" not in rest_api.collections.providers.action.all:\n- pytest.skip(\"Refresh action is not implemented in this version\")\n provider_rest = rest_api.collections.providers.get(name=a_provider.name)\n vm_name = deploy_template(\n a_provider.key,\n@@ -234,7 +232,7 @@\n s_tpls = [_ for _ in rest_api.collections.service_templates]\n s_tpls[0]\n except IndexError:\n- pytest.skip(\"There is no service template to be taken\")\n+ raise Exception(\"There is no service template to be taken\")\n \n @request.addfinalizer\n def _finished():\n@@ -319,7 +317,8 @@\n def _creating_skeleton(request, rest_api, col_name, col_data):\n collection = getattr(rest_api.collections, col_name)\n if \"create\" not in collection.action.all:\n- pytest.skip(\"Create action for {} is not implemented in this version\".format(col_name))\n+ raise OptionNotAvailable(\n+ \"Create action for {} is not implemented in this version\".format(col_name))\n entities = collection.action.create(*col_data)\n for entity in col_data:\n if entity.get('name', None):\n", "issue": "Remove pytest references from usage framework\nhttps://github.com/lehinevych/cfme_tests/blob/templates_rest/cfme/rest/__init__.py - can we remove the skips in here. 
This code should raise and Exception which should be caught in the test/fixture code.\n\n", "before_files": [{"content": "import fauxfactory\nimport pytest\n\nfrom cfme.automate.service_dialogs import ServiceDialog\nfrom cfme.services.catalogs.catalog_item import CatalogItem\nfrom cfme.services.catalogs.service_catalogs import ServiceCatalogs\nfrom cfme.services import requests\nfrom utils.providers import setup_a_provider as _setup_a_provider\nfrom utils.virtual_machines import deploy_template\nfrom utils.wait import wait_for\nfrom utils import version\n\n\ndef service_catalogs(request, rest_api):\n name = fauxfactory.gen_alphanumeric()\n scls_data = [{\n \"name\": \"name_{}_{}\".format(name, index),\n \"description\": \"description_{}_{}\".format(name, index),\n \"service_templates\": []\n } for index in range(1, 5)]\n\n scls = rest_api.collections.service_catalogs.action.add(*scls_data)\n for scl in scls:\n wait_for(\n lambda: rest_api.collections.service_catalogs.find_by(name=scl.name),\n num_sec=180,\n delay=10,\n )\n\n @request.addfinalizer\n def _finished():\n ids = [s.id for s in scls]\n delete_scls = [s for s in rest_api.collections.service_catalogs if s.id in ids]\n if len(delete_scls) != 0:\n rest_api.collections.service_catalogs.action.delete(*delete_scls)\n\n return scls\n\n\ndef categories(request, rest_api, num=1):\n ctg_data = [{\n 'name': 'test_category_{}_{}'.format(fauxfactory.gen_alphanumeric().lower(), _index),\n 'description': 'test_category_{}_{}'.format(fauxfactory.gen_alphanumeric().lower(), _index)\n } for _index in range(0, num)]\n ctgs = rest_api.collections.categories.action.create(*ctg_data)\n for ctg in ctgs:\n wait_for(\n lambda: rest_api.collections.categories.find_by(description=ctg.description),\n num_sec=180,\n delay=10,\n )\n\n @request.addfinalizer\n def _finished():\n ids = [ctg.id for ctg in ctgs]\n delete_ctgs = [ctg for ctg in rest_api.collections.categories\n if ctg.id in ids]\n if len(delete_ctgs) != 0:\n rest_api.collections.categories.action.delete(*delete_ctgs)\n\n return ctgs\n\n\ndef tags(request, rest_api, categories):\n # Category id, href or name needs to be specified for creating a new tag resource\n tags = []\n for ctg in categories:\n data = {\n 'name': 'test_tag_{}'.format(fauxfactory.gen_alphanumeric().lower()),\n 'description': 'test_tag_{}'.format(fauxfactory.gen_alphanumeric().lower()),\n 'category': {'href': ctg.href}\n }\n tags.append(data)\n tags = rest_api.collections.tags.action.create(*tags)\n for tag in tags:\n wait_for(\n lambda: rest_api.collections.tags.find_by(name=tag.name),\n num_sec=180,\n delay=10,\n )\n\n @request.addfinalizer\n def _finished():\n ids = [tag.id for tag in tags]\n delete_tags = [tag for tag in rest_api.collections.tags if tag.id in ids]\n if len(delete_tags) != 0:\n rest_api.collections.tags.action.delete(*delete_tags)\n\n return tags\n\n\ndef dialog():\n dialog = \"dialog_{}\".format(fauxfactory.gen_alphanumeric())\n element_data = dict(\n ele_label=\"ele_{}\".format(fauxfactory.gen_alphanumeric()),\n ele_name=fauxfactory.gen_alphanumeric(),\n ele_desc=\"my ele desc\",\n choose_type=\"Text Box\",\n default_text_box=\"default value\"\n )\n service_dialog = ServiceDialog(\n label=dialog,\n description=\"my dialog\",\n submit=True,\n cancel=True,\n tab_label=\"tab_{}\".format(fauxfactory.gen_alphanumeric()),\n tab_desc=\"my tab desc\",\n box_label=\"box_{}\".format(fauxfactory.gen_alphanumeric()),\n box_desc=\"my box desc\")\n service_dialog.create(element_data)\n return service_dialog\n\n\ndef 
services(request, rest_api, a_provider, dialog, service_catalogs):\n \"\"\"\n The attempt to add the service entities via web\n \"\"\"\n template, host, datastore, iso_file, vlan, catalog_item_type = map(a_provider.data.get(\n \"provisioning\").get,\n ('template', 'host', 'datastore', 'iso_file', 'vlan', 'catalog_item_type'))\n\n provisioning_data = {\n 'vm_name': 'test_rest_{}'.format(fauxfactory.gen_alphanumeric()),\n 'host_name': {'name': [host]},\n 'datastore_name': {'name': [datastore]}\n }\n\n if a_provider.type == 'rhevm':\n provisioning_data['provision_type'] = 'Native Clone'\n provisioning_data['vlan'] = vlan\n catalog_item_type = version.pick({\n version.LATEST: \"RHEV\",\n '5.3': \"RHEV\",\n '5.2': \"Redhat\"\n })\n elif a_provider.type == 'virtualcenter':\n provisioning_data['provision_type'] = 'VMware'\n catalog = service_catalogs[0].name\n item_name = fauxfactory.gen_alphanumeric()\n catalog_item = CatalogItem(item_type=catalog_item_type, name=item_name,\n description=\"my catalog\", display_in=True,\n catalog=catalog,\n dialog=dialog.label,\n catalog_name=template,\n provider=a_provider.name,\n prov_data=provisioning_data)\n\n catalog_item.create()\n service_catalogs = ServiceCatalogs(\"service_name\")\n service_catalogs.order(catalog_item.catalog, catalog_item)\n row_description = catalog_item.name\n cells = {'Description': row_description}\n row, __ = wait_for(requests.wait_for_request, [cells, True],\n fail_func=requests.reload, num_sec=2000, delay=20)\n assert row.last_message.text == 'Request complete'\n try:\n services = [_ for _ in rest_api.collections.services]\n services[0]\n except IndexError:\n pytest.skip(\"There is no service to be taken\")\n\n @request.addfinalizer\n def _finished():\n services = [_ for _ in rest_api.collections.services]\n if len(services) != 0:\n rest_api.collections.services.action.delete(*services)\n\n return services\n\n\ndef rates(request, rest_api):\n chargeback = rest_api.collections.chargebacks.get(rate_type='Compute')\n data = [{\n 'description': 'test_rate_{}_{}'.format(_index, fauxfactory.gen_alphanumeric()),\n 'rate': 1,\n 'group': 'cpu',\n 'per_time': 'daily',\n 'per_unit': 'megahertz',\n 'chargeback_rate_id': chargeback.id\n } for _index in range(0, 3)]\n\n rates = rest_api.collections.rates.action.create(*data)\n for rate in data:\n wait_for(\n lambda: rest_api.collections.rates.find_by(description=rate.get('description')),\n num_sec=180,\n delay=10,\n )\n\n @request.addfinalizer\n def _finished():\n ids = [rate.id for rate in rates]\n delete_rates = [rate for rate in rest_api.collections.rates if rate.id in ids]\n if len(delete_rates) != 0:\n rest_api.collections.rates.action.delete(*delete_rates)\n\n return rates\n\n\ndef a_provider():\n return _setup_a_provider(\"infra\")\n\n\ndef vm(request, a_provider, rest_api):\n if \"refresh\" not in rest_api.collections.providers.action.all:\n pytest.skip(\"Refresh action is not implemented in this version\")\n provider_rest = rest_api.collections.providers.get(name=a_provider.name)\n vm_name = deploy_template(\n a_provider.key,\n \"test_rest_vm_{}\".format(fauxfactory.gen_alphanumeric(length=4)))\n request.addfinalizer(lambda: a_provider.mgmt.delete_vm(vm_name))\n provider_rest.action.refresh()\n wait_for(\n lambda: len(rest_api.collections.vms.find_by(name=vm_name)) > 0,\n num_sec=600, delay=5)\n return vm_name\n\n\ndef service_templates(request, rest_api, dialog):\n catalog_items = []\n for index in range(1, 5):\n catalog_items.append(\n CatalogItem(\n item_type=\"Generic\",\n 
name=\"item_{}_{}\".format(index, fauxfactory.gen_alphanumeric()),\n description=\"my catalog\", display_in=True,\n dialog=dialog.label)\n )\n\n for catalog_item in catalog_items:\n catalog_item.create()\n\n try:\n s_tpls = [_ for _ in rest_api.collections.service_templates]\n s_tpls[0]\n except IndexError:\n pytest.skip(\"There is no service template to be taken\")\n\n @request.addfinalizer\n def _finished():\n s_tpls = [_ for _ in rest_api.collections.service_templates]\n if len(s_tpls) != 0:\n rest_api.collections.service_templates.action.delete(*s_tpls)\n\n return s_tpls\n\n\ndef automation_requests_data(vm):\n return [{\n \"uri_parts\": {\n \"namespace\": \"System\",\n \"class\": \"Request\",\n \"instance\": \"InspectME\",\n \"message\": \"create\",\n },\n \"parameters\": {\n \"vm_name\": vm,\n },\n \"requester\": {\n \"auto_approve\": True\n }\n } for index in range(1, 5)]\n\n\ndef groups(request, rest_api, role, tenant, num=1):\n data = [{\n \"description\": \"group_description_{}\".format(fauxfactory.gen_alphanumeric()),\n \"role\": {\"href\": role.href},\n \"tenant\": {\"href\": tenant.href}\n } for index in range(0, num)]\n\n groups = _creating_skeleton(request, rest_api, \"groups\", data)\n if num == 1:\n return groups.pop()\n return groups\n\n\ndef roles(request, rest_api, num=1):\n data = [{\n \"name\": \"role_name_{}\".format(fauxfactory.gen_alphanumeric())\n } for index in range(0, num)]\n\n roles = _creating_skeleton(request, rest_api, \"roles\", data)\n if num == 1:\n return roles.pop()\n return roles\n\n\ndef tenants(request, rest_api, num=1):\n parent = rest_api.collections.tenants.get(name='My Company')\n data = [{\n 'description': 'test_tenants_{}_{}'.format(_index, fauxfactory.gen_alphanumeric()),\n 'name': 'test_tenants_{}_{}'.format(_index, fauxfactory.gen_alphanumeric()),\n 'divisible': 'true',\n 'use_config_for_attributes': 'false',\n 'parent': {'href': parent.href}\n } for _index in range(0, num)]\n\n tenants = _creating_skeleton(request, rest_api, \"tenants\", data)\n if num == 1:\n return tenants.pop()\n return tenants\n\n\ndef users(request, rest_api, num=1):\n data = [{\n \"userid\": \"user_{}_{}\".format(_index, fauxfactory.gen_alphanumeric(3)),\n \"name\": \"name_{}_{}\".format(_index, fauxfactory.gen_alphanumeric()),\n \"password\": \"pass_{}_{}\".format(_index, fauxfactory.gen_alphanumeric(3)),\n \"group\": {\"description\": \"EvmGroup-user\"}\n } for _index in range(0, num)]\n\n users = _creating_skeleton(request, rest_api, \"users\", data)\n if num == 1:\n return users.pop()\n return users\n\n\ndef _creating_skeleton(request, rest_api, col_name, col_data):\n collection = getattr(rest_api.collections, col_name)\n if \"create\" not in collection.action.all:\n pytest.skip(\"Create action for {} is not implemented in this version\".format(col_name))\n entities = collection.action.create(*col_data)\n for entity in col_data:\n if entity.get('name', None):\n wait_for(lambda: collection.find_by(name=entity.get('name')), num_sec=180, delay=10)\n elif entity.get('description', None):\n wait_for(lambda: collection.find_by(\n description=entity.get('description')), num_sec=180, delay=10)\n else:\n raise NotImplementedError\n\n @request.addfinalizer\n def _finished():\n collection.reload()\n ids = [e.id for e in entities]\n delete_entities = [e for e in collection if e.id in ids]\n if len(delete_entities) != 0:\n collection.action.delete(*delete_entities)\n\n return entities\n", "path": "cfme/rest/__init__.py"}], "after_files": [{"content": "import 
fauxfactory\n\nfrom cfme.automate.service_dialogs import ServiceDialog\nfrom cfme.exceptions import OptionNotAvailable\nfrom cfme.services.catalogs.catalog_item import CatalogItem\nfrom cfme.services.catalogs.service_catalogs import ServiceCatalogs\nfrom cfme.services import requests\nfrom utils.providers import setup_a_provider as _setup_a_provider\nfrom utils.virtual_machines import deploy_template\nfrom utils.wait import wait_for\nfrom utils import version\n\n\ndef service_catalogs(request, rest_api):\n name = fauxfactory.gen_alphanumeric()\n scls_data = [{\n \"name\": \"name_{}_{}\".format(name, index),\n \"description\": \"description_{}_{}\".format(name, index),\n \"service_templates\": []\n } for index in range(1, 5)]\n\n scls = rest_api.collections.service_catalogs.action.add(*scls_data)\n for scl in scls:\n wait_for(\n lambda: rest_api.collections.service_catalogs.find_by(name=scl.name),\n num_sec=180,\n delay=10,\n )\n\n @request.addfinalizer\n def _finished():\n ids = [s.id for s in scls]\n delete_scls = [s for s in rest_api.collections.service_catalogs if s.id in ids]\n if len(delete_scls) != 0:\n rest_api.collections.service_catalogs.action.delete(*delete_scls)\n\n return scls\n\n\ndef categories(request, rest_api, num=1):\n ctg_data = [{\n 'name': 'test_category_{}_{}'.format(fauxfactory.gen_alphanumeric().lower(), _index),\n 'description': 'test_category_{}_{}'.format(fauxfactory.gen_alphanumeric().lower(), _index)\n } for _index in range(0, num)]\n ctgs = rest_api.collections.categories.action.create(*ctg_data)\n for ctg in ctgs:\n wait_for(\n lambda: rest_api.collections.categories.find_by(description=ctg.description),\n num_sec=180,\n delay=10,\n )\n\n @request.addfinalizer\n def _finished():\n ids = [ctg.id for ctg in ctgs]\n delete_ctgs = [ctg for ctg in rest_api.collections.categories\n if ctg.id in ids]\n if len(delete_ctgs) != 0:\n rest_api.collections.categories.action.delete(*delete_ctgs)\n\n return ctgs\n\n\ndef tags(request, rest_api, categories):\n # Category id, href or name needs to be specified for creating a new tag resource\n tags = []\n for ctg in categories:\n data = {\n 'name': 'test_tag_{}'.format(fauxfactory.gen_alphanumeric().lower()),\n 'description': 'test_tag_{}'.format(fauxfactory.gen_alphanumeric().lower()),\n 'category': {'href': ctg.href}\n }\n tags.append(data)\n tags = rest_api.collections.tags.action.create(*tags)\n for tag in tags:\n wait_for(\n lambda: rest_api.collections.tags.find_by(name=tag.name),\n num_sec=180,\n delay=10,\n )\n\n @request.addfinalizer\n def _finished():\n ids = [tag.id for tag in tags]\n delete_tags = [tag for tag in rest_api.collections.tags if tag.id in ids]\n if len(delete_tags) != 0:\n rest_api.collections.tags.action.delete(*delete_tags)\n\n return tags\n\n\ndef dialog():\n dialog = \"dialog_{}\".format(fauxfactory.gen_alphanumeric())\n element_data = dict(\n ele_label=\"ele_{}\".format(fauxfactory.gen_alphanumeric()),\n ele_name=fauxfactory.gen_alphanumeric(),\n ele_desc=\"my ele desc\",\n choose_type=\"Text Box\",\n default_text_box=\"default value\"\n )\n service_dialog = ServiceDialog(\n label=dialog,\n description=\"my dialog\",\n submit=True,\n cancel=True,\n tab_label=\"tab_{}\".format(fauxfactory.gen_alphanumeric()),\n tab_desc=\"my tab desc\",\n box_label=\"box_{}\".format(fauxfactory.gen_alphanumeric()),\n box_desc=\"my box desc\")\n service_dialog.create(element_data)\n return service_dialog\n\n\ndef services(request, rest_api, a_provider, dialog, service_catalogs):\n \"\"\"\n The attempt to add the 
service entities via web\n \"\"\"\n template, host, datastore, iso_file, vlan, catalog_item_type = map(a_provider.data.get(\n \"provisioning\").get,\n ('template', 'host', 'datastore', 'iso_file', 'vlan', 'catalog_item_type'))\n\n provisioning_data = {\n 'vm_name': 'test_rest_{}'.format(fauxfactory.gen_alphanumeric()),\n 'host_name': {'name': [host]},\n 'datastore_name': {'name': [datastore]}\n }\n\n if a_provider.type == 'rhevm':\n provisioning_data['provision_type'] = 'Native Clone'\n provisioning_data['vlan'] = vlan\n catalog_item_type = version.pick({\n version.LATEST: \"RHEV\",\n '5.3': \"RHEV\",\n '5.2': \"Redhat\"\n })\n elif a_provider.type == 'virtualcenter':\n provisioning_data['provision_type'] = 'VMware'\n catalog = service_catalogs[0].name\n item_name = fauxfactory.gen_alphanumeric()\n catalog_item = CatalogItem(item_type=catalog_item_type, name=item_name,\n description=\"my catalog\", display_in=True,\n catalog=catalog,\n dialog=dialog.label,\n catalog_name=template,\n provider=a_provider.name,\n prov_data=provisioning_data)\n\n catalog_item.create()\n service_catalogs = ServiceCatalogs(\"service_name\")\n service_catalogs.order(catalog_item.catalog, catalog_item)\n row_description = catalog_item.name\n cells = {'Description': row_description}\n row, __ = wait_for(requests.wait_for_request, [cells, True],\n fail_func=requests.reload, num_sec=2000, delay=20)\n assert row.last_message.text == 'Request complete'\n try:\n services = [_ for _ in rest_api.collections.services]\n services[0]\n except IndexError:\n raise Exception(\"No options are selected\")\n\n @request.addfinalizer\n def _finished():\n services = [_ for _ in rest_api.collections.services]\n if len(services) != 0:\n rest_api.collections.services.action.delete(*services)\n\n return services\n\n\ndef rates(request, rest_api):\n chargeback = rest_api.collections.chargebacks.get(rate_type='Compute')\n data = [{\n 'description': 'test_rate_{}_{}'.format(_index, fauxfactory.gen_alphanumeric()),\n 'rate': 1,\n 'group': 'cpu',\n 'per_time': 'daily',\n 'per_unit': 'megahertz',\n 'chargeback_rate_id': chargeback.id\n } for _index in range(0, 3)]\n\n rates = rest_api.collections.rates.action.create(*data)\n for rate in data:\n wait_for(\n lambda: rest_api.collections.rates.find_by(description=rate.get('description')),\n num_sec=180,\n delay=10,\n )\n\n @request.addfinalizer\n def _finished():\n ids = [rate.id for rate in rates]\n delete_rates = [rate for rate in rest_api.collections.rates if rate.id in ids]\n if len(delete_rates) != 0:\n rest_api.collections.rates.action.delete(*delete_rates)\n\n return rates\n\n\ndef a_provider():\n return _setup_a_provider(\"infra\")\n\n\ndef vm(request, a_provider, rest_api):\n provider_rest = rest_api.collections.providers.get(name=a_provider.name)\n vm_name = deploy_template(\n a_provider.key,\n \"test_rest_vm_{}\".format(fauxfactory.gen_alphanumeric(length=4)))\n request.addfinalizer(lambda: a_provider.mgmt.delete_vm(vm_name))\n provider_rest.action.refresh()\n wait_for(\n lambda: len(rest_api.collections.vms.find_by(name=vm_name)) > 0,\n num_sec=600, delay=5)\n return vm_name\n\n\ndef service_templates(request, rest_api, dialog):\n catalog_items = []\n for index in range(1, 5):\n catalog_items.append(\n CatalogItem(\n item_type=\"Generic\",\n name=\"item_{}_{}\".format(index, fauxfactory.gen_alphanumeric()),\n description=\"my catalog\", display_in=True,\n dialog=dialog.label)\n )\n\n for catalog_item in catalog_items:\n catalog_item.create()\n\n try:\n s_tpls = [_ for _ in 
rest_api.collections.service_templates]\n s_tpls[0]\n except IndexError:\n raise Exception(\"There is no service template to be taken\")\n\n @request.addfinalizer\n def _finished():\n s_tpls = [_ for _ in rest_api.collections.service_templates]\n if len(s_tpls) != 0:\n rest_api.collections.service_templates.action.delete(*s_tpls)\n\n return s_tpls\n\n\ndef automation_requests_data(vm):\n return [{\n \"uri_parts\": {\n \"namespace\": \"System\",\n \"class\": \"Request\",\n \"instance\": \"InspectME\",\n \"message\": \"create\",\n },\n \"parameters\": {\n \"vm_name\": vm,\n },\n \"requester\": {\n \"auto_approve\": True\n }\n } for index in range(1, 5)]\n\n\ndef groups(request, rest_api, role, tenant, num=1):\n data = [{\n \"description\": \"group_description_{}\".format(fauxfactory.gen_alphanumeric()),\n \"role\": {\"href\": role.href},\n \"tenant\": {\"href\": tenant.href}\n } for index in range(0, num)]\n\n groups = _creating_skeleton(request, rest_api, \"groups\", data)\n if num == 1:\n return groups.pop()\n return groups\n\n\ndef roles(request, rest_api, num=1):\n data = [{\n \"name\": \"role_name_{}\".format(fauxfactory.gen_alphanumeric())\n } for index in range(0, num)]\n\n roles = _creating_skeleton(request, rest_api, \"roles\", data)\n if num == 1:\n return roles.pop()\n return roles\n\n\ndef tenants(request, rest_api, num=1):\n parent = rest_api.collections.tenants.get(name='My Company')\n data = [{\n 'description': 'test_tenants_{}_{}'.format(_index, fauxfactory.gen_alphanumeric()),\n 'name': 'test_tenants_{}_{}'.format(_index, fauxfactory.gen_alphanumeric()),\n 'divisible': 'true',\n 'use_config_for_attributes': 'false',\n 'parent': {'href': parent.href}\n } for _index in range(0, num)]\n\n tenants = _creating_skeleton(request, rest_api, \"tenants\", data)\n if num == 1:\n return tenants.pop()\n return tenants\n\n\ndef users(request, rest_api, num=1):\n data = [{\n \"userid\": \"user_{}_{}\".format(_index, fauxfactory.gen_alphanumeric(3)),\n \"name\": \"name_{}_{}\".format(_index, fauxfactory.gen_alphanumeric()),\n \"password\": \"pass_{}_{}\".format(_index, fauxfactory.gen_alphanumeric(3)),\n \"group\": {\"description\": \"EvmGroup-user\"}\n } for _index in range(0, num)]\n\n users = _creating_skeleton(request, rest_api, \"users\", data)\n if num == 1:\n return users.pop()\n return users\n\n\ndef _creating_skeleton(request, rest_api, col_name, col_data):\n collection = getattr(rest_api.collections, col_name)\n if \"create\" not in collection.action.all:\n raise OptionNotAvailable(\n \"Create action for {} is not implemented in this version\".format(col_name))\n entities = collection.action.create(*col_data)\n for entity in col_data:\n if entity.get('name', None):\n wait_for(lambda: collection.find_by(name=entity.get('name')), num_sec=180, delay=10)\n elif entity.get('description', None):\n wait_for(lambda: collection.find_by(\n description=entity.get('description')), num_sec=180, delay=10)\n else:\n raise NotImplementedError\n\n @request.addfinalizer\n def _finished():\n collection.reload()\n ids = [e.id for e in entities]\n delete_entities = [e for e in collection if e.id in ids]\n if len(delete_entities) != 0:\n collection.action.delete(*delete_entities)\n\n return entities\n", "path": "cfme/rest/__init__.py"}]} | 4,000 | 472 |
gh_patches_debug_24628 | rasdani/github-patches | git_diff | scverse__scanpy-1554 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
deprecate scvi in external
I was wondering if we could deprecate the scvi external wrapper as we now have `scvi-tools`. I could also update the wrapper to have minimal functionality, but I think it would be better for people to use our API now that it's tightly integrated with scanpy anyway.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scanpy/external/pp/_scvi.py`
Content:
```
1 import numpy as np
2 import pandas as pd
3 import scipy as sp
4
5 from typing import Optional, Sequence, Union
6 from anndata import AnnData
7
8 MIN_VERSION = "0.6.5"
9
10
11 def scvi(
12 adata: AnnData,
13 n_hidden: int = 128,
14 n_latent: int = 10,
15 n_layers: int = 1,
16 dispersion: str = "gene",
17 n_epochs: int = 400,
18 lr: int = 1e-3,
19 train_size: int = 1.0,
20 batch_key: Optional[str] = None,
21 use_highly_variable_genes: bool = True,
22 subset_genes: Optional[Sequence[Union[int, str]]] = None,
23 linear_decoder: bool = False,
24 copy: bool = False,
25 use_cuda: bool = True,
26 return_posterior: bool = True,
27 trainer_kwargs: dict = {},
28 model_kwargs: dict = {},
29 ) -> Optional[AnnData]:
30 """\
31 SCVI [Lopez18]_.
32
33 Fits scVI model onto raw count data given an anndata object
34
35 scVI uses stochastic optimization and deep neural networks to aggregate information
36 across similar cells and genes and to approximate the distributions that underlie
37 observed expression values, while accounting for batch effects and limited sensitivity.
38
39 To use a linear-decoded Variational AutoEncoder model (implementation of [Svensson20]_.),
40 set linear_decoded = True. Compared to standard VAE, this model is less powerful, but can
41 be used to inspect which genes contribute to variation in the dataset. It may also be used
42 for all scVI tasks, like differential expression, batch correction, imputation, etc.
43 However, batch correction may be less powerful as it assumes a linear model.
44
45 .. note::
46 More information and bug reports `here <https://github.com/YosefLab/scVI>`__.
47
48 Parameters
49 ----------
50 adata
51 An anndata file with `X` attribute of unnormalized count data
52 n_hidden
53 Number of nodes per hidden layer
54 n_latent
55 Dimensionality of the latent space
56 n_layers
57 Number of hidden layers used for encoder and decoder NNs
58 dispersion
59 One of the following
60 * `'gene'` - dispersion parameter of NB is constant per gene across cells
61 * `'gene-batch'` - dispersion can differ between different batches
62 * `'gene-label'` - dispersion can differ between different labels
63 * `'gene-cell'` - dispersion can differ for every gene in every cell
64 n_epochs
65 Number of epochs to train
66 lr
67 Learning rate
68 train_size
69 The train size, either a float between 0 and 1 or an integer for the number of training samples to use
70 batch_key
71 Column name in anndata.obs for batches.
72 If None, no batch correction is performed
73 If not None, batch correction is performed per batch category
74 use_highly_variable_genes
75 If true, uses only the genes in anndata.var["highly_variable"]
76 subset_genes
77 Optional list of indices or gene names to subset anndata.
78 If not None, use_highly_variable_genes is ignored
79 linear_decoder
80 If true, uses LDVAE model, which is an implementation of [Svensson20]_.
81 copy
82 If true, a copy of anndata is returned
83 return_posterior
84 If true, posterior object is returned
85 use_cuda
86 If true, uses cuda
87 trainer_kwargs
88 Extra arguments for UnsupervisedTrainer
89 model_kwargs
90 Extra arguments for VAE or LDVAE model
91
92 Returns
93 -------
94 If `copy` is true, anndata is returned.
95 If `return_posterior` is true, the posterior object is returned
96 If both `copy` and `return_posterior` are true,
97 a tuple of anndata and the posterior are returned in that order.
98
99 `adata.obsm['X_scvi']` stores the latent representations
100 `adata.obsm['X_scvi_denoised']` stores the normalized mean of the negative binomial
101 `adata.obsm['X_scvi_sample_rate']` stores the mean of the negative binomial
102
103 If linear_decoder is true:
104 `adata.uns['ldvae_loadings']` stores the per-gene weights in the linear decoder as a
105 genes by n_latent matrix.
106
107 """
108
109 try:
110 from scvi.models import VAE, LDVAE
111 from scvi.inference import UnsupervisedTrainer
112 from scvi.dataset import AnnDatasetFromAnnData
113 except ImportError:
114 raise ImportError(
115 "Please install scvi package from https://github.com/YosefLab/scVI"
116 )
117
118 # check if observations are unnormalized using first 10
119 # code from: https://github.com/theislab/dca/blob/89eee4ed01dd969b3d46e0c815382806fbfc2526/dca/io.py#L63-L69
120 if len(adata) > 10:
121 X_subset = adata.X[:10]
122 else:
123 X_subset = adata.X
124 norm_error = (
125 'Make sure that the dataset (adata.X) contains unnormalized count data.'
126 )
127 if sp.sparse.issparse(X_subset):
128 assert (X_subset.astype(int) != X_subset).nnz == 0, norm_error
129 else:
130 assert np.all(X_subset.astype(int) == X_subset), norm_error
131
132 if subset_genes is not None:
133 adata_subset = adata[:, subset_genes]
134 elif use_highly_variable_genes and "highly_variable" in adata.var:
135 adata_subset = adata[:, adata.var["highly_variable"]]
136 else:
137 adata_subset = adata
138
139 if batch_key is not None:
140 codes, uniques = pd.factorize(adata_subset.obs[batch_key])
141 adata_subset.obs['_tmp_scvi_batch'] = codes
142 n_batches = len(uniques)
143 else:
144 n_batches = 0
145
146 dataset = AnnDatasetFromAnnData(adata_subset.copy(), batch_label='_tmp_scvi_batch')
147
148 if linear_decoder:
149 vae = LDVAE(
150 n_input=dataset.nb_genes,
151 n_batch=n_batches,
152 n_labels=dataset.n_labels,
153 n_hidden=n_hidden,
154 n_latent=n_latent,
155 n_layers_encoder=n_layers,
156 dispersion=dispersion,
157 **model_kwargs,
158 )
159
160 else:
161 vae = VAE(
162 dataset.nb_genes,
163 n_batch=n_batches,
164 n_labels=dataset.n_labels,
165 n_hidden=n_hidden,
166 n_latent=n_latent,
167 n_layers=n_layers,
168 dispersion=dispersion,
169 **model_kwargs,
170 )
171
172 trainer = UnsupervisedTrainer(
173 model=vae,
174 gene_dataset=dataset,
175 use_cuda=use_cuda,
176 train_size=train_size,
177 **trainer_kwargs,
178 )
179
180 trainer.train(n_epochs=n_epochs, lr=lr)
181
182 full = trainer.create_posterior(
183 trainer.model, dataset, indices=np.arange(len(dataset))
184 )
185 latent, batch_indices, labels = full.sequential().get_latent()
186
187 if copy:
188 adata = adata.copy()
189
190 adata.obsm['X_scvi'] = latent
191 adata.obsm['X_scvi_denoised'] = full.sequential().get_sample_scale()
192 adata.obsm['X_scvi_sample_rate'] = full.sequential().imputation()
193
194 if linear_decoder:
195 loadings = vae.get_loadings()
196 df = pd.DataFrame(loadings, index=adata_subset.var_names)
197 adata.uns['ldvae_loadings'] = df
198
199 if copy and return_posterior:
200 return adata, full
201 elif copy:
202 return adata
203 elif return_posterior:
204 return full
205
```
Path: `setup.py`
Content:
```
1 import sys
2
3 if sys.version_info < (3, 6):
4 sys.exit('scanpy requires Python >= 3.6')
5 from pathlib import Path
6
7 from setuptools import setup, find_packages
8
9 try:
10 import pytoml
11 except ImportError:
12 sys.exit('Please use `pip install .` or install pytoml first.')
13
14 proj = pytoml.loads(Path('pyproject.toml').read_text())
15 metadata = proj['tool']['scanpy']
16
17 setup(
18 name='scanpy',
19 use_scm_version=True,
20 setup_requires=['setuptools_scm'],
21 description='Single-Cell Analysis in Python.',
22 long_description=Path('README.rst').read_text('utf-8'),
23 url='http://github.com/theislab/scanpy',
24 author=metadata['author'],
25 author_email=metadata['author-email'],
26 license='BSD',
27 python_requires='>=3.6',
28 install_requires=[
29 l.strip() for l in Path('requirements.txt').read_text('utf-8').splitlines()
30 ],
31 extras_require=dict(
32 louvain=['python-igraph', 'louvain>=0.6,!=0.6.2'],
33 leiden=['python-igraph', 'leidenalg'],
34 bbknn=['bbknn'],
35 scvi=['scvi>=0.6.5'],
36 rapids=['cudf>=0.9', 'cuml>=0.9', 'cugraph>=0.9'],
37 magic=['magic-impute>=2.0'],
38 skmisc=['scikit-misc>=0.1.3'],
39 harmony=['harmonypy'],
40 scrublet=['scrublet'],
41 dev=['setuptools_scm', 'pytoml', 'black>=20.8b1'],
42 doc=[
43 'sphinx>=3.2',
44 'sphinx_rtd_theme>=0.3.1',
45 'sphinx_autodoc_typehints',
46 'scanpydoc>=0.5',
47 'typing_extensions; python_version < "3.8"', # for `Literal`
48 ],
49 test=[
50 'pytest>=4.4',
51 'dask[array]!=2.17.0',
52 'fsspec',
53 'zappy',
54 'zarr',
55 'profimp',
56 ],
57 ),
58 packages=find_packages(),
59 include_package_data=True,
60 entry_points=dict(console_scripts=['scanpy=scanpy.cli:console_main']),
61 zip_safe=False,
62 classifiers=[
63 'Development Status :: 5 - Production/Stable',
64 'Environment :: Console',
65 'Framework :: Jupyter',
66 'Intended Audience :: Developers',
67 'Intended Audience :: Science/Research',
68 'Natural Language :: English',
69 'Operating System :: MacOS :: MacOS X',
70 'Operating System :: Microsoft :: Windows',
71 'Operating System :: POSIX :: Linux',
72 'Programming Language :: Python :: 3',
73 'Programming Language :: Python :: 3.5',
74 'Programming Language :: Python :: 3.6',
75 'Programming Language :: Python :: 3.7',
76 'Topic :: Scientific/Engineering :: Bio-Informatics',
77 'Topic :: Scientific/Engineering :: Visualization',
78 ],
79 )
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scanpy/external/pp/_scvi.py b/scanpy/external/pp/_scvi.py
--- a/scanpy/external/pp/_scvi.py
+++ b/scanpy/external/pp/_scvi.py
@@ -1,3 +1,4 @@
+import warnings
import numpy as np
import pandas as pd
import scipy as sp
@@ -5,7 +6,7 @@
from typing import Optional, Sequence, Union
from anndata import AnnData
-MIN_VERSION = "0.6.5"
+MIN_VERSION = "0.6.7"
def scvi(
@@ -105,6 +106,11 @@
genes by n_latent matrix.
"""
+ warnings.warn(
+ "scvi via scanpy external API is no longer supported. "
+ + "Please use the new scvi-tools package from `scvi-tools.org`",
+ FutureWarning,
+ )
try:
from scvi.models import VAE, LDVAE
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -32,7 +32,7 @@
louvain=['python-igraph', 'louvain>=0.6,!=0.6.2'],
leiden=['python-igraph', 'leidenalg'],
bbknn=['bbknn'],
- scvi=['scvi>=0.6.5'],
+ scvi=['scvi==0.6.7'],
rapids=['cudf>=0.9', 'cuml>=0.9', 'cugraph>=0.9'],
magic=['magic-impute>=2.0'],
skmisc=['scikit-misc>=0.1.3'],
| {"golden_diff": "diff --git a/scanpy/external/pp/_scvi.py b/scanpy/external/pp/_scvi.py\n--- a/scanpy/external/pp/_scvi.py\n+++ b/scanpy/external/pp/_scvi.py\n@@ -1,3 +1,4 @@\n+import warnings\n import numpy as np\n import pandas as pd\n import scipy as sp\n@@ -5,7 +6,7 @@\n from typing import Optional, Sequence, Union\n from anndata import AnnData\n \n-MIN_VERSION = \"0.6.5\"\n+MIN_VERSION = \"0.6.7\"\n \n \n def scvi(\n@@ -105,6 +106,11 @@\n genes by n_latent matrix.\n \n \"\"\"\n+ warnings.warn(\n+ \"scvi via scanpy external API is no longer supported. \"\n+ + \"Please use the new scvi-tools package from `scvi-tools.org`\",\n+ FutureWarning,\n+ )\n \n try:\n from scvi.models import VAE, LDVAE\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -32,7 +32,7 @@\n louvain=['python-igraph', 'louvain>=0.6,!=0.6.2'],\n leiden=['python-igraph', 'leidenalg'],\n bbknn=['bbknn'],\n- scvi=['scvi>=0.6.5'],\n+ scvi=['scvi==0.6.7'],\n rapids=['cudf>=0.9', 'cuml>=0.9', 'cugraph>=0.9'],\n magic=['magic-impute>=2.0'],\n skmisc=['scikit-misc>=0.1.3'],\n", "issue": "deprecate scvi in external\nI was wondering if we could deprecate the scvi external wrapper as we now have `scvi-tools`. I could also update the wrapper to have minimal functionality, but I think it would be better for people to use our API now that it's tightly integrated with scanpy anyway.\n", "before_files": [{"content": "import numpy as np\nimport pandas as pd\nimport scipy as sp\n\nfrom typing import Optional, Sequence, Union\nfrom anndata import AnnData\n\nMIN_VERSION = \"0.6.5\"\n\n\ndef scvi(\n adata: AnnData,\n n_hidden: int = 128,\n n_latent: int = 10,\n n_layers: int = 1,\n dispersion: str = \"gene\",\n n_epochs: int = 400,\n lr: int = 1e-3,\n train_size: int = 1.0,\n batch_key: Optional[str] = None,\n use_highly_variable_genes: bool = True,\n subset_genes: Optional[Sequence[Union[int, str]]] = None,\n linear_decoder: bool = False,\n copy: bool = False,\n use_cuda: bool = True,\n return_posterior: bool = True,\n trainer_kwargs: dict = {},\n model_kwargs: dict = {},\n) -> Optional[AnnData]:\n \"\"\"\\\n SCVI [Lopez18]_.\n\n Fits scVI model onto raw count data given an anndata object\n\n scVI uses stochastic optimization and deep neural networks to aggregate information \n across similar cells and genes and to approximate the distributions that underlie\n observed expression values, while accounting for batch effects and limited sensitivity.\n\n To use a linear-decoded Variational AutoEncoder model (implementation of [Svensson20]_.),\n set linear_decoded = True. Compared to standard VAE, this model is less powerful, but can \n be used to inspect which genes contribute to variation in the dataset. It may also be used\n for all scVI tasks, like differential expression, batch correction, imputation, etc.\n However, batch correction may be less powerful as it assumes a linear model.\n\n .. 
note::\n More information and bug reports `here <https://github.com/YosefLab/scVI>`__.\n\n Parameters\n ----------\n adata\n An anndata file with `X` attribute of unnormalized count data\n n_hidden\n Number of nodes per hidden layer\n n_latent\n Dimensionality of the latent space\n n_layers\n Number of hidden layers used for encoder and decoder NNs\n dispersion\n One of the following\n * `'gene'` - dispersion parameter of NB is constant per gene across cells\n * `'gene-batch'` - dispersion can differ between different batches\n * `'gene-label'` - dispersion can differ between different labels\n * `'gene-cell'` - dispersion can differ for every gene in every cell\n n_epochs\n Number of epochs to train\n lr\n Learning rate\n train_size\n The train size, either a float between 0 and 1 or an integer for the number of training samples to use\n batch_key\n Column name in anndata.obs for batches. \n If None, no batch correction is performed\n If not None, batch correction is performed per batch category\n use_highly_variable_genes\n If true, uses only the genes in anndata.var[\"highly_variable\"]\n subset_genes\n Optional list of indices or gene names to subset anndata. \n If not None, use_highly_variable_genes is ignored\n linear_decoder\n If true, uses LDVAE model, which is an implementation of [Svensson20]_.\n copy\n If true, a copy of anndata is returned\n return_posterior\n If true, posterior object is returned\n use_cuda\n If true, uses cuda\n trainer_kwargs\n Extra arguments for UnsupervisedTrainer\n model_kwargs\n Extra arguments for VAE or LDVAE model\n \n Returns\n -------\n If `copy` is true, anndata is returned.\n If `return_posterior` is true, the posterior object is returned\n If both `copy` and `return_posterior` are true, \n a tuple of anndata and the posterior are returned in that order. 
\n\n `adata.obsm['X_scvi']` stores the latent representations\n `adata.obsm['X_scvi_denoised']` stores the normalized mean of the negative binomial\n `adata.obsm['X_scvi_sample_rate']` stores the mean of the negative binomial\n \n If linear_decoder is true:\n `adata.uns['ldvae_loadings']` stores the per-gene weights in the linear decoder as a\n genes by n_latent matrix.\n\n \"\"\"\n\n try:\n from scvi.models import VAE, LDVAE\n from scvi.inference import UnsupervisedTrainer\n from scvi.dataset import AnnDatasetFromAnnData\n except ImportError:\n raise ImportError(\n \"Please install scvi package from https://github.com/YosefLab/scVI\"\n )\n\n # check if observations are unnormalized using first 10\n # code from: https://github.com/theislab/dca/blob/89eee4ed01dd969b3d46e0c815382806fbfc2526/dca/io.py#L63-L69\n if len(adata) > 10:\n X_subset = adata.X[:10]\n else:\n X_subset = adata.X\n norm_error = (\n 'Make sure that the dataset (adata.X) contains unnormalized count data.'\n )\n if sp.sparse.issparse(X_subset):\n assert (X_subset.astype(int) != X_subset).nnz == 0, norm_error\n else:\n assert np.all(X_subset.astype(int) == X_subset), norm_error\n\n if subset_genes is not None:\n adata_subset = adata[:, subset_genes]\n elif use_highly_variable_genes and \"highly_variable\" in adata.var:\n adata_subset = adata[:, adata.var[\"highly_variable\"]]\n else:\n adata_subset = adata\n\n if batch_key is not None:\n codes, uniques = pd.factorize(adata_subset.obs[batch_key])\n adata_subset.obs['_tmp_scvi_batch'] = codes\n n_batches = len(uniques)\n else:\n n_batches = 0\n\n dataset = AnnDatasetFromAnnData(adata_subset.copy(), batch_label='_tmp_scvi_batch')\n\n if linear_decoder:\n vae = LDVAE(\n n_input=dataset.nb_genes,\n n_batch=n_batches,\n n_labels=dataset.n_labels,\n n_hidden=n_hidden,\n n_latent=n_latent,\n n_layers_encoder=n_layers,\n dispersion=dispersion,\n **model_kwargs,\n )\n\n else:\n vae = VAE(\n dataset.nb_genes,\n n_batch=n_batches,\n n_labels=dataset.n_labels,\n n_hidden=n_hidden,\n n_latent=n_latent,\n n_layers=n_layers,\n dispersion=dispersion,\n **model_kwargs,\n )\n\n trainer = UnsupervisedTrainer(\n model=vae,\n gene_dataset=dataset,\n use_cuda=use_cuda,\n train_size=train_size,\n **trainer_kwargs,\n )\n\n trainer.train(n_epochs=n_epochs, lr=lr)\n\n full = trainer.create_posterior(\n trainer.model, dataset, indices=np.arange(len(dataset))\n )\n latent, batch_indices, labels = full.sequential().get_latent()\n\n if copy:\n adata = adata.copy()\n\n adata.obsm['X_scvi'] = latent\n adata.obsm['X_scvi_denoised'] = full.sequential().get_sample_scale()\n adata.obsm['X_scvi_sample_rate'] = full.sequential().imputation()\n\n if linear_decoder:\n loadings = vae.get_loadings()\n df = pd.DataFrame(loadings, index=adata_subset.var_names)\n adata.uns['ldvae_loadings'] = df\n\n if copy and return_posterior:\n return adata, full\n elif copy:\n return adata\n elif return_posterior:\n return full\n", "path": "scanpy/external/pp/_scvi.py"}, {"content": "import sys\n\nif sys.version_info < (3, 6):\n sys.exit('scanpy requires Python >= 3.6')\nfrom pathlib import Path\n\nfrom setuptools import setup, find_packages\n\ntry:\n import pytoml\nexcept ImportError:\n sys.exit('Please use `pip install .` or install pytoml first.')\n\nproj = pytoml.loads(Path('pyproject.toml').read_text())\nmetadata = proj['tool']['scanpy']\n\nsetup(\n name='scanpy',\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n description='Single-Cell Analysis in Python.',\n 
long_description=Path('README.rst').read_text('utf-8'),\n url='http://github.com/theislab/scanpy',\n author=metadata['author'],\n author_email=metadata['author-email'],\n license='BSD',\n python_requires='>=3.6',\n install_requires=[\n l.strip() for l in Path('requirements.txt').read_text('utf-8').splitlines()\n ],\n extras_require=dict(\n louvain=['python-igraph', 'louvain>=0.6,!=0.6.2'],\n leiden=['python-igraph', 'leidenalg'],\n bbknn=['bbknn'],\n scvi=['scvi>=0.6.5'],\n rapids=['cudf>=0.9', 'cuml>=0.9', 'cugraph>=0.9'],\n magic=['magic-impute>=2.0'],\n skmisc=['scikit-misc>=0.1.3'],\n harmony=['harmonypy'],\n scrublet=['scrublet'],\n dev=['setuptools_scm', 'pytoml', 'black>=20.8b1'],\n doc=[\n 'sphinx>=3.2',\n 'sphinx_rtd_theme>=0.3.1',\n 'sphinx_autodoc_typehints',\n 'scanpydoc>=0.5',\n 'typing_extensions; python_version < \"3.8\"', # for `Literal`\n ],\n test=[\n 'pytest>=4.4',\n 'dask[array]!=2.17.0',\n 'fsspec',\n 'zappy',\n 'zarr',\n 'profimp',\n ],\n ),\n packages=find_packages(),\n include_package_data=True,\n entry_points=dict(console_scripts=['scanpy=scanpy.cli:console_main']),\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Framework :: Jupyter',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Visualization',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import warnings\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\n\nfrom typing import Optional, Sequence, Union\nfrom anndata import AnnData\n\nMIN_VERSION = \"0.6.7\"\n\n\ndef scvi(\n adata: AnnData,\n n_hidden: int = 128,\n n_latent: int = 10,\n n_layers: int = 1,\n dispersion: str = \"gene\",\n n_epochs: int = 400,\n lr: int = 1e-3,\n train_size: int = 1.0,\n batch_key: Optional[str] = None,\n use_highly_variable_genes: bool = True,\n subset_genes: Optional[Sequence[Union[int, str]]] = None,\n linear_decoder: bool = False,\n copy: bool = False,\n use_cuda: bool = True,\n return_posterior: bool = True,\n trainer_kwargs: dict = {},\n model_kwargs: dict = {},\n) -> Optional[AnnData]:\n \"\"\"\\\n SCVI [Lopez18]_.\n\n Fits scVI model onto raw count data given an anndata object\n\n scVI uses stochastic optimization and deep neural networks to aggregate information \n across similar cells and genes and to approximate the distributions that underlie\n observed expression values, while accounting for batch effects and limited sensitivity.\n\n To use a linear-decoded Variational AutoEncoder model (implementation of [Svensson20]_.),\n set linear_decoded = True. Compared to standard VAE, this model is less powerful, but can \n be used to inspect which genes contribute to variation in the dataset. It may also be used\n for all scVI tasks, like differential expression, batch correction, imputation, etc.\n However, batch correction may be less powerful as it assumes a linear model.\n\n .. 
note::\n More information and bug reports `here <https://github.com/YosefLab/scVI>`__.\n\n Parameters\n ----------\n adata\n An anndata file with `X` attribute of unnormalized count data\n n_hidden\n Number of nodes per hidden layer\n n_latent\n Dimensionality of the latent space\n n_layers\n Number of hidden layers used for encoder and decoder NNs\n dispersion\n One of the following\n * `'gene'` - dispersion parameter of NB is constant per gene across cells\n * `'gene-batch'` - dispersion can differ between different batches\n * `'gene-label'` - dispersion can differ between different labels\n * `'gene-cell'` - dispersion can differ for every gene in every cell\n n_epochs\n Number of epochs to train\n lr\n Learning rate\n train_size\n The train size, either a float between 0 and 1 or an integer for the number of training samples to use\n batch_key\n Column name in anndata.obs for batches. \n If None, no batch correction is performed\n If not None, batch correction is performed per batch category\n use_highly_variable_genes\n If true, uses only the genes in anndata.var[\"highly_variable\"]\n subset_genes\n Optional list of indices or gene names to subset anndata. \n If not None, use_highly_variable_genes is ignored\n linear_decoder\n If true, uses LDVAE model, which is an implementation of [Svensson20]_.\n copy\n If true, a copy of anndata is returned\n return_posterior\n If true, posterior object is returned\n use_cuda\n If true, uses cuda\n trainer_kwargs\n Extra arguments for UnsupervisedTrainer\n model_kwargs\n Extra arguments for VAE or LDVAE model\n \n Returns\n -------\n If `copy` is true, anndata is returned.\n If `return_posterior` is true, the posterior object is returned\n If both `copy` and `return_posterior` are true, \n a tuple of anndata and the posterior are returned in that order. \n\n `adata.obsm['X_scvi']` stores the latent representations\n `adata.obsm['X_scvi_denoised']` stores the normalized mean of the negative binomial\n `adata.obsm['X_scvi_sample_rate']` stores the mean of the negative binomial\n \n If linear_decoder is true:\n `adata.uns['ldvae_loadings']` stores the per-gene weights in the linear decoder as a\n genes by n_latent matrix.\n\n \"\"\"\n warnings.warn(\n \"scvi via scanpy external API is no longer supported. 
\"\n + \"Please use the new scvi-tools package from `scvi-tools.org`\",\n FutureWarning,\n )\n\n try:\n from scvi.models import VAE, LDVAE\n from scvi.inference import UnsupervisedTrainer\n from scvi.dataset import AnnDatasetFromAnnData\n except ImportError:\n raise ImportError(\n \"Please install scvi package from https://github.com/YosefLab/scVI\"\n )\n\n # check if observations are unnormalized using first 10\n # code from: https://github.com/theislab/dca/blob/89eee4ed01dd969b3d46e0c815382806fbfc2526/dca/io.py#L63-L69\n if len(adata) > 10:\n X_subset = adata.X[:10]\n else:\n X_subset = adata.X\n norm_error = (\n 'Make sure that the dataset (adata.X) contains unnormalized count data.'\n )\n if sp.sparse.issparse(X_subset):\n assert (X_subset.astype(int) != X_subset).nnz == 0, norm_error\n else:\n assert np.all(X_subset.astype(int) == X_subset), norm_error\n\n if subset_genes is not None:\n adata_subset = adata[:, subset_genes]\n elif use_highly_variable_genes and \"highly_variable\" in adata.var:\n adata_subset = adata[:, adata.var[\"highly_variable\"]]\n else:\n adata_subset = adata\n\n if batch_key is not None:\n codes, uniques = pd.factorize(adata_subset.obs[batch_key])\n adata_subset.obs['_tmp_scvi_batch'] = codes\n n_batches = len(uniques)\n else:\n n_batches = 0\n\n dataset = AnnDatasetFromAnnData(adata_subset.copy(), batch_label='_tmp_scvi_batch')\n\n if linear_decoder:\n vae = LDVAE(\n n_input=dataset.nb_genes,\n n_batch=n_batches,\n n_labels=dataset.n_labels,\n n_hidden=n_hidden,\n n_latent=n_latent,\n n_layers_encoder=n_layers,\n dispersion=dispersion,\n **model_kwargs,\n )\n\n else:\n vae = VAE(\n dataset.nb_genes,\n n_batch=n_batches,\n n_labels=dataset.n_labels,\n n_hidden=n_hidden,\n n_latent=n_latent,\n n_layers=n_layers,\n dispersion=dispersion,\n **model_kwargs,\n )\n\n trainer = UnsupervisedTrainer(\n model=vae,\n gene_dataset=dataset,\n use_cuda=use_cuda,\n train_size=train_size,\n **trainer_kwargs,\n )\n\n trainer.train(n_epochs=n_epochs, lr=lr)\n\n full = trainer.create_posterior(\n trainer.model, dataset, indices=np.arange(len(dataset))\n )\n latent, batch_indices, labels = full.sequential().get_latent()\n\n if copy:\n adata = adata.copy()\n\n adata.obsm['X_scvi'] = latent\n adata.obsm['X_scvi_denoised'] = full.sequential().get_sample_scale()\n adata.obsm['X_scvi_sample_rate'] = full.sequential().imputation()\n\n if linear_decoder:\n loadings = vae.get_loadings()\n df = pd.DataFrame(loadings, index=adata_subset.var_names)\n adata.uns['ldvae_loadings'] = df\n\n if copy and return_posterior:\n return adata, full\n elif copy:\n return adata\n elif return_posterior:\n return full\n", "path": "scanpy/external/pp/_scvi.py"}, {"content": "import sys\n\nif sys.version_info < (3, 6):\n sys.exit('scanpy requires Python >= 3.6')\nfrom pathlib import Path\n\nfrom setuptools import setup, find_packages\n\ntry:\n import pytoml\nexcept ImportError:\n sys.exit('Please use `pip install .` or install pytoml first.')\n\nproj = pytoml.loads(Path('pyproject.toml').read_text())\nmetadata = proj['tool']['scanpy']\n\nsetup(\n name='scanpy',\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n description='Single-Cell Analysis in Python.',\n long_description=Path('README.rst').read_text('utf-8'),\n url='http://github.com/theislab/scanpy',\n author=metadata['author'],\n author_email=metadata['author-email'],\n license='BSD',\n python_requires='>=3.6',\n install_requires=[\n l.strip() for l in Path('requirements.txt').read_text('utf-8').splitlines()\n ],\n extras_require=dict(\n 
louvain=['python-igraph', 'louvain>=0.6,!=0.6.2'],\n leiden=['python-igraph', 'leidenalg'],\n bbknn=['bbknn'],\n scvi=['scvi==0.6.7'],\n rapids=['cudf>=0.9', 'cuml>=0.9', 'cugraph>=0.9'],\n magic=['magic-impute>=2.0'],\n skmisc=['scikit-misc>=0.1.3'],\n harmony=['harmonypy'],\n scrublet=['scrublet'],\n dev=['setuptools_scm', 'pytoml', 'black>=20.8b1'],\n doc=[\n 'sphinx>=3.2',\n 'sphinx_rtd_theme>=0.3.1',\n 'sphinx_autodoc_typehints',\n 'scanpydoc>=0.5',\n 'typing_extensions; python_version < \"3.8\"', # for `Literal`\n ],\n test=[\n 'pytest>=4.4',\n 'dask[array]!=2.17.0',\n 'fsspec',\n 'zappy',\n 'zarr',\n 'profimp',\n ],\n ),\n packages=find_packages(),\n include_package_data=True,\n entry_points=dict(console_scripts=['scanpy=scanpy.cli:console_main']),\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Framework :: Jupyter',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Visualization',\n ],\n)\n", "path": "setup.py"}]} | 3,438 | 388 |
gh_patches_debug_6234 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-3564 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
moderators for b-plans
if an initiator starts a b-plan via meinBerlin (as e.g. SenWohn does, they don't have imperia) or an external project he/she is automatically added as moderator and gets mails as the one below. This is confusing because:
a) you don't see moderators in dashboard
b) you can't follow a b-plan/external project
c) the link does not go to the external page (in this case it goes here: https://mein.berlin.de/projects/bebauungsplan-8-66-buckower-felder/)
Should we take out this rule for these two templates or stop sending mails?
<img width="698" alt="bildschirmfoto 2019-02-04 um 13 32 08" src="https://user-images.githubusercontent.com/35491681/52208589-762c0780-2881-11e9-9781-21826347abe4.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/notifications/signals.py`
Content:
```
1 from django.contrib.auth import get_user_model
2 from django.db.models import signals
3 from django.dispatch import receiver
4
5 from adhocracy4.actions.models import Action
6 from adhocracy4.actions.verbs import Verbs
7 from adhocracy4.dashboard import signals as dashboard_signals
8 from adhocracy4.follows.models import Follow
9 from adhocracy4.projects.models import Project
10
11 from . import emails
12
13 User = get_user_model()
14
15
16 @receiver(signals.post_save, sender=Action)
17 def send_notifications(instance, created, **kwargs):
18 action = instance
19 verb = Verbs(action.verb)
20
21 if action.type in ('item', 'comment') \
22 and verb in (Verbs.CREATE, Verbs.ADD):
23 emails.NotifyCreatorEmail.send(action)
24
25 if action.project:
26 emails.NotifyModeratorsEmail.send(action)
27
28 elif action.type == 'phase':
29 if verb == Verbs.START:
30 emails.NotifyFollowersOnPhaseStartedEmail.send(action)
31 elif verb == Verbs.SCHEDULE:
32 emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)
33
34 elif action.type == 'offlineevent' and verb == Verbs.START:
35 emails.NotifyFollowersOnUpcommingEventEmail.send(action)
36
37
38 @receiver(dashboard_signals.project_created)
39 def send_project_created_notifications(**kwargs):
40 project = kwargs.get('project')
41 creator = kwargs.get('user')
42 emails.NotifyInitiatorsOnProjectCreatedEmail.send(
43 project, creator_pk=creator.pk)
44
45
46 @receiver(signals.m2m_changed, sender=Project.moderators.through)
47 def autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs):
48 if action == 'post_add':
49 autofollow_project(instance, pk_set, reverse)
50
51
52 def autofollow_project(instance, pk_set, reverse):
53 if not reverse:
54 project = instance
55 users_pks = pk_set
56
57 for user_pk in users_pks:
58 Follow.objects.update_or_create(
59 project=project,
60 creator_id=user_pk,
61 defaults={
62 'enabled': True
63 }
64 )
65 else:
66 user = instance
67 project_pks = pk_set
68
69 for project_pk in project_pks:
70 Follow.objects.update_or_create(
71 project_id=project_pk,
72 creator=user,
73 defaults={
74 'enabled': True
75 }
76 )
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/notifications/signals.py b/meinberlin/apps/notifications/signals.py
--- a/meinberlin/apps/notifications/signals.py
+++ b/meinberlin/apps/notifications/signals.py
@@ -25,7 +25,8 @@
if action.project:
emails.NotifyModeratorsEmail.send(action)
- elif action.type == 'phase':
+ elif (action.type == 'phase' and
+ action.project.project_type == 'a4projects.Project'):
if verb == Verbs.START:
emails.NotifyFollowersOnPhaseStartedEmail.send(action)
elif verb == Verbs.SCHEDULE:
| {"golden_diff": "diff --git a/meinberlin/apps/notifications/signals.py b/meinberlin/apps/notifications/signals.py\n--- a/meinberlin/apps/notifications/signals.py\n+++ b/meinberlin/apps/notifications/signals.py\n@@ -25,7 +25,8 @@\n if action.project:\n emails.NotifyModeratorsEmail.send(action)\n \n- elif action.type == 'phase':\n+ elif (action.type == 'phase' and\n+ action.project.project_type == 'a4projects.Project'):\n if verb == Verbs.START:\n emails.NotifyFollowersOnPhaseStartedEmail.send(action)\n elif verb == Verbs.SCHEDULE:\n", "issue": "moderators for b-plans\nif an initiator starts a b-plan via meinBerlin (as e.g. SenWohn does, they don't have imperia) or an external project he/she is automatically added as moderator and gets mails as the one below. This is confusing because:\r\na) you don't see moderators in dashboard\r\nb) you can't follow a b-plan/external project\r\nc) the link does not go to the external page (in this case it goes here: https://mein.berlin.de/projects/bebauungsplan-8-66-buckower-felder/)\r\n\r\nShould we take out this rule for these two templates or stop sending mails?\r\n\r\n\r\n<img width=\"698\" alt=\"bildschirmfoto 2019-02-04 um 13 32 08\" src=\"https://user-images.githubusercontent.com/35491681/52208589-762c0780-2881-11e9-9781-21826347abe4.png\">\r\n\n", "before_files": [{"content": "from django.contrib.auth import get_user_model\nfrom django.db.models import signals\nfrom django.dispatch import receiver\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.actions.verbs import Verbs\nfrom adhocracy4.dashboard import signals as dashboard_signals\nfrom adhocracy4.follows.models import Follow\nfrom adhocracy4.projects.models import Project\n\nfrom . import emails\n\nUser = get_user_model()\n\n\n@receiver(signals.post_save, sender=Action)\ndef send_notifications(instance, created, **kwargs):\n action = instance\n verb = Verbs(action.verb)\n\n if action.type in ('item', 'comment') \\\n and verb in (Verbs.CREATE, Verbs.ADD):\n emails.NotifyCreatorEmail.send(action)\n\n if action.project:\n emails.NotifyModeratorsEmail.send(action)\n\n elif action.type == 'phase':\n if verb == Verbs.START:\n emails.NotifyFollowersOnPhaseStartedEmail.send(action)\n elif verb == Verbs.SCHEDULE:\n emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)\n\n elif action.type == 'offlineevent' and verb == Verbs.START:\n emails.NotifyFollowersOnUpcommingEventEmail.send(action)\n\n\n@receiver(dashboard_signals.project_created)\ndef send_project_created_notifications(**kwargs):\n project = kwargs.get('project')\n creator = kwargs.get('user')\n emails.NotifyInitiatorsOnProjectCreatedEmail.send(\n project, creator_pk=creator.pk)\n\n\n@receiver(signals.m2m_changed, sender=Project.moderators.through)\ndef autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs):\n if action == 'post_add':\n autofollow_project(instance, pk_set, reverse)\n\n\ndef autofollow_project(instance, pk_set, reverse):\n if not reverse:\n project = instance\n users_pks = pk_set\n\n for user_pk in users_pks:\n Follow.objects.update_or_create(\n project=project,\n creator_id=user_pk,\n defaults={\n 'enabled': True\n }\n )\n else:\n user = instance\n project_pks = pk_set\n\n for project_pk in project_pks:\n Follow.objects.update_or_create(\n project_id=project_pk,\n creator=user,\n defaults={\n 'enabled': True\n }\n )\n", "path": "meinberlin/apps/notifications/signals.py"}], "after_files": [{"content": "from django.contrib.auth import get_user_model\nfrom django.db.models import 
signals\nfrom django.dispatch import receiver\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.actions.verbs import Verbs\nfrom adhocracy4.dashboard import signals as dashboard_signals\nfrom adhocracy4.follows.models import Follow\nfrom adhocracy4.projects.models import Project\n\nfrom . import emails\n\nUser = get_user_model()\n\n\n@receiver(signals.post_save, sender=Action)\ndef send_notifications(instance, created, **kwargs):\n action = instance\n verb = Verbs(action.verb)\n\n if action.type in ('item', 'comment') \\\n and verb in (Verbs.CREATE, Verbs.ADD):\n emails.NotifyCreatorEmail.send(action)\n\n if action.project:\n emails.NotifyModeratorsEmail.send(action)\n\n elif (action.type == 'phase' and\n action.project.project_type == 'a4projects.Project'):\n if verb == Verbs.START:\n emails.NotifyFollowersOnPhaseStartedEmail.send(action)\n elif verb == Verbs.SCHEDULE:\n emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)\n\n elif action.type == 'offlineevent' and verb == Verbs.START:\n emails.NotifyFollowersOnUpcommingEventEmail.send(action)\n\n\n@receiver(dashboard_signals.project_created)\ndef send_project_created_notifications(**kwargs):\n project = kwargs.get('project')\n creator = kwargs.get('user')\n emails.NotifyInitiatorsOnProjectCreatedEmail.send(\n project, creator_pk=creator.pk)\n\n\n@receiver(signals.m2m_changed, sender=Project.moderators.through)\ndef autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs):\n if action == 'post_add':\n autofollow_project(instance, pk_set, reverse)\n\n\ndef autofollow_project(instance, pk_set, reverse):\n if not reverse:\n project = instance\n users_pks = pk_set\n\n for user_pk in users_pks:\n Follow.objects.update_or_create(\n project=project,\n creator_id=user_pk,\n defaults={\n 'enabled': True\n }\n )\n else:\n user = instance\n project_pks = pk_set\n\n for project_pk in project_pks:\n Follow.objects.update_or_create(\n project_id=project_pk,\n creator=user,\n defaults={\n 'enabled': True\n }\n )\n", "path": "meinberlin/apps/notifications/signals.py"}]} | 1,152 | 142 |
gh_patches_debug_16491 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-734 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
E0000 found unknown escape character ‘/’
version:1:1
cfn-lint --template vpc.cf.json
E0000 found unknown escape character ‘/’
vpc.cf.json:12:135
this is the string that it says container the escape character error. this however works fine when deployed to the CFN service.
"^([0-9]{1,3}\\.){3}[0-9]{1,3}(\\\/([0-9]|[1-2][0-9]|3[0-2]))?$"

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/decode/__init__.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import sys
18 import logging
19 import six
20 try:
21 from json.decoder import JSONDecodeError
22 except ImportError:
23 JSONDecodeError = ValueError
24 from yaml.parser import ParserError, ScannerError
25 from yaml import YAMLError
26 import cfnlint.decode.cfn_yaml
27 import cfnlint.decode.cfn_json
28
29
30 LOGGER = logging.getLogger(__name__)
31
32
33 def decode(filename, ignore_bad_template):
34 """
35 Decode filename into an object
36 """
37 template = None
38 matches = []
39 try:
40 template = cfnlint.decode.cfn_yaml.load(filename)
41 except IOError as e:
42 if e.errno == 2:
43 LOGGER.error('Template file not found: %s', filename)
44 matches.append(create_match_file_error(filename, 'Template file not found: %s' % filename))
45 elif e.errno == 21:
46 LOGGER.error('Template references a directory, not a file: %s', filename)
47 matches.append(create_match_file_error(filename, 'Template references a directory, not a file: %s' % filename))
48 elif e.errno == 13:
49 LOGGER.error('Permission denied when accessing template file: %s', filename)
50 matches.append(create_match_file_error(filename, 'Permission denied when accessing template file: %s' % filename))
51
52 if matches:
53 return(None, matches)
54 except UnicodeDecodeError as err:
55 LOGGER.error('Cannot read file contents: %s', filename)
56 matches.append(create_match_file_error(filename, 'Cannot read file contents: %s' % filename))
57 except cfnlint.decode.cfn_yaml.CfnParseError as err:
58 err.match.Filename = filename
59 matches = [err.match]
60
61 except ParserError as err:
62 matches = [create_match_yaml_parser_error(err, filename)]
63 except ScannerError as err:
64 if err.problem == 'found character \'\\t\' that cannot start any token':
65 try:
66 template = cfnlint.decode.cfn_json.load(filename)
67 except cfnlint.decode.cfn_json.JSONDecodeError as json_err:
68 json_err.match.filename = filename
69 matches = [json_err.match]
70 except JSONDecodeError as json_err:
71 matches = [create_match_json_parser_error(json_err, filename)]
72 except Exception as json_err: # pylint: disable=W0703
73 if ignore_bad_template:
74 LOGGER.info('Template %s is malformed: %s', filename, err.problem)
75 LOGGER.info('Tried to parse %s as JSON but got error: %s', filename, str(json_err))
76 else:
77 LOGGER.error('Template %s is malformed: %s', filename, err.problem)
78 LOGGER.error('Tried to parse %s as JSON but got error: %s', filename, str(json_err))
79 return(None, [create_match_file_error(filename, 'Tried to parse %s as JSON but got error: %s' % (filename, str(json_err)))])
80 else:
81 matches = [create_match_yaml_parser_error(err, filename)]
82 except YAMLError as err:
83 matches = [create_match_file_error(filename, err)]
84
85 if not isinstance(template, dict) and not matches:
86 # Template isn't a dict which means nearly nothing will work
87 matches = [cfnlint.Match(1, 1, 1, 1, filename, cfnlint.ParseError(), message='Template needs to be an object.')]
88 return (template, matches)
89
90
91 def create_match_yaml_parser_error(parser_error, filename):
92 """Create a Match for a parser error"""
93 lineno = parser_error.problem_mark.line + 1
94 colno = parser_error.problem_mark.column + 1
95 msg = parser_error.problem
96 return cfnlint.Match(
97 lineno, colno, lineno, colno + 1, filename,
98 cfnlint.ParseError(), message=msg)
99
100
101 def create_match_file_error(filename, msg):
102 """Create a Match for a parser error"""
103 return cfnlint.Match(
104 linenumber=1, columnnumber=1, linenumberend=1, columnnumberend=2,
105 filename=filename, rule=cfnlint.ParseError(), message=msg)
106
107
108 def create_match_json_parser_error(parser_error, filename):
109 """Create a Match for a parser error"""
110 if sys.version_info[0] == 3:
111 lineno = parser_error.lineno
112 colno = parser_error.colno
113 msg = parser_error.msg
114 elif sys.version_info[0] == 2:
115 lineno = 1
116 colno = 1
117 msg = parser_error.message
118 return cfnlint.Match(
119 lineno, colno, lineno, colno + 1, filename, cfnlint.ParseError(), message=msg)
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/decode/__init__.py b/src/cfnlint/decode/__init__.py
--- a/src/cfnlint/decode/__init__.py
+++ b/src/cfnlint/decode/__init__.py
@@ -57,11 +57,12 @@
except cfnlint.decode.cfn_yaml.CfnParseError as err:
err.match.Filename = filename
matches = [err.match]
-
except ParserError as err:
matches = [create_match_yaml_parser_error(err, filename)]
except ScannerError as err:
- if err.problem == 'found character \'\\t\' that cannot start any token':
+ if err.problem in [
+ 'found character \'\\t\' that cannot start any token',
+ 'found unknown escape character']:
try:
template = cfnlint.decode.cfn_json.load(filename)
except cfnlint.decode.cfn_json.JSONDecodeError as json_err:
| {"golden_diff": "diff --git a/src/cfnlint/decode/__init__.py b/src/cfnlint/decode/__init__.py\n--- a/src/cfnlint/decode/__init__.py\n+++ b/src/cfnlint/decode/__init__.py\n@@ -57,11 +57,12 @@\n except cfnlint.decode.cfn_yaml.CfnParseError as err:\n err.match.Filename = filename\n matches = [err.match]\n-\n except ParserError as err:\n matches = [create_match_yaml_parser_error(err, filename)]\n except ScannerError as err:\n- if err.problem == 'found character \\'\\\\t\\' that cannot start any token':\n+ if err.problem in [\n+ 'found character \\'\\\\t\\' that cannot start any token',\n+ 'found unknown escape character']:\n try:\n template = cfnlint.decode.cfn_json.load(filename)\n except cfnlint.decode.cfn_json.JSONDecodeError as json_err:\n", "issue": "E0000 found unknown escape character \u2018/\u2019\nversion:1:1\r\n\r\ncfn-lint --template vpc.cf.json\r\nE0000 found unknown escape character \u2018/\u2019\r\nvpc.cf.json:12:135\r\n\r\nthis is the string that it says container the escape character error. this however works fine when deployed to the CFN service. \r\n\r\n\"^([0-9]{1,3}\\\\.){3}[0-9]{1,3}(\\\\\\/([0-9]|[1-2][0-9]|3[0-2]))?$\"\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport sys\nimport logging\nimport six\ntry:\n from json.decoder import JSONDecodeError\nexcept ImportError:\n JSONDecodeError = ValueError\nfrom yaml.parser import ParserError, ScannerError\nfrom yaml import YAMLError\nimport cfnlint.decode.cfn_yaml\nimport cfnlint.decode.cfn_json\n\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef decode(filename, ignore_bad_template):\n \"\"\"\n Decode filename into an object\n \"\"\"\n template = None\n matches = []\n try:\n template = cfnlint.decode.cfn_yaml.load(filename)\n except IOError as e:\n if e.errno == 2:\n LOGGER.error('Template file not found: %s', filename)\n matches.append(create_match_file_error(filename, 'Template file not found: %s' % filename))\n elif e.errno == 21:\n LOGGER.error('Template references a directory, not a file: %s', filename)\n matches.append(create_match_file_error(filename, 'Template references a directory, not a file: %s' % filename))\n elif e.errno == 13:\n LOGGER.error('Permission denied when accessing template file: %s', filename)\n matches.append(create_match_file_error(filename, 'Permission denied when accessing template file: %s' % filename))\n\n if matches:\n return(None, matches)\n except UnicodeDecodeError as err:\n LOGGER.error('Cannot read file contents: %s', filename)\n matches.append(create_match_file_error(filename, 'Cannot read file contents: %s' % filename))\n except cfnlint.decode.cfn_yaml.CfnParseError as err:\n err.match.Filename = filename\n matches = [err.match]\n\n except ParserError as err:\n matches = [create_match_yaml_parser_error(err, filename)]\n except ScannerError as err:\n if err.problem == 'found character \\'\\\\t\\' that cannot start any token':\n try:\n template = cfnlint.decode.cfn_json.load(filename)\n except cfnlint.decode.cfn_json.JSONDecodeError as json_err:\n json_err.match.filename = filename\n matches = [json_err.match]\n except JSONDecodeError as json_err:\n matches = [create_match_json_parser_error(json_err, filename)]\n except Exception as json_err: # pylint: disable=W0703\n if ignore_bad_template:\n LOGGER.info('Template %s is malformed: %s', filename, err.problem)\n LOGGER.info('Tried to parse %s as JSON but got error: %s', filename, str(json_err))\n else:\n LOGGER.error('Template %s is malformed: %s', filename, err.problem)\n LOGGER.error('Tried to parse %s as JSON but got error: %s', filename, str(json_err))\n return(None, [create_match_file_error(filename, 'Tried to parse %s as JSON but got error: %s' % (filename, str(json_err)))])\n else:\n matches = [create_match_yaml_parser_error(err, filename)]\n except YAMLError as err:\n matches = [create_match_file_error(filename, err)]\n\n if not isinstance(template, dict) and not matches:\n # Template isn't a dict which means nearly nothing will work\n matches = [cfnlint.Match(1, 1, 1, 1, filename, cfnlint.ParseError(), message='Template needs to be an object.')]\n return (template, matches)\n\n\ndef create_match_yaml_parser_error(parser_error, filename):\n \"\"\"Create a Match for a parser error\"\"\"\n lineno = parser_error.problem_mark.line + 1\n colno = parser_error.problem_mark.column + 1\n msg = parser_error.problem\n return cfnlint.Match(\n lineno, colno, lineno, colno + 1, filename,\n cfnlint.ParseError(), message=msg)\n\n\ndef 
create_match_file_error(filename, msg):\n \"\"\"Create a Match for a parser error\"\"\"\n return cfnlint.Match(\n linenumber=1, columnnumber=1, linenumberend=1, columnnumberend=2,\n filename=filename, rule=cfnlint.ParseError(), message=msg)\n\n\ndef create_match_json_parser_error(parser_error, filename):\n \"\"\"Create a Match for a parser error\"\"\"\n if sys.version_info[0] == 3:\n lineno = parser_error.lineno\n colno = parser_error.colno\n msg = parser_error.msg\n elif sys.version_info[0] == 2:\n lineno = 1\n colno = 1\n msg = parser_error.message\n return cfnlint.Match(\n lineno, colno, lineno, colno + 1, filename, cfnlint.ParseError(), message=msg)\n", "path": "src/cfnlint/decode/__init__.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport sys\nimport logging\nimport six\ntry:\n from json.decoder import JSONDecodeError\nexcept ImportError:\n JSONDecodeError = ValueError\nfrom yaml.parser import ParserError, ScannerError\nfrom yaml import YAMLError\nimport cfnlint.decode.cfn_yaml\nimport cfnlint.decode.cfn_json\n\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef decode(filename, ignore_bad_template):\n \"\"\"\n Decode filename into an object\n \"\"\"\n template = None\n matches = []\n try:\n template = cfnlint.decode.cfn_yaml.load(filename)\n except IOError as e:\n if e.errno == 2:\n LOGGER.error('Template file not found: %s', filename)\n matches.append(create_match_file_error(filename, 'Template file not found: %s' % filename))\n elif e.errno == 21:\n LOGGER.error('Template references a directory, not a file: %s', filename)\n matches.append(create_match_file_error(filename, 'Template references a directory, not a file: %s' % filename))\n elif e.errno == 13:\n LOGGER.error('Permission denied when accessing template file: %s', filename)\n matches.append(create_match_file_error(filename, 'Permission denied when accessing template file: %s' % filename))\n\n if matches:\n return(None, matches)\n except UnicodeDecodeError as err:\n LOGGER.error('Cannot read file contents: %s', filename)\n matches.append(create_match_file_error(filename, 'Cannot read file contents: %s' % filename))\n except cfnlint.decode.cfn_yaml.CfnParseError as err:\n err.match.Filename = filename\n matches = [err.match]\n except ParserError as err:\n matches = [create_match_yaml_parser_error(err, filename)]\n except ScannerError as err:\n if err.problem in [\n 'found character \\'\\\\t\\' that cannot start any token',\n 'found unknown escape character']:\n try:\n template = cfnlint.decode.cfn_json.load(filename)\n except cfnlint.decode.cfn_json.JSONDecodeError as 
json_err:\n json_err.match.filename = filename\n matches = [json_err.match]\n except JSONDecodeError as json_err:\n matches = [create_match_json_parser_error(json_err, filename)]\n except Exception as json_err: # pylint: disable=W0703\n if ignore_bad_template:\n LOGGER.info('Template %s is malformed: %s', filename, err.problem)\n LOGGER.info('Tried to parse %s as JSON but got error: %s', filename, str(json_err))\n else:\n LOGGER.error('Template %s is malformed: %s', filename, err.problem)\n LOGGER.error('Tried to parse %s as JSON but got error: %s', filename, str(json_err))\n return(None, [create_match_file_error(filename, 'Tried to parse %s as JSON but got error: %s' % (filename, str(json_err)))])\n else:\n matches = [create_match_yaml_parser_error(err, filename)]\n except YAMLError as err:\n matches = [create_match_file_error(filename, err)]\n\n if not isinstance(template, dict) and not matches:\n # Template isn't a dict which means nearly nothing will work\n matches = [cfnlint.Match(1, 1, 1, 1, filename, cfnlint.ParseError(), message='Template needs to be an object.')]\n return (template, matches)\n\n\ndef create_match_yaml_parser_error(parser_error, filename):\n \"\"\"Create a Match for a parser error\"\"\"\n lineno = parser_error.problem_mark.line + 1\n colno = parser_error.problem_mark.column + 1\n msg = parser_error.problem\n return cfnlint.Match(\n lineno, colno, lineno, colno + 1, filename,\n cfnlint.ParseError(), message=msg)\n\n\ndef create_match_file_error(filename, msg):\n \"\"\"Create a Match for a parser error\"\"\"\n return cfnlint.Match(\n linenumber=1, columnnumber=1, linenumberend=1, columnnumberend=2,\n filename=filename, rule=cfnlint.ParseError(), message=msg)\n\n\ndef create_match_json_parser_error(parser_error, filename):\n \"\"\"Create a Match for a parser error\"\"\"\n if sys.version_info[0] == 3:\n lineno = parser_error.lineno\n colno = parser_error.colno\n msg = parser_error.msg\n elif sys.version_info[0] == 2:\n lineno = 1\n colno = 1\n msg = parser_error.message\n return cfnlint.Match(\n lineno, colno, lineno, colno + 1, filename, cfnlint.ParseError(), message=msg)\n", "path": "src/cfnlint/decode/__init__.py"}]} | 1,922 | 206 |