problem_id (string, length 18-22) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, length 13-58) | prompt (string, length 1.1k-25.4k) | golden_diff (string, length 145-5.13k) | verification_info (string, length 582-39.1k) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_10296 | rasdani/github-patches | git_diff | getredash__redash-3634 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Queries search is not working on release/7.0.x
### Issue Summary
I tried building a Docker image with the Redash Dockerfile and started it with docker-compose, but queries search is not working.
### Steps to Reproduce

Check out the queries table and find that search_vector is null.

### Technical details:
* Redash Version: release/7.0.x
* Browser/OS: Chrome Mac
* How did you install Redash: redash setup
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/cli/database.py`
Content:
```
1 import time
2
3 from flask.cli import AppGroup
4 from flask_migrate import stamp
5 from sqlalchemy.exc import DatabaseError
6
7 manager = AppGroup(help="Manage the database (create/drop tables).")
8
9
10 def _wait_for_db_connection(db):
11 retried = False
12 while not retried:
13 try:
14 db.engine.execute('SELECT 1;')
15 return
16 except DatabaseError:
17 time.sleep(30)
18
19 retried = True
20
21
22 @manager.command()
23 def create_tables():
24 """Create the database tables."""
25 from redash.models import db
26
27 _wait_for_db_connection(db)
28 db.create_all()
29
30 # Need to mark current DB as up to date
31 stamp()
32
33
34 @manager.command()
35 def drop_tables():
36 """Drop the database tables."""
37 from redash.models import db
38
39 _wait_for_db_connection(db)
40 db.drop_all()
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/cli/database.py b/redash/cli/database.py
--- a/redash/cli/database.py
+++ b/redash/cli/database.py
@@ -1,5 +1,6 @@
import time
+import sqlalchemy
from flask.cli import AppGroup
from flask_migrate import stamp
from sqlalchemy.exc import DatabaseError
@@ -25,6 +26,8 @@
from redash.models import db
_wait_for_db_connection(db)
+ # To create triggers for searchable models, we need to call configure_mappers().
+ sqlalchemy.orm.configure_mappers()
db.create_all()
# Need to mark current DB as up to date
| {"golden_diff": "diff --git a/redash/cli/database.py b/redash/cli/database.py\n--- a/redash/cli/database.py\n+++ b/redash/cli/database.py\n@@ -1,5 +1,6 @@\n import time\n \n+import sqlalchemy\n from flask.cli import AppGroup\n from flask_migrate import stamp\n from sqlalchemy.exc import DatabaseError\n@@ -25,6 +26,8 @@\n from redash.models import db\n \n _wait_for_db_connection(db)\n+ # To create triggers for searchable models, we need to call configure_mappers().\n+ sqlalchemy.orm.configure_mappers()\n db.create_all()\n \n # Need to mark current DB as up to date\n", "issue": "Queries search is not working on release/7.0.x\n### Issue Summary\r\n\r\nI try building docker image with the redash dockerfile and start by docker-compose. But queries search is not working\r\n\r\n### Steps to Reproduce\r\n\r\n\r\n\r\nCheck out the queries table and find that search_vector is null.\r\n\r\n\r\n### Technical details:\r\n\r\n* Redash Version: release/7.0.x\r\n* Browser/OS: Chrome Mac\r\n* How did you install Redash: redash setup\r\n\n", "before_files": [{"content": "import time\n\nfrom flask.cli import AppGroup\nfrom flask_migrate import stamp\nfrom sqlalchemy.exc import DatabaseError\n\nmanager = AppGroup(help=\"Manage the database (create/drop tables).\")\n\n\ndef _wait_for_db_connection(db):\n retried = False\n while not retried:\n try:\n db.engine.execute('SELECT 1;')\n return\n except DatabaseError:\n time.sleep(30)\n\n retried = True\n\n\[email protected]()\ndef create_tables():\n \"\"\"Create the database tables.\"\"\"\n from redash.models import db\n\n _wait_for_db_connection(db)\n db.create_all()\n\n # Need to mark current DB as up to date\n stamp()\n\n\[email protected]()\ndef drop_tables():\n \"\"\"Drop the database tables.\"\"\"\n from redash.models import db\n\n _wait_for_db_connection(db)\n db.drop_all()\n", "path": "redash/cli/database.py"}], "after_files": [{"content": "import time\n\nimport sqlalchemy\nfrom flask.cli import AppGroup\nfrom flask_migrate import stamp\nfrom sqlalchemy.exc import DatabaseError\n\nmanager = AppGroup(help=\"Manage the database (create/drop tables).\")\n\n\ndef _wait_for_db_connection(db):\n retried = False\n while not retried:\n try:\n db.engine.execute('SELECT 1;')\n return\n except DatabaseError:\n time.sleep(30)\n\n retried = True\n\n\[email protected]()\ndef create_tables():\n \"\"\"Create the database tables.\"\"\"\n from redash.models import db\n\n _wait_for_db_connection(db)\n # To create triggers for searchable models, we need to call configure_mappers().\n sqlalchemy.orm.configure_mappers()\n db.create_all()\n\n # Need to mark current DB as up to date\n stamp()\n\n\[email protected]()\ndef drop_tables():\n \"\"\"Drop the database tables.\"\"\"\n from redash.models import db\n\n _wait_for_db_connection(db)\n db.drop_all()\n", "path": "redash/cli/database.py"}]} | 749 | 142 |
gh_patches_debug_27768 | rasdani/github-patches | git_diff | svthalia__concrexit-2021 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Registration message in calendar wrong
### Describe the bug
In the calendar, when you have to register for an event, the block says: "You can register on .......".
However, this seems to be the date when the event will take place (or the registration deadline, I can't tell for sure).
See screenshots for an example.
### How to reproduce
Steps to reproduce the behaviour:
1. Go to the calendar and look for an event with a registration.
2. Look at the message in the calendar view
3. Compare with the registration date in the event itself.
### Expected behaviour
The message in the calendar view matches the date of when the registrations will open.
### Screenshots


--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/events/api/calendarjs/serializers.py`
Content:
```
1 from datetime import timedelta
2
3 from django.contrib.humanize.templatetags.humanize import naturaltime
4 from django.template.defaultfilters import date
5 from django.utils import timezone
6 from django.utils.translation import gettext_lazy as _
7 from rest_framework.reverse import reverse
8
9 from events import services
10 from events.models import Event
11 from thaliawebsite.api.calendarjs.serializers import CalenderJSSerializer
12
13
14 class EventsCalenderJSSerializer(CalenderJSSerializer):
15 class Meta(CalenderJSSerializer.Meta):
16 model = Event
17
18 def _url(self, instance):
19 return reverse("events:event", kwargs={"pk": instance.id})
20
21 def _class_names(self, instance):
22 if self.context["member"] and services.is_user_registered(
23 self.context["member"], instance
24 ):
25 if services.user_registration_pending(self.context["member"], instance):
26 return ["regular-event-pending-registration"]
27 else:
28 return ["regular-event-has-registration"]
29 elif (not instance.registration_required) or instance.registration_allowed:
30 return ["regular-event-registration-open"]
31 else:
32 # I think this handles the case that registration is needed, but not yet possible
33 return ["regular-event-registration-closed"]
34
35 def _registration_info(self, instance: Event):
36 # If registered in some way
37 if self.context["member"] and services.is_user_registered(
38 self.context["member"], instance
39 ):
40 queue_pos = services.user_registration_pending(
41 self.context["member"], instance
42 )
43 # In waiting list
44 if type(queue_pos) is int:
45 return _("In waiting list at position {queue_pos}").format(
46 queue_pos=queue_pos
47 )
48 # Actually registered
49 else:
50 return _("You are registered for this event")
51 # Optional registration possible
52 elif instance.optional_registration_allowed:
53 return _("You can optionally register for this event")
54 # No places left
55 elif instance.reached_participants_limit():
56 return _("You can put yourself on the waiting list for this event")
57 # Registration still possible
58 elif instance.registration_allowed:
59 return _("You can register for this event")
60 # Not registration time yet
61 elif instance.registration_end:
62 now = timezone.now()
63 if instance.registration_end < now:
64 return _("Registrations have been closed")
65 elif instance.registration_end <= now + timedelta(days=2):
66 return _("You can register {at_time}").format(
67 at_time=naturaltime(instance.registration_end)
68 )
69 else:
70 return _("You can register on {date}").format(
71 date=date(instance.registration_end)
72 )
73
74
75 class UnpublishedEventsCalenderJSSerializer(CalenderJSSerializer):
76 """See CalenderJSSerializer, customised classes."""
77
78 class Meta(CalenderJSSerializer.Meta):
79 model = Event
80
81 def _class_names(self, instance):
82 return ["unpublished-event"]
83
84 def _url(self, instance):
85 return reverse("admin:events_event_details", kwargs={"pk": instance.id})
86
87 def _registration_info(self, instance):
88 return "Unpublished event"
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/events/api/calendarjs/serializers.py b/website/events/api/calendarjs/serializers.py
--- a/website/events/api/calendarjs/serializers.py
+++ b/website/events/api/calendarjs/serializers.py
@@ -50,7 +50,7 @@
return _("You are registered for this event")
# Optional registration possible
elif instance.optional_registration_allowed:
- return _("You can optionally register for this event")
+ return _("Registering for this event is optional")
# No places left
elif instance.reached_participants_limit():
return _("You can put yourself on the waiting list for this event")
@@ -62,13 +62,13 @@
now = timezone.now()
if instance.registration_end < now:
return _("Registrations have been closed")
- elif instance.registration_end <= now + timedelta(days=2):
- return _("You can register {at_time}").format(
- at_time=naturaltime(instance.registration_end)
+ elif instance.registration_start <= now + timedelta(days=2):
+ return _("Registrations open {at_time}").format(
+ at_time=naturaltime(instance.registration_start)
)
else:
- return _("You can register on {date}").format(
- date=date(instance.registration_end)
+ return _("Registrations open {date}").format(
+ date=date(instance.registration_start)
)
| {"golden_diff": "diff --git a/website/events/api/calendarjs/serializers.py b/website/events/api/calendarjs/serializers.py\n--- a/website/events/api/calendarjs/serializers.py\n+++ b/website/events/api/calendarjs/serializers.py\n@@ -50,7 +50,7 @@\n return _(\"You are registered for this event\")\n # Optional registration possible\n elif instance.optional_registration_allowed:\n- return _(\"You can optionally register for this event\")\n+ return _(\"Registering for this event is optional\")\n # No places left\n elif instance.reached_participants_limit():\n return _(\"You can put yourself on the waiting list for this event\")\n@@ -62,13 +62,13 @@\n now = timezone.now()\n if instance.registration_end < now:\n return _(\"Registrations have been closed\")\n- elif instance.registration_end <= now + timedelta(days=2):\n- return _(\"You can register {at_time}\").format(\n- at_time=naturaltime(instance.registration_end)\n+ elif instance.registration_start <= now + timedelta(days=2):\n+ return _(\"Registrations open {at_time}\").format(\n+ at_time=naturaltime(instance.registration_start)\n )\n else:\n- return _(\"You can register on {date}\").format(\n- date=date(instance.registration_end)\n+ return _(\"Registrations open {date}\").format(\n+ date=date(instance.registration_start)\n )\n", "issue": "Registration message in calendar wrong\n### Describe the bug\r\nIn the calendar, when you have to register for an event, it says in the block; You can register on .......\r\nHowever, this seems to be the date of when the event will take place (or the registration deadline, can't tell for sure).\r\nSee screenshots for an example.\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Go to the calendar and look for an event with a registration.\r\n2. Look at the message in the calendar view\r\n3. 
Compare with the registration date in the event itself.\r\n\r\n### Expected behaviour\r\nThe message in the calendar view matches the date of when the registrations will open.\r\n\r\n### Screenshots\r\n\r\n\r\n\n", "before_files": [{"content": "from datetime import timedelta\n\nfrom django.contrib.humanize.templatetags.humanize import naturaltime\nfrom django.template.defaultfilters import date\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework.reverse import reverse\n\nfrom events import services\nfrom events.models import Event\nfrom thaliawebsite.api.calendarjs.serializers import CalenderJSSerializer\n\n\nclass EventsCalenderJSSerializer(CalenderJSSerializer):\n class Meta(CalenderJSSerializer.Meta):\n model = Event\n\n def _url(self, instance):\n return reverse(\"events:event\", kwargs={\"pk\": instance.id})\n\n def _class_names(self, instance):\n if self.context[\"member\"] and services.is_user_registered(\n self.context[\"member\"], instance\n ):\n if services.user_registration_pending(self.context[\"member\"], instance):\n return [\"regular-event-pending-registration\"]\n else:\n return [\"regular-event-has-registration\"]\n elif (not instance.registration_required) or instance.registration_allowed:\n return [\"regular-event-registration-open\"]\n else:\n # I think this handles the case that registration is needed, but not yet possible\n return [\"regular-event-registration-closed\"]\n\n def _registration_info(self, instance: Event):\n # If registered in some way\n if self.context[\"member\"] and services.is_user_registered(\n self.context[\"member\"], instance\n ):\n queue_pos = services.user_registration_pending(\n self.context[\"member\"], instance\n )\n # In waiting list\n if type(queue_pos) is int:\n return _(\"In waiting list at position {queue_pos}\").format(\n queue_pos=queue_pos\n )\n # Actually registered\n else:\n return _(\"You are registered for this event\")\n # Optional registration possible\n elif instance.optional_registration_allowed:\n return _(\"You can optionally register for this event\")\n # No places left\n elif instance.reached_participants_limit():\n return _(\"You can put yourself on the waiting list for this event\")\n # Registration still possible\n elif instance.registration_allowed:\n return _(\"You can register for this event\")\n # Not registration time yet\n elif instance.registration_end:\n now = timezone.now()\n if instance.registration_end < now:\n return _(\"Registrations have been closed\")\n elif instance.registration_end <= now + timedelta(days=2):\n return _(\"You can register {at_time}\").format(\n at_time=naturaltime(instance.registration_end)\n )\n else:\n return _(\"You can register on {date}\").format(\n date=date(instance.registration_end)\n )\n\n\nclass UnpublishedEventsCalenderJSSerializer(CalenderJSSerializer):\n \"\"\"See CalenderJSSerializer, customised classes.\"\"\"\n\n class Meta(CalenderJSSerializer.Meta):\n model = Event\n\n def _class_names(self, instance):\n return [\"unpublished-event\"]\n\n def _url(self, instance):\n return reverse(\"admin:events_event_details\", kwargs={\"pk\": instance.id})\n\n def _registration_info(self, instance):\n return \"Unpublished event\"\n", "path": "website/events/api/calendarjs/serializers.py"}], "after_files": [{"content": "from datetime import timedelta\n\nfrom django.contrib.humanize.templatetags.humanize import naturaltime\nfrom django.template.defaultfilters import date\nfrom django.utils import timezone\nfrom 
django.utils.translation import gettext_lazy as _\nfrom rest_framework.reverse import reverse\n\nfrom events import services\nfrom events.models import Event\nfrom thaliawebsite.api.calendarjs.serializers import CalenderJSSerializer\n\n\nclass EventsCalenderJSSerializer(CalenderJSSerializer):\n class Meta(CalenderJSSerializer.Meta):\n model = Event\n\n def _url(self, instance):\n return reverse(\"events:event\", kwargs={\"pk\": instance.id})\n\n def _class_names(self, instance):\n if self.context[\"member\"] and services.is_user_registered(\n self.context[\"member\"], instance\n ):\n if services.user_registration_pending(self.context[\"member\"], instance):\n return [\"regular-event-pending-registration\"]\n else:\n return [\"regular-event-has-registration\"]\n elif (not instance.registration_required) or instance.registration_allowed:\n return [\"regular-event-registration-open\"]\n else:\n # I think this handles the case that registration is needed, but not yet possible\n return [\"regular-event-registration-closed\"]\n\n def _registration_info(self, instance: Event):\n # If registered in some way\n if self.context[\"member\"] and services.is_user_registered(\n self.context[\"member\"], instance\n ):\n queue_pos = services.user_registration_pending(\n self.context[\"member\"], instance\n )\n # In waiting list\n if type(queue_pos) is int:\n return _(\"In waiting list at position {queue_pos}\").format(\n queue_pos=queue_pos\n )\n # Actually registered\n else:\n return _(\"You are registered for this event\")\n # Optional registration possible\n elif instance.optional_registration_allowed:\n return _(\"Registering for this event is optional\")\n # No places left\n elif instance.reached_participants_limit():\n return _(\"You can put yourself on the waiting list for this event\")\n # Registration still possible\n elif instance.registration_allowed:\n return _(\"You can register for this event\")\n # Not registration time yet\n elif instance.registration_end:\n now = timezone.now()\n if instance.registration_end < now:\n return _(\"Registrations have been closed\")\n elif instance.registration_start <= now + timedelta(days=2):\n return _(\"Registrations open {at_time}\").format(\n at_time=naturaltime(instance.registration_start)\n )\n else:\n return _(\"Registrations open {date}\").format(\n date=date(instance.registration_start)\n )\n\n\nclass UnpublishedEventsCalenderJSSerializer(CalenderJSSerializer):\n \"\"\"See CalenderJSSerializer, customised classes.\"\"\"\n\n class Meta(CalenderJSSerializer.Meta):\n model = Event\n\n def _class_names(self, instance):\n return [\"unpublished-event\"]\n\n def _url(self, instance):\n return reverse(\"admin:events_event_details\", kwargs={\"pk\": instance.id})\n\n def _registration_info(self, instance):\n return \"Unpublished event\"\n", "path": "website/events/api/calendarjs/serializers.py"}]} | 1,358 | 306 |
gh_patches_debug_43117 | rasdani/github-patches | git_diff | spyder-ide__spyder-7902 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
4.0beta1 is offered as an update when using a development version and a virtualenv
We need to avoid showing beta versions when checking for updates
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spyder/workers/updates.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 # Standard library imports
8 import json
9 import os
10 import ssl
11 import sys
12
13 # Third party imports
14 from qtpy.QtCore import QObject, Signal
15
16 # Local imports
17 from spyder import __version__
18 from spyder.config.base import _
19 from spyder.py3compat import PY3
20 from spyder.config.utils import is_anaconda
21 from spyder.utils.programs import check_version, is_stable_version
22
23
24 if PY3:
25 from urllib.request import urlopen
26 from urllib.error import URLError, HTTPError
27 else:
28 from urllib2 import urlopen, URLError, HTTPError
29
30
31 class WorkerUpdates(QObject):
32 """
33 Worker that checks for releases using either the Anaconda
34 default channels or the Github Releases page without
35 blocking the Spyder user interface, in case of connection
36 issues.
37 """
38 sig_ready = Signal()
39
40 def __init__(self, parent, startup, version=""):
41 QObject.__init__(self)
42 self._parent = parent
43 self.error = None
44 self.latest_release = None
45 self.startup = startup
46 self.version = version
47
48 def check_update_available(self, version, releases, github=False):
49 """Checks if there is an update available.
50
51 It takes as parameters the current version of Spyder and a list of
52 valid cleaned releases in chronological order.
53 Example: ['2.3.2', '2.3.3' ...] or with github ['2.3.4', '2.3.3' ...]
54 """
55 if is_stable_version(version):
56 # Remove non stable versions from the list
57 releases = [r for r in releases if is_stable_version(r)]
58
59 if github:
60 latest_release = releases[0]
61 else:
62 latest_release = releases[-1]
63
64 if version.endswith('dev'):
65 return (False, latest_release)
66
67 return (check_version(version, latest_release, '<'), latest_release)
68
69 def start(self):
70 """Main method of the WorkerUpdates worker"""
71 if is_anaconda():
72 self.url = 'https://repo.anaconda.com/pkgs/main'
73 if os.name == 'nt':
74 self.url += '/win-64/repodata.json'
75 elif sys.platform == 'darwin':
76 self.url += '/osx-64/repodata.json'
77 else:
78 self.url += '/linux-64/repodata.json'
79 else:
80 self.url = ('https://api.github.com/repos/'
81 'spyder-ide/spyder/releases')
82 self.update_available = False
83 self.latest_release = __version__
84
85 error_msg = None
86
87 try:
88 if hasattr(ssl, '_create_unverified_context'):
89 # Fix for issue # 2685 [Works only with Python >=2.7.9]
90 # More info: https://www.python.org/dev/peps/pep-0476/#opting-out
91 context = ssl._create_unverified_context()
92 page = urlopen(self.url, context=context)
93 else:
94 page = urlopen(self.url)
95 try:
96 data = page.read()
97
98 # Needed step for python3 compatibility
99 if not isinstance(data, str):
100 data = data.decode()
101
102 data = json.loads(data)
103 if not self.version:
104 self.version = __version__
105
106 if is_anaconda():
107 releases = []
108 for item in data['packages']:
109 if 'spyder' in item and 'spyder-kernels' not in item:
110 releases.append(item.split('-')[1])
111 result = self.check_update_available(self.version,
112 releases)
113 else:
114 releases = [item['tag_name'].replace('v', '')
115 for item in data]
116 result = self.check_update_available(self.version,
117 releases,
118 github=True)
119 self.update_available, self.latest_release = result
120 except Exception:
121 error_msg = _('Unable to retrieve information.')
122 except HTTPError:
123 error_msg = _('Unable to retrieve information.')
124 except URLError:
125 error_msg = _('Unable to connect to the internet. <br><br>Make '
126 'sure the connection is working properly.')
127 except Exception:
128 error_msg = _('Unable to check for updates.')
129
130 # Don't show dialog when starting up spyder and an error occur
131 if not (self.startup and error_msg is not None):
132 self.error = error_msg
133 self.sig_ready.emit()
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/spyder/workers/updates.py b/spyder/workers/updates.py
--- a/spyder/workers/updates.py
+++ b/spyder/workers/updates.py
@@ -7,6 +7,7 @@
# Standard library imports
import json
import os
+import re
import ssl
import sys
@@ -37,34 +38,41 @@
"""
sig_ready = Signal()
- def __init__(self, parent, startup, version=""):
+ def __init__(self, parent, startup, version="", releases=None):
QObject.__init__(self)
self._parent = parent
self.error = None
self.latest_release = None
self.startup = startup
- self.version = version
+ self.releases = releases
- def check_update_available(self, version, releases, github=False):
+ if not version:
+ self.version = __version__
+ else:
+ self.version = version
+
+ def check_update_available(self):
"""Checks if there is an update available.
It takes as parameters the current version of Spyder and a list of
valid cleaned releases in chronological order.
Example: ['2.3.2', '2.3.3' ...] or with github ['2.3.4', '2.3.3' ...]
"""
- if is_stable_version(version):
- # Remove non stable versions from the list
- releases = [r for r in releases if is_stable_version(r)]
+ # Don't perform any check for development versions
+ if 'dev' in self.version:
+ return (False, latest_release)
- if github:
- latest_release = releases[0]
+ # Filter releases
+ if is_stable_version(self.version):
+ releases = [r for r in self.releases if is_stable_version(r)]
else:
- latest_release = releases[-1]
+ releases = [r for r in self.releases
+ if not is_stable_version(r) or r in self.version]
- if version.endswith('dev'):
- return (False, latest_release)
+ latest_release = releases[-1]
- return (check_version(version, latest_release, '<'), latest_release)
+ return (check_version(self.version, latest_release, '<'),
+ latest_release)
def start(self):
"""Main method of the WorkerUpdates worker"""
@@ -98,24 +106,23 @@
# Needed step for python3 compatibility
if not isinstance(data, str):
data = data.decode()
-
data = json.loads(data)
- if not self.version:
- self.version = __version__
if is_anaconda():
- releases = []
- for item in data['packages']:
- if 'spyder' in item and 'spyder-kernels' not in item:
- releases.append(item.split('-')[1])
- result = self.check_update_available(self.version,
- releases)
+ if self.releases is None:
+ self.releases = []
+ for item in data['packages']:
+ if ('spyder' in item and
+ not re.search(r'spyder-[a-zA-Z]', item)):
+ self.releases.append(item.split('-')[1])
+ result = self.check_update_available()
else:
- releases = [item['tag_name'].replace('v', '')
- for item in data]
- result = self.check_update_available(self.version,
- releases,
- github=True)
+ if self.releases is None:
+ self.releases = [item['tag_name'].replace('v', '')
+ for item in data]
+ self.releases = list(reversed(self.releases))
+
+ result = self.check_update_available()
self.update_available, self.latest_release = result
except Exception:
error_msg = _('Unable to retrieve information.')
| {"golden_diff": "diff --git a/spyder/workers/updates.py b/spyder/workers/updates.py\n--- a/spyder/workers/updates.py\n+++ b/spyder/workers/updates.py\n@@ -7,6 +7,7 @@\n # Standard library imports\n import json\n import os\n+import re\n import ssl\n import sys\n \n@@ -37,34 +38,41 @@\n \"\"\"\n sig_ready = Signal()\n \n- def __init__(self, parent, startup, version=\"\"):\n+ def __init__(self, parent, startup, version=\"\", releases=None):\n QObject.__init__(self)\n self._parent = parent\n self.error = None\n self.latest_release = None\n self.startup = startup\n- self.version = version\n+ self.releases = releases\n \n- def check_update_available(self, version, releases, github=False):\n+ if not version:\n+ self.version = __version__\n+ else:\n+ self.version = version\n+\n+ def check_update_available(self):\n \"\"\"Checks if there is an update available.\n \n It takes as parameters the current version of Spyder and a list of\n valid cleaned releases in chronological order.\n Example: ['2.3.2', '2.3.3' ...] or with github ['2.3.4', '2.3.3' ...]\n \"\"\"\n- if is_stable_version(version):\n- # Remove non stable versions from the list\n- releases = [r for r in releases if is_stable_version(r)]\n+ # Don't perform any check for development versions\n+ if 'dev' in self.version:\n+ return (False, latest_release)\n \n- if github:\n- latest_release = releases[0]\n+ # Filter releases\n+ if is_stable_version(self.version):\n+ releases = [r for r in self.releases if is_stable_version(r)]\n else:\n- latest_release = releases[-1]\n+ releases = [r for r in self.releases\n+ if not is_stable_version(r) or r in self.version]\n \n- if version.endswith('dev'):\n- return (False, latest_release)\n+ latest_release = releases[-1]\n \n- return (check_version(version, latest_release, '<'), latest_release)\n+ return (check_version(self.version, latest_release, '<'),\n+ latest_release)\n \n def start(self):\n \"\"\"Main method of the WorkerUpdates worker\"\"\"\n@@ -98,24 +106,23 @@\n # Needed step for python3 compatibility\n if not isinstance(data, str):\n data = data.decode()\n-\n data = json.loads(data)\n- if not self.version:\n- self.version = __version__\n \n if is_anaconda():\n- releases = []\n- for item in data['packages']:\n- if 'spyder' in item and 'spyder-kernels' not in item:\n- releases.append(item.split('-')[1])\n- result = self.check_update_available(self.version,\n- releases)\n+ if self.releases is None:\n+ self.releases = []\n+ for item in data['packages']:\n+ if ('spyder' in item and\n+ not re.search(r'spyder-[a-zA-Z]', item)):\n+ self.releases.append(item.split('-')[1])\n+ result = self.check_update_available()\n else:\n- releases = [item['tag_name'].replace('v', '')\n- for item in data]\n- result = self.check_update_available(self.version,\n- releases,\n- github=True)\n+ if self.releases is None:\n+ self.releases = [item['tag_name'].replace('v', '')\n+ for item in data]\n+ self.releases = list(reversed(self.releases))\n+\n+ result = self.check_update_available()\n self.update_available, self.latest_release = result\n except Exception:\n error_msg = _('Unable to retrieve information.')\n", "issue": "4.0beta1 is offered as an update when using a development version and a virtualenv\nWe need to avoid showing beta versions when checking for updates\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n# Standard library imports\nimport json\nimport os\nimport 
ssl\nimport sys\n\n# Third party imports\nfrom qtpy.QtCore import QObject, Signal\n\n# Local imports\nfrom spyder import __version__\nfrom spyder.config.base import _\nfrom spyder.py3compat import PY3\nfrom spyder.config.utils import is_anaconda\nfrom spyder.utils.programs import check_version, is_stable_version\n\n\nif PY3:\n from urllib.request import urlopen\n from urllib.error import URLError, HTTPError\nelse:\n from urllib2 import urlopen, URLError, HTTPError\n\n\nclass WorkerUpdates(QObject):\n \"\"\"\n Worker that checks for releases using either the Anaconda\n default channels or the Github Releases page without\n blocking the Spyder user interface, in case of connection\n issues.\n \"\"\"\n sig_ready = Signal()\n\n def __init__(self, parent, startup, version=\"\"):\n QObject.__init__(self)\n self._parent = parent\n self.error = None\n self.latest_release = None\n self.startup = startup\n self.version = version\n\n def check_update_available(self, version, releases, github=False):\n \"\"\"Checks if there is an update available.\n\n It takes as parameters the current version of Spyder and a list of\n valid cleaned releases in chronological order.\n Example: ['2.3.2', '2.3.3' ...] or with github ['2.3.4', '2.3.3' ...]\n \"\"\"\n if is_stable_version(version):\n # Remove non stable versions from the list\n releases = [r for r in releases if is_stable_version(r)]\n\n if github:\n latest_release = releases[0]\n else:\n latest_release = releases[-1]\n\n if version.endswith('dev'):\n return (False, latest_release)\n\n return (check_version(version, latest_release, '<'), latest_release)\n\n def start(self):\n \"\"\"Main method of the WorkerUpdates worker\"\"\"\n if is_anaconda():\n self.url = 'https://repo.anaconda.com/pkgs/main'\n if os.name == 'nt':\n self.url += '/win-64/repodata.json'\n elif sys.platform == 'darwin':\n self.url += '/osx-64/repodata.json'\n else:\n self.url += '/linux-64/repodata.json'\n else:\n self.url = ('https://api.github.com/repos/'\n 'spyder-ide/spyder/releases')\n self.update_available = False\n self.latest_release = __version__\n\n error_msg = None\n\n try:\n if hasattr(ssl, '_create_unverified_context'):\n # Fix for issue # 2685 [Works only with Python >=2.7.9]\n # More info: https://www.python.org/dev/peps/pep-0476/#opting-out\n context = ssl._create_unverified_context()\n page = urlopen(self.url, context=context)\n else:\n page = urlopen(self.url)\n try:\n data = page.read()\n\n # Needed step for python3 compatibility\n if not isinstance(data, str):\n data = data.decode()\n\n data = json.loads(data)\n if not self.version:\n self.version = __version__\n\n if is_anaconda():\n releases = []\n for item in data['packages']:\n if 'spyder' in item and 'spyder-kernels' not in item:\n releases.append(item.split('-')[1])\n result = self.check_update_available(self.version,\n releases)\n else:\n releases = [item['tag_name'].replace('v', '')\n for item in data]\n result = self.check_update_available(self.version,\n releases,\n github=True)\n self.update_available, self.latest_release = result\n except Exception:\n error_msg = _('Unable to retrieve information.')\n except HTTPError:\n error_msg = _('Unable to retrieve information.')\n except URLError:\n error_msg = _('Unable to connect to the internet. 
<br><br>Make '\n 'sure the connection is working properly.')\n except Exception:\n error_msg = _('Unable to check for updates.')\n\n # Don't show dialog when starting up spyder and an error occur\n if not (self.startup and error_msg is not None):\n self.error = error_msg\n self.sig_ready.emit()\n", "path": "spyder/workers/updates.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n# Standard library imports\nimport json\nimport os\nimport re\nimport ssl\nimport sys\n\n# Third party imports\nfrom qtpy.QtCore import QObject, Signal\n\n# Local imports\nfrom spyder import __version__\nfrom spyder.config.base import _\nfrom spyder.py3compat import PY3\nfrom spyder.config.utils import is_anaconda\nfrom spyder.utils.programs import check_version, is_stable_version\n\n\nif PY3:\n from urllib.request import urlopen\n from urllib.error import URLError, HTTPError\nelse:\n from urllib2 import urlopen, URLError, HTTPError\n\n\nclass WorkerUpdates(QObject):\n \"\"\"\n Worker that checks for releases using either the Anaconda\n default channels or the Github Releases page without\n blocking the Spyder user interface, in case of connection\n issues.\n \"\"\"\n sig_ready = Signal()\n\n def __init__(self, parent, startup, version=\"\", releases=None):\n QObject.__init__(self)\n self._parent = parent\n self.error = None\n self.latest_release = None\n self.startup = startup\n self.releases = releases\n\n if not version:\n self.version = __version__\n else:\n self.version = version\n\n def check_update_available(self):\n \"\"\"Checks if there is an update available.\n\n It takes as parameters the current version of Spyder and a list of\n valid cleaned releases in chronological order.\n Example: ['2.3.2', '2.3.3' ...] 
or with github ['2.3.4', '2.3.3' ...]\n \"\"\"\n # Don't perform any check for development versions\n if 'dev' in self.version:\n return (False, latest_release)\n\n # Filter releases\n if is_stable_version(self.version):\n releases = [r for r in self.releases if is_stable_version(r)]\n else:\n releases = [r for r in self.releases\n if not is_stable_version(r) or r in self.version]\n\n latest_release = releases[-1]\n\n return (check_version(self.version, latest_release, '<'),\n latest_release)\n\n def start(self):\n \"\"\"Main method of the WorkerUpdates worker\"\"\"\n if is_anaconda():\n self.url = 'https://repo.anaconda.com/pkgs/main'\n if os.name == 'nt':\n self.url += '/win-64/repodata.json'\n elif sys.platform == 'darwin':\n self.url += '/osx-64/repodata.json'\n else:\n self.url += '/linux-64/repodata.json'\n else:\n self.url = ('https://api.github.com/repos/'\n 'spyder-ide/spyder/releases')\n self.update_available = False\n self.latest_release = __version__\n\n error_msg = None\n\n try:\n if hasattr(ssl, '_create_unverified_context'):\n # Fix for issue # 2685 [Works only with Python >=2.7.9]\n # More info: https://www.python.org/dev/peps/pep-0476/#opting-out\n context = ssl._create_unverified_context()\n page = urlopen(self.url, context=context)\n else:\n page = urlopen(self.url)\n try:\n data = page.read()\n\n # Needed step for python3 compatibility\n if not isinstance(data, str):\n data = data.decode()\n data = json.loads(data)\n\n if is_anaconda():\n if self.releases is None:\n self.releases = []\n for item in data['packages']:\n if ('spyder' in item and\n not re.search(r'spyder-[a-zA-Z]', item)):\n self.releases.append(item.split('-')[1])\n result = self.check_update_available()\n else:\n if self.releases is None:\n self.releases = [item['tag_name'].replace('v', '')\n for item in data]\n self.releases = list(reversed(self.releases))\n\n result = self.check_update_available()\n self.update_available, self.latest_release = result\n except Exception:\n error_msg = _('Unable to retrieve information.')\n except HTTPError:\n error_msg = _('Unable to retrieve information.')\n except URLError:\n error_msg = _('Unable to connect to the internet. <br><br>Make '\n 'sure the connection is working properly.')\n except Exception:\n error_msg = _('Unable to check for updates.')\n\n # Don't show dialog when starting up spyder and an error occur\n if not (self.startup and error_msg is not None):\n self.error = error_msg\n self.sig_ready.emit()\n", "path": "spyder/workers/updates.py"}]} | 1,582 | 868 |
gh_patches_debug_28096 | rasdani/github-patches | git_diff | open-mmlab__mmpretrain-149 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug report
The attribute **meta_keys** is lost in Collect.
```python
@PIPELINES.register_module()
class Collect(object):
"""
Collect data from the loader relevant to the specific task.
This is usually the last stage of the data loader pipeline. Typically keys
is set to some subset of "img" and "gt_label".
"""
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
data = {}
for key in self.keys:
data[key] = results[key]
return data
def __repr__(self):
return self.__class__.__name__ + \
f'(keys={self.keys}, meta_keys={self.meta_keys})'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmcls/datasets/pipelines/formating.py`
Content:
```
1 from collections.abc import Sequence
2
3 import mmcv
4 import numpy as np
5 import torch
6 from PIL import Image
7
8 from ..builder import PIPELINES
9
10
11 def to_tensor(data):
12 """Convert objects of various python types to :obj:`torch.Tensor`.
13
14 Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
15 :class:`Sequence`, :class:`int` and :class:`float`.
16 """
17 if isinstance(data, torch.Tensor):
18 return data
19 elif isinstance(data, np.ndarray):
20 return torch.from_numpy(data)
21 elif isinstance(data, Sequence) and not mmcv.is_str(data):
22 return torch.tensor(data)
23 elif isinstance(data, int):
24 return torch.LongTensor([data])
25 elif isinstance(data, float):
26 return torch.FloatTensor([data])
27 else:
28 raise TypeError(
29 f'Type {type(data)} cannot be converted to tensor.'
30 'Supported types are: `numpy.ndarray`, `torch.Tensor`, '
31 '`Sequence`, `int` and `float`')
32
33
34 @PIPELINES.register_module()
35 class ToTensor(object):
36
37 def __init__(self, keys):
38 self.keys = keys
39
40 def __call__(self, results):
41 for key in self.keys:
42 results[key] = to_tensor(results[key])
43 return results
44
45 def __repr__(self):
46 return self.__class__.__name__ + f'(keys={self.keys})'
47
48
49 @PIPELINES.register_module()
50 class ImageToTensor(object):
51
52 def __init__(self, keys):
53 self.keys = keys
54
55 def __call__(self, results):
56 for key in self.keys:
57 img = results[key]
58 if len(img.shape) < 3:
59 img = np.expand_dims(img, -1)
60 results[key] = to_tensor(img.transpose(2, 0, 1))
61 return results
62
63 def __repr__(self):
64 return self.__class__.__name__ + f'(keys={self.keys})'
65
66
67 @PIPELINES.register_module()
68 class Transpose(object):
69
70 def __init__(self, keys, order):
71 self.keys = keys
72 self.order = order
73
74 def __call__(self, results):
75 for key in self.keys:
76 results[key] = results[key].transpose(self.order)
77 return results
78
79 def __repr__(self):
80 return self.__class__.__name__ + \
81 f'(keys={self.keys}, order={self.order})'
82
83
84 @PIPELINES.register_module()
85 class ToPIL(object):
86
87 def __init__(self):
88 pass
89
90 def __call__(self, results):
91 results['img'] = Image.fromarray(results['img'])
92 return results
93
94
95 @PIPELINES.register_module()
96 class ToNumpy(object):
97
98 def __init__(self):
99 pass
100
101 def __call__(self, results):
102 results['img'] = np.array(results['img'], dtype=np.float32)
103 return results
104
105
106 @PIPELINES.register_module()
107 class Collect(object):
108 """
109 Collect data from the loader relevant to the specific task.
110
111 This is usually the last stage of the data loader pipeline. Typically keys
112 is set to some subset of "img" and "gt_label".
113 """
114
115 def __init__(self, keys):
116 self.keys = keys
117
118 def __call__(self, results):
119 data = {}
120 for key in self.keys:
121 data[key] = results[key]
122 return data
123
124 def __repr__(self):
125 return self.__class__.__name__ + \
126 f'(keys={self.keys}, meta_keys={self.meta_keys})'
127
128
129 @PIPELINES.register_module()
130 class WrapFieldsToLists(object):
131 """Wrap fields of the data dictionary into lists for evaluation.
132
133 This class can be used as a last step of a test or validation
134 pipeline for single image evaluation or inference.
135
136 Example:
137 >>> test_pipeline = [
138 >>> dict(type='LoadImageFromFile'),
139 >>> dict(type='Normalize',
140 mean=[123.675, 116.28, 103.53],
141 std=[58.395, 57.12, 57.375],
142 to_rgb=True),
143 >>> dict(type='ImageToTensor', keys=['img']),
144 >>> dict(type='Collect', keys=['img']),
145 >>> dict(type='WrapIntoLists')
146 >>> ]
147 """
148
149 def __call__(self, results):
150 # Wrap dict fields into lists
151 for key, val in results.items():
152 results[key] = [val]
153 return results
154
155 def __repr__(self):
156 return f'{self.__class__.__name__}()'
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmcls/datasets/pipelines/formating.py b/mmcls/datasets/pipelines/formating.py
--- a/mmcls/datasets/pipelines/formating.py
+++ b/mmcls/datasets/pipelines/formating.py
@@ -3,6 +3,7 @@
import mmcv
import numpy as np
import torch
+from mmcv.parallel import DataContainer as DC
from PIL import Image
from ..builder import PIPELINES
@@ -110,13 +111,34 @@
This is usually the last stage of the data loader pipeline. Typically keys
is set to some subset of "img" and "gt_label".
+
+ Args:
+ keys (Sequence[str]): Keys of results to be collected in ``data``.
+ meta_keys (Sequence[str], optional): Meta keys to be converted to
+ ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
+ Default: ``('filename', 'ori_shape', 'img_shape', 'flip',
+ 'flip_direction', 'img_norm_cfg')``
+
+ Returns:
+ dict: The result dict contains the following keys
+ - keys in``self.keys``
+ - ``img_metas`` if avaliable
"""
- def __init__(self, keys):
+ def __init__(self,
+ keys,
+ meta_keys=('filename', 'ori_shape', 'img_shape', 'flip',
+ 'flip_direction', 'img_norm_cfg')):
self.keys = keys
+ self.meta_keys = meta_keys
def __call__(self, results):
data = {}
+ img_meta = {}
+ for key in self.meta_keys:
+ if key in results:
+ img_meta[key] = results[key]
+ data['img_metas'] = DC(img_meta, cpu_only=True)
for key in self.keys:
data[key] = results[key]
return data
| {"golden_diff": "diff --git a/mmcls/datasets/pipelines/formating.py b/mmcls/datasets/pipelines/formating.py\n--- a/mmcls/datasets/pipelines/formating.py\n+++ b/mmcls/datasets/pipelines/formating.py\n@@ -3,6 +3,7 @@\n import mmcv\n import numpy as np\n import torch\n+from mmcv.parallel import DataContainer as DC\n from PIL import Image\n \n from ..builder import PIPELINES\n@@ -110,13 +111,34 @@\n \n This is usually the last stage of the data loader pipeline. Typically keys\n is set to some subset of \"img\" and \"gt_label\".\n+\n+ Args:\n+ keys (Sequence[str]): Keys of results to be collected in ``data``.\n+ meta_keys (Sequence[str], optional): Meta keys to be converted to\n+ ``mmcv.DataContainer`` and collected in ``data[img_metas]``.\n+ Default: ``('filename', 'ori_shape', 'img_shape', 'flip',\n+ 'flip_direction', 'img_norm_cfg')``\n+\n+ Returns:\n+ dict: The result dict contains the following keys\n+ - keys in``self.keys``\n+ - ``img_metas`` if avaliable\n \"\"\"\n \n- def __init__(self, keys):\n+ def __init__(self,\n+ keys,\n+ meta_keys=('filename', 'ori_shape', 'img_shape', 'flip',\n+ 'flip_direction', 'img_norm_cfg')):\n self.keys = keys\n+ self.meta_keys = meta_keys\n \n def __call__(self, results):\n data = {}\n+ img_meta = {}\n+ for key in self.meta_keys:\n+ if key in results:\n+ img_meta[key] = results[key]\n+ data['img_metas'] = DC(img_meta, cpu_only=True)\n for key in self.keys:\n data[key] = results[key]\n return data\n", "issue": "bug report\nThe attribute **meta_keys** is lost in Collect.\r\n```python\r\[email protected]_module()\r\nclass Collect(object):\r\n \"\"\"\r\n Collect data from the loader relevant to the specific task.\r\n This is usually the last stage of the data loader pipeline. Typically keys\r\n is set to some subset of \"img\" and \"gt_label\".\r\n \"\"\"\r\n\r\n def __init__(self, keys):\r\n self.keys = keys\r\n\r\n def __call__(self, results):\r\n data = {}\r\n for key in self.keys:\r\n data[key] = results[key]\r\n return data\r\n\r\n def __repr__(self):\r\n return self.__class__.__name__ + \\\r\n f'(keys={self.keys}, meta_keys={self.meta_keys})'\r\n```\n", "before_files": [{"content": "from collections.abc import Sequence\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom PIL import Image\n\nfrom ..builder import PIPELINES\n\n\ndef to_tensor(data):\n \"\"\"Convert objects of various python types to :obj:`torch.Tensor`.\n\n Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,\n :class:`Sequence`, :class:`int` and :class:`float`.\n \"\"\"\n if isinstance(data, torch.Tensor):\n return data\n elif isinstance(data, np.ndarray):\n return torch.from_numpy(data)\n elif isinstance(data, Sequence) and not mmcv.is_str(data):\n return torch.tensor(data)\n elif isinstance(data, int):\n return torch.LongTensor([data])\n elif isinstance(data, float):\n return torch.FloatTensor([data])\n else:\n raise TypeError(\n f'Type {type(data)} cannot be converted to tensor.'\n 'Supported types are: `numpy.ndarray`, `torch.Tensor`, '\n '`Sequence`, `int` and `float`')\n\n\[email protected]_module()\nclass ToTensor(object):\n\n def __init__(self, keys):\n self.keys = keys\n\n def __call__(self, results):\n for key in self.keys:\n results[key] = to_tensor(results[key])\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(keys={self.keys})'\n\n\[email protected]_module()\nclass ImageToTensor(object):\n\n def __init__(self, keys):\n self.keys = keys\n\n def __call__(self, results):\n for key in self.keys:\n img = results[key]\n if 
len(img.shape) < 3:\n img = np.expand_dims(img, -1)\n results[key] = to_tensor(img.transpose(2, 0, 1))\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(keys={self.keys})'\n\n\[email protected]_module()\nclass Transpose(object):\n\n def __init__(self, keys, order):\n self.keys = keys\n self.order = order\n\n def __call__(self, results):\n for key in self.keys:\n results[key] = results[key].transpose(self.order)\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n f'(keys={self.keys}, order={self.order})'\n\n\[email protected]_module()\nclass ToPIL(object):\n\n def __init__(self):\n pass\n\n def __call__(self, results):\n results['img'] = Image.fromarray(results['img'])\n return results\n\n\[email protected]_module()\nclass ToNumpy(object):\n\n def __init__(self):\n pass\n\n def __call__(self, results):\n results['img'] = np.array(results['img'], dtype=np.float32)\n return results\n\n\[email protected]_module()\nclass Collect(object):\n \"\"\"\n Collect data from the loader relevant to the specific task.\n\n This is usually the last stage of the data loader pipeline. Typically keys\n is set to some subset of \"img\" and \"gt_label\".\n \"\"\"\n\n def __init__(self, keys):\n self.keys = keys\n\n def __call__(self, results):\n data = {}\n for key in self.keys:\n data[key] = results[key]\n return data\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n f'(keys={self.keys}, meta_keys={self.meta_keys})'\n\n\[email protected]_module()\nclass WrapFieldsToLists(object):\n \"\"\"Wrap fields of the data dictionary into lists for evaluation.\n\n This class can be used as a last step of a test or validation\n pipeline for single image evaluation or inference.\n\n Example:\n >>> test_pipeline = [\n >>> dict(type='LoadImageFromFile'),\n >>> dict(type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n >>> dict(type='ImageToTensor', keys=['img']),\n >>> dict(type='Collect', keys=['img']),\n >>> dict(type='WrapIntoLists')\n >>> ]\n \"\"\"\n\n def __call__(self, results):\n # Wrap dict fields into lists\n for key, val in results.items():\n results[key] = [val]\n return results\n\n def __repr__(self):\n return f'{self.__class__.__name__}()'\n", "path": "mmcls/datasets/pipelines/formating.py"}], "after_files": [{"content": "from collections.abc import Sequence\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.parallel import DataContainer as DC\nfrom PIL import Image\n\nfrom ..builder import PIPELINES\n\n\ndef to_tensor(data):\n \"\"\"Convert objects of various python types to :obj:`torch.Tensor`.\n\n Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,\n :class:`Sequence`, :class:`int` and :class:`float`.\n \"\"\"\n if isinstance(data, torch.Tensor):\n return data\n elif isinstance(data, np.ndarray):\n return torch.from_numpy(data)\n elif isinstance(data, Sequence) and not mmcv.is_str(data):\n return torch.tensor(data)\n elif isinstance(data, int):\n return torch.LongTensor([data])\n elif isinstance(data, float):\n return torch.FloatTensor([data])\n else:\n raise TypeError(\n f'Type {type(data)} cannot be converted to tensor.'\n 'Supported types are: `numpy.ndarray`, `torch.Tensor`, '\n '`Sequence`, `int` and `float`')\n\n\[email protected]_module()\nclass ToTensor(object):\n\n def __init__(self, keys):\n self.keys = keys\n\n def __call__(self, results):\n for key in self.keys:\n results[key] = to_tensor(results[key])\n return results\n\n def __repr__(self):\n return 
self.__class__.__name__ + f'(keys={self.keys})'\n\n\[email protected]_module()\nclass ImageToTensor(object):\n\n def __init__(self, keys):\n self.keys = keys\n\n def __call__(self, results):\n for key in self.keys:\n img = results[key]\n if len(img.shape) < 3:\n img = np.expand_dims(img, -1)\n results[key] = to_tensor(img.transpose(2, 0, 1))\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + f'(keys={self.keys})'\n\n\[email protected]_module()\nclass Transpose(object):\n\n def __init__(self, keys, order):\n self.keys = keys\n self.order = order\n\n def __call__(self, results):\n for key in self.keys:\n results[key] = results[key].transpose(self.order)\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n f'(keys={self.keys}, order={self.order})'\n\n\[email protected]_module()\nclass ToPIL(object):\n\n def __init__(self):\n pass\n\n def __call__(self, results):\n results['img'] = Image.fromarray(results['img'])\n return results\n\n\[email protected]_module()\nclass ToNumpy(object):\n\n def __init__(self):\n pass\n\n def __call__(self, results):\n results['img'] = np.array(results['img'], dtype=np.float32)\n return results\n\n\[email protected]_module()\nclass Collect(object):\n \"\"\"\n Collect data from the loader relevant to the specific task.\n\n This is usually the last stage of the data loader pipeline. Typically keys\n is set to some subset of \"img\" and \"gt_label\".\n\n Args:\n keys (Sequence[str]): Keys of results to be collected in ``data``.\n meta_keys (Sequence[str], optional): Meta keys to be converted to\n ``mmcv.DataContainer`` and collected in ``data[img_metas]``.\n Default: ``('filename', 'ori_shape', 'img_shape', 'flip',\n 'flip_direction', 'img_norm_cfg')``\n\n Returns:\n dict: The result dict contains the following keys\n - keys in``self.keys``\n - ``img_metas`` if avaliable\n \"\"\"\n\n def __init__(self,\n keys,\n meta_keys=('filename', 'ori_shape', 'img_shape', 'flip',\n 'flip_direction', 'img_norm_cfg')):\n self.keys = keys\n self.meta_keys = meta_keys\n\n def __call__(self, results):\n data = {}\n img_meta = {}\n for key in self.meta_keys:\n if key in results:\n img_meta[key] = results[key]\n data['img_metas'] = DC(img_meta, cpu_only=True)\n for key in self.keys:\n data[key] = results[key]\n return data\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n f'(keys={self.keys}, meta_keys={self.meta_keys})'\n\n\[email protected]_module()\nclass WrapFieldsToLists(object):\n \"\"\"Wrap fields of the data dictionary into lists for evaluation.\n\n This class can be used as a last step of a test or validation\n pipeline for single image evaluation or inference.\n\n Example:\n >>> test_pipeline = [\n >>> dict(type='LoadImageFromFile'),\n >>> dict(type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n >>> dict(type='ImageToTensor', keys=['img']),\n >>> dict(type='Collect', keys=['img']),\n >>> dict(type='WrapIntoLists')\n >>> ]\n \"\"\"\n\n def __call__(self, results):\n # Wrap dict fields into lists\n for key, val in results.items():\n results[key] = [val]\n return results\n\n def __repr__(self):\n return f'{self.__class__.__name__}()'\n", "path": "mmcls/datasets/pipelines/formating.py"}]} | 1,809 | 421 |
gh_patches_debug_7831 | rasdani/github-patches | git_diff | Kinto__kinto-913 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Forbidden is numbered 401 in the docs
Both Forbidden and Unauthorized responses are listed as 401 in the docs,
I think Forbidden should be 403, right?
For example:
http://kinto.readthedocs.io/en/stable/api/1.x/buckets.html#http-status-codes
Forbidden is numbered 401 in the docs
Both Forbidden and Unauthorized responses are listed as 401 in the docs,
I think Forbidden should be 403, right?
For example:
http://kinto.readthedocs.io/en/stable/api/1.x/buckets.html#http-status-codes
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Kinto documentation build configuration file, created by
4 # sphinx-quickstart on Mon Feb 2 15:08:06 2015.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 import os
16 import sys
17
18 __HERE__ = os.path.dirname(os.path.abspath(__file__))
19
20 on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
21
22 if not on_rtd: # only import and set the theme if we're building docs locally
23 import sphinx_rtd_theme
24 html_theme = 'sphinx_rtd_theme'
25 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
26
27 # otherwise, readthedocs.io uses their theme by default, so no need to specify
28 # it
29
30 # If extensions (or modules to document with autodoc) are in another directory,
31 # add these directories to sys.path here. If the directory is relative to the
32 # documentation root, use os.path.abspath to make it absolute, like shown here.
33 sys.path.insert(0, os.path.abspath(os.path.join('..')))
34
35 # -- General configuration ------------------------------------------------
36
37 # If your documentation needs a minimal Sphinx version, state it here.
38 # needs_sphinx = '1.0'
39
40 # Add any Sphinx extension module names here, as strings. They can be
41 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
42 # ones.
43 extensions = [
44 'sphinx.ext.autodoc',
45 'sphinxcontrib.httpdomain',
46 'sphinx.ext.extlinks',
47 'sphinx.ext.intersphinx',
48 ]
49
50 # Add any paths that contain templates here, relative to this directory.
51 templates_path = ['_templates']
52 html_additional_pages = {
53 'index': 'indexcontent.html',
54 }
55
56
57 # The suffix of source filenames.
58 source_suffix = '.rst'
59
60 # The encoding of source files.
61 # source_encoding = 'utf-8-sig'
62
63 # The master toctree document.
64 master_doc = 'index'
65
66 # General information about the project.
67 project = u'Kinto'
68 copyright = u'2015-2016 — Mozilla Services'
69
70 # The version info for the project you're documenting, acts as replacement for
71 # |version| and |release|, also used in various other places throughout the
72 # built documents.
73 #
74 # The short X.Y version.
75 version = '4.3'
76 # The full version, including alpha/beta/rc tags.
77 release = '4.3.1'
78
79 # List of patterns, relative to source directory, that match files and
80 # directories to ignore when looking for source files.
81 exclude_patterns = ['_build']
82
83 # The name of the Pygments (syntax highlighting) style to use.
84 pygments_style = 'sphinx'
85
86 # Add any paths that contain custom static files (such as style sheets) here,
87 # relative to this directory. They are copied after the builtin static files,
88 # so a file named "default.css" will overwrite the builtin "default.css".
89 html_static_path = ['_static']
90
91 # Output file base name for HTML help builder.
92 htmlhelp_basename = 'Kintodoc'
93
94
95 # -- Options for autodoc --------------------------------------------------
96
97 autodoc_member_order = 'bysource'
98 # Enable nitpicky mode - which ensures that all references in the docs
99 # resolve.
100 nitpicky = True
101 nitpick_ignore = [
102 ('py:obj', 'bool'),
103 ('py:obj', 'Exception'),
104 ('py:obj', 'int'),
105 ('py:obj', 'str'),
106 ('py:obj', 'dict'),
107 ('py:obj', 'list'),
108 ('py:obj', 'tuple'),
109 ('py:obj', 'float'),
110 ('py:obj', 'cornice.Service'),
111 # Member autodoc fails with those:
112 # kinto.core.resource.schema
113 ('py:class', 'Integer'),
114 ('py:class', 'String'),
115 # kinto.core.resource
116 ('py:class', 'ViewSet'),
117 ('py:class', 'ShareableViewSet'),
118 ('py:class', 'Model'),
119 ('py:class', 'ShareableModel'),
120 ]
121
122
123 # -- Options of extlinks --------------------------------------------------
124
125 extlinks = {
126 'github': ('https://github.com/%s/', ''),
127 'rtd': ('https://%s.readthedocs.io', ''),
128 'blog': ('http://www.servicedenuages.fr/%s', '')
129 }
130
131
132 # -- Substitutions
133
134 rst_epilog = """
135 .. |status-200| replace:: ``200 OK``
136 .. |status-201| replace:: ``201 Created``
137 .. |status-304| replace:: ``304 Not Modified``
138 .. |status-400| replace:: ``400 Bad Request``
139 .. |status-401| replace:: ``401 Unauthorized``
140 .. |status-403| replace:: ``401 Forbidden``
141 .. |status-404| replace:: ``404 Not Found``
142 .. |status-405| replace:: ``405 Method Not Allowed``
143 .. |status-406| replace:: ``406 Not Acceptable``
144 .. |status-409| replace:: ``409 Conflict``
145 .. |status-410| replace:: ``410 Gone``
146 .. |status-412| replace:: ``412 Precondition Failed``
147 .. |status-415| replace:: ``415 Unsupported Media Type``
148 .. |status-503| replace:: ``503 Service Unavailable``
149 """
150
151 # --
152
153 def setup(app):
154 # path relative to _static
155 app.add_stylesheet('theme_overrides.css')
156 app.add_javascript('piwik.js')
157
158
159 # -- Options for intersphinx --------------------------------------------------
160
161 intersphinx_mapping = {
162 'colander': ('https://colander.readthedocs.io/en/latest/', None),
163 'cornice': ('https://cornice.readthedocs.io/en/latest/', None),
164 'pyramid': ('https://pyramid.readthedocs.io/en/latest/', None)
165 }
166
167 # -- Options for LaTeX output ---------------------------------------------
168
169 latex_elements = {}
170
171 # Grouping the document tree into LaTeX files. List of tuples
172 # (source start file, target name, title,
173 # author, documentclass [howto, manual, or own class]).
174 latex_documents = [
175 ('index', 'Kinto.tex', u'Kinto Documentation',
176 u'Mozilla Services — Da French Team', 'manual'),
177 ]
178
179
180 # -- Options for manual page output ---------------------------------------
181
182 # One entry per manual page. List of tuples
183 # (source start file, name, description, authors, manual section).
184 man_pages = [
185 ('index', 'kinto', u'Kinto Documentation',
186 [u'Mozilla Services — Da French Team'], 1)
187 ]
188
189
190 # -- Options for Texinfo output -------------------------------------------
191
192 # Grouping the document tree into Texinfo files. List of tuples
193 # (source start file, target name, title, author,
194 # dir menu entry, description, category)
195 texinfo_documents = [
196 ('index', 'Kinto', u'Kinto Documentation',
197 u'Mozilla Services — Da French Team', 'Kinto',
198 'A remote storage service with syncing and sharing abilities.',
199 'Miscellaneous'),
200 ]
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -137,7 +137,7 @@
.. |status-304| replace:: ``304 Not Modified``
.. |status-400| replace:: ``400 Bad Request``
.. |status-401| replace:: ``401 Unauthorized``
-.. |status-403| replace:: ``401 Forbidden``
+.. |status-403| replace:: ``403 Forbidden``
.. |status-404| replace:: ``404 Not Found``
.. |status-405| replace:: ``405 Method Not Allowed``
.. |status-406| replace:: ``406 Not Acceptable``
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -137,7 +137,7 @@\n .. |status-304| replace:: ``304 Not Modified``\n .. |status-400| replace:: ``400 Bad Request``\n .. |status-401| replace:: ``401 Unauthorized``\n-.. |status-403| replace:: ``401 Forbidden``\n+.. |status-403| replace:: ``403 Forbidden``\n .. |status-404| replace:: ``404 Not Found``\n .. |status-405| replace:: ``405 Method Not Allowed``\n .. |status-406| replace:: ``406 Not Acceptable``\n", "issue": "Forbidden is numbered 401 in the docs\nBoth Forbidden and Unauthorized responses are listed as 401 in the docs,\r\nI think Forbidden should be 403, right?\r\n\r\nFor example:\r\nhttp://kinto.readthedocs.io/en/stable/api/1.x/buckets.html#http-status-codes\nForbidden is numbered 401 in the docs\nBoth Forbidden and Unauthorized responses are listed as 401 in the docs,\r\nI think Forbidden should be 403, right?\r\n\r\nFor example:\r\nhttp://kinto.readthedocs.io/en/stable/api/1.x/buckets.html#http-status-codes\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Kinto documentation build configuration file, created by\n# sphinx-quickstart on Mon Feb 2 15:08:06 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\n\n__HERE__ = os.path.dirname(os.path.abspath(__file__))\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# otherwise, readthedocs.io uses their theme by default, so no need to specify\n# it\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(os.path.join('..')))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinxcontrib.httpdomain',\n 'sphinx.ext.extlinks',\n 'sphinx.ext.intersphinx',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\nhtml_additional_pages = {\n 'index': 'indexcontent.html',\n}\n\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Kinto'\ncopyright = u'2015-2016 \u2014 Mozilla Services'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '4.3'\n# The full version, including alpha/beta/rc tags.\nrelease = '4.3.1'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Kintodoc'\n\n\n# -- Options for autodoc --------------------------------------------------\n\nautodoc_member_order = 'bysource'\n# Enable nitpicky mode - which ensures that all references in the docs\n# resolve.\nnitpicky = True\nnitpick_ignore = [\n ('py:obj', 'bool'),\n ('py:obj', 'Exception'),\n ('py:obj', 'int'),\n ('py:obj', 'str'),\n ('py:obj', 'dict'),\n ('py:obj', 'list'),\n ('py:obj', 'tuple'),\n ('py:obj', 'float'),\n ('py:obj', 'cornice.Service'),\n # Member autodoc fails with those:\n # kinto.core.resource.schema\n ('py:class', 'Integer'),\n ('py:class', 'String'),\n # kinto.core.resource\n ('py:class', 'ViewSet'),\n ('py:class', 'ShareableViewSet'),\n ('py:class', 'Model'),\n ('py:class', 'ShareableModel'),\n]\n\n\n# -- Options of extlinks --------------------------------------------------\n\nextlinks = {\n 'github': ('https://github.com/%s/', ''),\n 'rtd': ('https://%s.readthedocs.io', ''),\n 'blog': ('http://www.servicedenuages.fr/%s', '')\n}\n\n\n# -- Substitutions\n\nrst_epilog = \"\"\"\n.. |status-200| replace:: ``200 OK``\n.. |status-201| replace:: ``201 Created``\n.. |status-304| replace:: ``304 Not Modified``\n.. |status-400| replace:: ``400 Bad Request``\n.. |status-401| replace:: ``401 Unauthorized``\n.. |status-403| replace:: ``401 Forbidden``\n.. |status-404| replace:: ``404 Not Found``\n.. |status-405| replace:: ``405 Method Not Allowed``\n.. |status-406| replace:: ``406 Not Acceptable``\n.. |status-409| replace:: ``409 Conflict``\n.. |status-410| replace:: ``410 Gone``\n.. |status-412| replace:: ``412 Precondition Failed``\n.. |status-415| replace:: ``415 Unsupported Media Type``\n.. 
|status-503| replace:: ``503 Service Unavailable``\n\"\"\"\n\n# --\n\ndef setup(app):\n # path relative to _static\n app.add_stylesheet('theme_overrides.css')\n app.add_javascript('piwik.js')\n\n\n# -- Options for intersphinx --------------------------------------------------\n\nintersphinx_mapping = {\n 'colander': ('https://colander.readthedocs.io/en/latest/', None),\n 'cornice': ('https://cornice.readthedocs.io/en/latest/', None),\n 'pyramid': ('https://pyramid.readthedocs.io/en/latest/', None)\n}\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'Kinto.tex', u'Kinto Documentation',\n u'Mozilla Services \u2014 Da French Team', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'kinto', u'Kinto Documentation',\n [u'Mozilla Services \u2014 Da French Team'], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Kinto', u'Kinto Documentation',\n u'Mozilla Services \u2014 Da French Team', 'Kinto',\n 'A remote storage service with syncing and sharing abilities.',\n 'Miscellaneous'),\n]\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Kinto documentation build configuration file, created by\n# sphinx-quickstart on Mon Feb 2 15:08:06 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\n\n__HERE__ = os.path.dirname(os.path.abspath(__file__))\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\n\nif not on_rtd: # only import and set the theme if we're building docs locally\n import sphinx_rtd_theme\n html_theme = 'sphinx_rtd_theme'\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# otherwise, readthedocs.io uses their theme by default, so no need to specify\n# it\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(os.path.join('..')))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinxcontrib.httpdomain',\n 'sphinx.ext.extlinks',\n 'sphinx.ext.intersphinx',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\nhtml_additional_pages = {\n 'index': 'indexcontent.html',\n}\n\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Kinto'\ncopyright = u'2015-2016 \u2014 Mozilla Services'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '4.3'\n# The full version, including alpha/beta/rc tags.\nrelease = '4.3.1'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Kintodoc'\n\n\n# -- Options for autodoc --------------------------------------------------\n\nautodoc_member_order = 'bysource'\n# Enable nitpicky mode - which ensures that all references in the docs\n# resolve.\nnitpicky = True\nnitpick_ignore = [\n ('py:obj', 'bool'),\n ('py:obj', 'Exception'),\n ('py:obj', 'int'),\n ('py:obj', 'str'),\n ('py:obj', 'dict'),\n ('py:obj', 'list'),\n ('py:obj', 'tuple'),\n ('py:obj', 'float'),\n ('py:obj', 'cornice.Service'),\n # Member autodoc fails with those:\n # kinto.core.resource.schema\n ('py:class', 'Integer'),\n ('py:class', 'String'),\n # kinto.core.resource\n ('py:class', 'ViewSet'),\n ('py:class', 'ShareableViewSet'),\n ('py:class', 'Model'),\n ('py:class', 'ShareableModel'),\n]\n\n\n# -- Options of extlinks --------------------------------------------------\n\nextlinks = {\n 'github': ('https://github.com/%s/', ''),\n 'rtd': ('https://%s.readthedocs.io', ''),\n 'blog': ('http://www.servicedenuages.fr/%s', '')\n}\n\n\n# -- Substitutions\n\nrst_epilog = \"\"\"\n.. |status-200| replace:: ``200 OK``\n.. |status-201| replace:: ``201 Created``\n.. |status-304| replace:: ``304 Not Modified``\n.. |status-400| replace:: ``400 Bad Request``\n.. |status-401| replace:: ``401 Unauthorized``\n.. |status-403| replace:: ``403 Forbidden``\n.. |status-404| replace:: ``404 Not Found``\n.. |status-405| replace:: ``405 Method Not Allowed``\n.. |status-406| replace:: ``406 Not Acceptable``\n.. |status-409| replace:: ``409 Conflict``\n.. |status-410| replace:: ``410 Gone``\n.. |status-412| replace:: ``412 Precondition Failed``\n.. |status-415| replace:: ``415 Unsupported Media Type``\n.. 
|status-503| replace:: ``503 Service Unavailable``\n\"\"\"\n\n# --\n\ndef setup(app):\n # path relative to _static\n app.add_stylesheet('theme_overrides.css')\n app.add_javascript('piwik.js')\n\n\n# -- Options for intersphinx --------------------------------------------------\n\nintersphinx_mapping = {\n 'colander': ('https://colander.readthedocs.io/en/latest/', None),\n 'cornice': ('https://cornice.readthedocs.io/en/latest/', None),\n 'pyramid': ('https://pyramid.readthedocs.io/en/latest/', None)\n}\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'Kinto.tex', u'Kinto Documentation',\n u'Mozilla Services \u2014 Da French Team', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'kinto', u'Kinto Documentation',\n [u'Mozilla Services \u2014 Da French Team'], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'Kinto', u'Kinto Documentation',\n u'Mozilla Services \u2014 Da French Team', 'Kinto',\n 'A remote storage service with syncing and sharing abilities.',\n 'Miscellaneous'),\n]\n", "path": "docs/conf.py"}]} | 2,517 | 175 |
gh_patches_debug_28586 | rasdani/github-patches | git_diff | uccser__cs-unplugged-764 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Provide clearer entry points for new visitors on website
Landing page interface redesign
Current:
https://cs-unplugged-dev.appspot.com/en/
Proposed new design:

**Redesign based on older current landing page.
The main changes are the jumbotron banner, nav bar and footer ( issue #695 )
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/general/urls.py`
Content:
```
1 """URL routing for the general application."""
2
3 from django.conf.urls import url
4
5 from . import views
6
7 urlpatterns = [
8 url(
9 r"^$",
10 views.GeneralIndexView.as_view(),
11 name="home"
12 ),
13 url(
14 r"^about/$",
15 views.GeneralAboutView.as_view(),
16 name="about"
17 ),
18 url(
19 r"^computational-thinking/$",
20 views.ComputationalThinkingView.as_view(),
21 name="computational_thinking"
22 ),
23 url(
24 r"^contact/$",
25 views.GeneralContactView.as_view(),
26 name="contact"
27 ),
28 url(
29 r"^people/$",
30 views.GeneralPeopleView.as_view(),
31 name="people"
32 ),
33 url(
34 r"^principles/$",
35 views.GeneralPrinciplesView.as_view(),
36 name="principles"
37 ),
38 ]
39
```
Path: `csunplugged/general/views.py`
Content:
```
1 """Views for the general application."""
2
3 from django.views.generic import TemplateView
4 from django.http import HttpResponse
5
6
7 class GeneralIndexView(TemplateView):
8 """View for the homepage that renders from a template."""
9
10 template_name = "general/index.html"
11
12
13 class GeneralAboutView(TemplateView):
14 """View for the about page that renders from a template."""
15
16 template_name = "general/about.html"
17
18
19 class GeneralContactView(TemplateView):
20 """View for the contact page that renders from a template."""
21
22 template_name = "general/contact.html"
23
24
25 class GeneralPeopleView(TemplateView):
26 """View for the people page that renders from a template."""
27
28 template_name = "general/people.html"
29
30
31 class GeneralPrinciplesView(TemplateView):
32 """View for the princples page that renders from a template."""
33
34 template_name = "general/principles.html"
35
36
37 class ComputationalThinkingView(TemplateView):
38 """View for the Computational Thinking page that renders from a template."""
39
40 template_name = "general/computational-thinking.html"
41
42
43 def health_check(request):
44 """Return heath check response for Google App Engine.
45
46 Returns a 200 HTTP response for Google App Engine to detect the system
47 is running.
48 """
49 return HttpResponse(status=200)
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/csunplugged/general/urls.py b/csunplugged/general/urls.py
--- a/csunplugged/general/urls.py
+++ b/csunplugged/general/urls.py
@@ -15,11 +15,21 @@
views.GeneralAboutView.as_view(),
name="about"
),
+ url(
+ r"^what-is-computer-science/$",
+ views.WhatIsCSView.as_view(),
+ name="what_is_cs"
+ ),
url(
r"^computational-thinking/$",
views.ComputationalThinkingView.as_view(),
name="computational_thinking"
),
+ url(
+ r"^how-do-i-teach-cs-unplugged/$",
+ views.HowDoITeachCSUnpluggedView.as_view(),
+ name="how_do_i_teach_cs_unplugged"
+ ),
url(
r"^contact/$",
views.GeneralContactView.as_view(),
diff --git a/csunplugged/general/views.py b/csunplugged/general/views.py
--- a/csunplugged/general/views.py
+++ b/csunplugged/general/views.py
@@ -34,12 +34,24 @@
template_name = "general/principles.html"
+class WhatIsCSView(TemplateView):
+ """View for the 'What is Computer Science?' page that renders from a template."""
+
+ template_name = "general/what-is-computer-science.html"
+
+
class ComputationalThinkingView(TemplateView):
"""View for the Computational Thinking page that renders from a template."""
template_name = "general/computational-thinking.html"
+class HowDoITeachCSUnpluggedView(TemplateView):
+ """View for the 'How do I teach CS Unplugged?' page that renders from a template."""
+
+ template_name = "general/how-do-i-teach-cs-unplugged.html"
+
+
def health_check(request):
"""Return heath check response for Google App Engine.
| {"golden_diff": "diff --git a/csunplugged/general/urls.py b/csunplugged/general/urls.py\n--- a/csunplugged/general/urls.py\n+++ b/csunplugged/general/urls.py\n@@ -15,11 +15,21 @@\n views.GeneralAboutView.as_view(),\n name=\"about\"\n ),\n+ url(\n+ r\"^what-is-computer-science/$\",\n+ views.WhatIsCSView.as_view(),\n+ name=\"what_is_cs\"\n+ ),\n url(\n r\"^computational-thinking/$\",\n views.ComputationalThinkingView.as_view(),\n name=\"computational_thinking\"\n ),\n+ url(\n+ r\"^how-do-i-teach-cs-unplugged/$\",\n+ views.HowDoITeachCSUnpluggedView.as_view(),\n+ name=\"how_do_i_teach_cs_unplugged\"\n+ ),\n url(\n r\"^contact/$\",\n views.GeneralContactView.as_view(),\ndiff --git a/csunplugged/general/views.py b/csunplugged/general/views.py\n--- a/csunplugged/general/views.py\n+++ b/csunplugged/general/views.py\n@@ -34,12 +34,24 @@\n template_name = \"general/principles.html\"\n \n \n+class WhatIsCSView(TemplateView):\n+ \"\"\"View for the 'What is Computer Science?' page that renders from a template.\"\"\"\n+\n+ template_name = \"general/what-is-computer-science.html\"\n+\n+\n class ComputationalThinkingView(TemplateView):\n \"\"\"View for the Computational Thinking page that renders from a template.\"\"\"\n \n template_name = \"general/computational-thinking.html\"\n \n \n+class HowDoITeachCSUnpluggedView(TemplateView):\n+ \"\"\"View for the 'How do I teach CS Unplugged?' page that renders from a template.\"\"\"\n+\n+ template_name = \"general/how-do-i-teach-cs-unplugged.html\"\n+\n+\n def health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n", "issue": "Provide clearer entry points for new visitors on website\n\nLanding page interface redesign\nCurrent:\r\nhttps://cs-unplugged-dev.appspot.com/en/\r\n\r\nProposed new design:\r\n\r\n\r\n**Redesign based on older current landing page.\r\n\r\nThe main changes are the jumbotron banner, nav bar and footer ( issue #695 )\n", "before_files": [{"content": "\"\"\"URL routing for the general application.\"\"\"\n\nfrom django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(\n r\"^$\",\n views.GeneralIndexView.as_view(),\n name=\"home\"\n ),\n url(\n r\"^about/$\",\n views.GeneralAboutView.as_view(),\n name=\"about\"\n ),\n url(\n r\"^computational-thinking/$\",\n views.ComputationalThinkingView.as_view(),\n name=\"computational_thinking\"\n ),\n url(\n r\"^contact/$\",\n views.GeneralContactView.as_view(),\n name=\"contact\"\n ),\n url(\n r\"^people/$\",\n views.GeneralPeopleView.as_view(),\n name=\"people\"\n ),\n url(\n r\"^principles/$\",\n views.GeneralPrinciplesView.as_view(),\n name=\"principles\"\n ),\n]\n", "path": "csunplugged/general/urls.py"}, {"content": "\"\"\"Views for the general application.\"\"\"\n\nfrom django.views.generic import TemplateView\nfrom django.http import HttpResponse\n\n\nclass GeneralIndexView(TemplateView):\n \"\"\"View for the homepage that renders from a template.\"\"\"\n\n template_name = \"general/index.html\"\n\n\nclass GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n\n template_name = \"general/about.html\"\n\n\nclass GeneralContactView(TemplateView):\n \"\"\"View for the contact page that renders from a template.\"\"\"\n\n template_name = \"general/contact.html\"\n\n\nclass GeneralPeopleView(TemplateView):\n \"\"\"View for the people page that renders from a template.\"\"\"\n\n template_name = \"general/people.html\"\n\n\nclass GeneralPrinciplesView(TemplateView):\n \"\"\"View for the princples page that renders from a template.\"\"\"\n\n template_name = \"general/principles.html\"\n\n\nclass ComputationalThinkingView(TemplateView):\n \"\"\"View for the Computational Thinking page that renders from a template.\"\"\"\n\n template_name = \"general/computational-thinking.html\"\n\n\ndef health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n\n Returns a 200 HTTP response for Google App Engine to detect the system\n is running.\n \"\"\"\n return HttpResponse(status=200)\n", "path": "csunplugged/general/views.py"}], "after_files": [{"content": "\"\"\"URL routing for the general application.\"\"\"\n\nfrom django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(\n r\"^$\",\n views.GeneralIndexView.as_view(),\n name=\"home\"\n ),\n url(\n r\"^about/$\",\n views.GeneralAboutView.as_view(),\n name=\"about\"\n ),\n url(\n r\"^what-is-computer-science/$\",\n views.WhatIsCSView.as_view(),\n name=\"what_is_cs\"\n ),\n url(\n r\"^computational-thinking/$\",\n views.ComputationalThinkingView.as_view(),\n name=\"computational_thinking\"\n ),\n url(\n r\"^how-do-i-teach-cs-unplugged/$\",\n views.HowDoITeachCSUnpluggedView.as_view(),\n name=\"how_do_i_teach_cs_unplugged\"\n ),\n url(\n r\"^contact/$\",\n views.GeneralContactView.as_view(),\n name=\"contact\"\n ),\n url(\n r\"^people/$\",\n views.GeneralPeopleView.as_view(),\n name=\"people\"\n ),\n url(\n r\"^principles/$\",\n views.GeneralPrinciplesView.as_view(),\n name=\"principles\"\n ),\n]\n", "path": "csunplugged/general/urls.py"}, {"content": "\"\"\"Views for the general application.\"\"\"\n\nfrom django.views.generic import TemplateView\nfrom django.http import HttpResponse\n\n\nclass GeneralIndexView(TemplateView):\n \"\"\"View for the homepage that renders from a template.\"\"\"\n\n template_name = \"general/index.html\"\n\n\nclass GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n\n template_name = \"general/about.html\"\n\n\nclass GeneralContactView(TemplateView):\n \"\"\"View for the contact page that renders from a template.\"\"\"\n\n template_name = \"general/contact.html\"\n\n\nclass GeneralPeopleView(TemplateView):\n \"\"\"View for the people page that renders from a template.\"\"\"\n\n template_name = \"general/people.html\"\n\n\nclass GeneralPrinciplesView(TemplateView):\n \"\"\"View for the princples page that renders from a template.\"\"\"\n\n template_name = \"general/principles.html\"\n\n\nclass WhatIsCSView(TemplateView):\n \"\"\"View for the 'What is Computer Science?' page that renders from a template.\"\"\"\n\n template_name = \"general/what-is-computer-science.html\"\n\n\nclass ComputationalThinkingView(TemplateView):\n \"\"\"View for the Computational Thinking page that renders from a template.\"\"\"\n\n template_name = \"general/computational-thinking.html\"\n\n\nclass HowDoITeachCSUnpluggedView(TemplateView):\n \"\"\"View for the 'How do I teach CS Unplugged?' page that renders from a template.\"\"\"\n\n template_name = \"general/how-do-i-teach-cs-unplugged.html\"\n\n\ndef health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n\n Returns a 200 HTTP response for Google App Engine to detect the system\n is running.\n \"\"\"\n return HttpResponse(status=200)\n", "path": "csunplugged/general/views.py"}]} | 1,027 | 437 |
gh_patches_debug_16392 | rasdani/github-patches | git_diff | cal-itp__benefits-864 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve checks in `oauth` `authorize` view
We ran into a case while deploying #862 where our `auth_provider` model is configured with a `scope`, but the response from the Auth server doesn't actually contain the scope (probably still being set up).
The `oauth` `authorize` function doesn't handle this case and raises an `AttributeError`
```
File "/home/calitp/app/benefits/oauth/views.py", line 70, in authorize
claim_flag = (userinfo.get(verifier_claim) if userinfo else "false").lower() == "true"
AttributeError: 'NoneType' object has no attribute 'lower'
```
Let's add a check and tests for this so that the logs for this are more helpful.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/oauth/views.py`
Content:
```
1 import logging
2
3 from django.shortcuts import redirect
4 from django.urls import reverse
5 from django.utils.decorators import decorator_from_middleware
6
7 from benefits.core import session
8 from benefits.core.middleware import VerifierSessionRequired
9 from . import analytics, redirects
10 from .client import oauth
11
12
13 logger = logging.getLogger(__name__)
14
15
16 ROUTE_AUTH = "oauth:authorize"
17 ROUTE_START = "eligibility:start"
18 ROUTE_CONFIRM = "eligibility:confirm"
19 ROUTE_UNVERIFIED = "eligibility:unverified"
20 ROUTE_POST_LOGOUT = "oauth:post_logout"
21
22
23 @decorator_from_middleware(VerifierSessionRequired)
24 def login(request):
25 """View implementing OIDC authorize_redirect."""
26 verifier = session.verifier(request)
27 oauth_client = oauth.create_client(verifier.auth_provider.client_name)
28
29 if not oauth_client:
30 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}")
31
32 route = reverse(ROUTE_AUTH)
33 redirect_uri = redirects.generate_redirect_uri(request, route)
34
35 logger.debug(f"OAuth authorize_redirect with redirect_uri: {redirect_uri}")
36
37 analytics.started_sign_in(request)
38
39 return oauth_client.authorize_redirect(request, redirect_uri)
40
41
42 @decorator_from_middleware(VerifierSessionRequired)
43 def authorize(request):
44 """View implementing OIDC token authorization."""
45 verifier = session.verifier(request)
46 oauth_client = oauth.create_client(verifier.auth_provider.client_name)
47
48 if not oauth_client:
49 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}")
50
51 logger.debug("Attempting to authorize OAuth access token")
52 token = oauth_client.authorize_access_token(request)
53
54 if token is None:
55 logger.warning("Could not authorize OAuth access token")
56 return redirect(ROUTE_START)
57
58 logger.debug("OAuth access token authorized")
59
60 # We store the id_token in the user's session. This is the minimal amount of information needed later to log the user out.
61 id_token = token["id_token"]
62
63 # We store the returned claim in case it can be used later in eligibility verification.
64 verifier_claim = verifier.auth_provider.claim
65 stored_claim = None
66
67 if verifier_claim:
68 userinfo = token.get("userinfo")
69 # the claim comes back in userinfo like { "claim": "True" | "False" }
70 claim_flag = (userinfo.get(verifier_claim) if userinfo else "false").lower() == "true"
71 # if userinfo contains our claim and the flag is true, store the *claim*
72 stored_claim = verifier_claim if claim_flag else None
73
74 session.update(request, oauth_token=id_token, oauth_claim=stored_claim)
75
76 analytics.finished_sign_in(request)
77
78 return redirect(ROUTE_CONFIRM)
79
80
81 def cancel(request):
82 """View implementing cancellation of OIDC authorization."""
83
84 analytics.canceled_sign_in(request)
85
86 return redirect(ROUTE_UNVERIFIED)
87
88
89 @decorator_from_middleware(VerifierSessionRequired)
90 def logout(request):
91 """View implementing OIDC and application sign out."""
92 verifier = session.verifier(request)
93 oauth_client = oauth.create_client(verifier.auth_provider.client_name)
94
95 if not oauth_client:
96 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}")
97
98 analytics.started_sign_out(request)
99
100 # overwrite the oauth session token, the user is signed out of the app
101 token = session.oauth_token(request)
102 session.logout(request)
103
104 route = reverse(ROUTE_POST_LOGOUT)
105 redirect_uri = redirects.generate_redirect_uri(request, route)
106
107 logger.debug(f"OAuth end_session_endpoint with redirect_uri: {redirect_uri}")
108
109 # send the user through the end_session_endpoint, redirecting back to
110 # the post_logout route
111 return redirects.deauthorize_redirect(oauth_client, token, redirect_uri)
112
113
114 def post_logout(request):
115 """View routes the user to their origin after sign out."""
116
117 analytics.finished_sign_out(request)
118
119 origin = session.origin(request)
120 return redirect(origin)
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/benefits/oauth/views.py b/benefits/oauth/views.py
--- a/benefits/oauth/views.py
+++ b/benefits/oauth/views.py
@@ -66,10 +66,15 @@
if verifier_claim:
userinfo = token.get("userinfo")
- # the claim comes back in userinfo like { "claim": "True" | "False" }
- claim_flag = (userinfo.get(verifier_claim) if userinfo else "false").lower() == "true"
- # if userinfo contains our claim and the flag is true, store the *claim*
- stored_claim = verifier_claim if claim_flag else None
+
+ if userinfo:
+ claim_value = userinfo.get(verifier_claim)
+ # the claim comes back in userinfo like { "claim": "True" | "False" }
+ if claim_value is None:
+ logger.warning(f"userinfo did not contain: {verifier_claim}")
+ elif claim_value.lower() == "true":
+ # if userinfo contains our claim and the flag is true, store the *claim*
+ stored_claim = verifier_claim
session.update(request, oauth_token=id_token, oauth_claim=stored_claim)
| {"golden_diff": "diff --git a/benefits/oauth/views.py b/benefits/oauth/views.py\n--- a/benefits/oauth/views.py\n+++ b/benefits/oauth/views.py\n@@ -66,10 +66,15 @@\n \n if verifier_claim:\n userinfo = token.get(\"userinfo\")\n- # the claim comes back in userinfo like { \"claim\": \"True\" | \"False\" }\n- claim_flag = (userinfo.get(verifier_claim) if userinfo else \"false\").lower() == \"true\"\n- # if userinfo contains our claim and the flag is true, store the *claim*\n- stored_claim = verifier_claim if claim_flag else None\n+\n+ if userinfo:\n+ claim_value = userinfo.get(verifier_claim)\n+ # the claim comes back in userinfo like { \"claim\": \"True\" | \"False\" }\n+ if claim_value is None:\n+ logger.warning(f\"userinfo did not contain: {verifier_claim}\")\n+ elif claim_value.lower() == \"true\":\n+ # if userinfo contains our claim and the flag is true, store the *claim*\n+ stored_claim = verifier_claim\n \n session.update(request, oauth_token=id_token, oauth_claim=stored_claim)\n", "issue": "Improve checks in `oauth` `authorize` view\nWe ran into a case while deploying #862 where our `auth_provider` model is configured with a `scope`, but the response from the Auth server doesn't actually contain the scope (probably still being set up).\r\n\r\nThe `oauth` `authorize` function doesn't handle this case and raises an `AttributeError`\r\n```\r\n File \"/home/calitp/app/benefits/oauth/views.py\", line 70, in authorize\r\n claim_flag = (userinfo.get(verifier_claim) if userinfo else \"false\").lower() == \"true\"\r\nAttributeError: 'NoneType' object has no attribute 'lower'\r\n```\r\n\r\nLet's add a check and tests for this so that the logs for this are more helpful.\n", "before_files": [{"content": "import logging\n\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\n\nfrom benefits.core import session\nfrom benefits.core.middleware import VerifierSessionRequired\nfrom . 
import analytics, redirects\nfrom .client import oauth\n\n\nlogger = logging.getLogger(__name__)\n\n\nROUTE_AUTH = \"oauth:authorize\"\nROUTE_START = \"eligibility:start\"\nROUTE_CONFIRM = \"eligibility:confirm\"\nROUTE_UNVERIFIED = \"eligibility:unverified\"\nROUTE_POST_LOGOUT = \"oauth:post_logout\"\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef login(request):\n \"\"\"View implementing OIDC authorize_redirect.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n route = reverse(ROUTE_AUTH)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth authorize_redirect with redirect_uri: {redirect_uri}\")\n\n analytics.started_sign_in(request)\n\n return oauth_client.authorize_redirect(request, redirect_uri)\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef authorize(request):\n \"\"\"View implementing OIDC token authorization.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n logger.debug(\"Attempting to authorize OAuth access token\")\n token = oauth_client.authorize_access_token(request)\n\n if token is None:\n logger.warning(\"Could not authorize OAuth access token\")\n return redirect(ROUTE_START)\n\n logger.debug(\"OAuth access token authorized\")\n\n # We store the id_token in the user's session. This is the minimal amount of information needed later to log the user out.\n id_token = token[\"id_token\"]\n\n # We store the returned claim in case it can be used later in eligibility verification.\n verifier_claim = verifier.auth_provider.claim\n stored_claim = None\n\n if verifier_claim:\n userinfo = token.get(\"userinfo\")\n # the claim comes back in userinfo like { \"claim\": \"True\" | \"False\" }\n claim_flag = (userinfo.get(verifier_claim) if userinfo else \"false\").lower() == \"true\"\n # if userinfo contains our claim and the flag is true, store the *claim*\n stored_claim = verifier_claim if claim_flag else None\n\n session.update(request, oauth_token=id_token, oauth_claim=stored_claim)\n\n analytics.finished_sign_in(request)\n\n return redirect(ROUTE_CONFIRM)\n\n\ndef cancel(request):\n \"\"\"View implementing cancellation of OIDC authorization.\"\"\"\n\n analytics.canceled_sign_in(request)\n\n return redirect(ROUTE_UNVERIFIED)\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef logout(request):\n \"\"\"View implementing OIDC and application sign out.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n analytics.started_sign_out(request)\n\n # overwrite the oauth session token, the user is signed out of the app\n token = session.oauth_token(request)\n session.logout(request)\n\n route = reverse(ROUTE_POST_LOGOUT)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth end_session_endpoint with redirect_uri: {redirect_uri}\")\n\n # send the user through the end_session_endpoint, redirecting back to\n # the post_logout route\n return redirects.deauthorize_redirect(oauth_client, token, redirect_uri)\n\n\ndef post_logout(request):\n 
\"\"\"View routes the user to their origin after sign out.\"\"\"\n\n analytics.finished_sign_out(request)\n\n origin = session.origin(request)\n return redirect(origin)\n", "path": "benefits/oauth/views.py"}], "after_files": [{"content": "import logging\n\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\n\nfrom benefits.core import session\nfrom benefits.core.middleware import VerifierSessionRequired\nfrom . import analytics, redirects\nfrom .client import oauth\n\n\nlogger = logging.getLogger(__name__)\n\n\nROUTE_AUTH = \"oauth:authorize\"\nROUTE_START = \"eligibility:start\"\nROUTE_CONFIRM = \"eligibility:confirm\"\nROUTE_UNVERIFIED = \"eligibility:unverified\"\nROUTE_POST_LOGOUT = \"oauth:post_logout\"\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef login(request):\n \"\"\"View implementing OIDC authorize_redirect.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n route = reverse(ROUTE_AUTH)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth authorize_redirect with redirect_uri: {redirect_uri}\")\n\n analytics.started_sign_in(request)\n\n return oauth_client.authorize_redirect(request, redirect_uri)\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef authorize(request):\n \"\"\"View implementing OIDC token authorization.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n logger.debug(\"Attempting to authorize OAuth access token\")\n token = oauth_client.authorize_access_token(request)\n\n if token is None:\n logger.warning(\"Could not authorize OAuth access token\")\n return redirect(ROUTE_START)\n\n logger.debug(\"OAuth access token authorized\")\n\n # We store the id_token in the user's session. 
This is the minimal amount of information needed later to log the user out.\n id_token = token[\"id_token\"]\n\n # We store the returned claim in case it can be used later in eligibility verification.\n verifier_claim = verifier.auth_provider.claim\n stored_claim = None\n\n if verifier_claim:\n userinfo = token.get(\"userinfo\")\n\n if userinfo:\n claim_value = userinfo.get(verifier_claim)\n # the claim comes back in userinfo like { \"claim\": \"True\" | \"False\" }\n if claim_value is None:\n logger.warning(f\"userinfo did not contain: {verifier_claim}\")\n elif claim_value.lower() == \"true\":\n # if userinfo contains our claim and the flag is true, store the *claim*\n stored_claim = verifier_claim\n\n session.update(request, oauth_token=id_token, oauth_claim=stored_claim)\n\n analytics.finished_sign_in(request)\n\n return redirect(ROUTE_CONFIRM)\n\n\ndef cancel(request):\n \"\"\"View implementing cancellation of OIDC authorization.\"\"\"\n\n analytics.canceled_sign_in(request)\n\n return redirect(ROUTE_UNVERIFIED)\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef logout(request):\n \"\"\"View implementing OIDC and application sign out.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n analytics.started_sign_out(request)\n\n # overwrite the oauth session token, the user is signed out of the app\n token = session.oauth_token(request)\n session.logout(request)\n\n route = reverse(ROUTE_POST_LOGOUT)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth end_session_endpoint with redirect_uri: {redirect_uri}\")\n\n # send the user through the end_session_endpoint, redirecting back to\n # the post_logout route\n return redirects.deauthorize_redirect(oauth_client, token, redirect_uri)\n\n\ndef post_logout(request):\n \"\"\"View routes the user to their origin after sign out.\"\"\"\n\n analytics.finished_sign_out(request)\n\n origin = session.origin(request)\n return redirect(origin)\n", "path": "benefits/oauth/views.py"}]} | 1,522 | 259 |
gh_patches_debug_7394 | rasdani/github-patches | git_diff | aws__aws-cli-688 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Multipart COPY operation fails when source filename contains + character
If you perform a bucket -> bucket cp or sync operation and one of the filenames in the source bucket contains a `+` character, you get this error:
```
ClientError: A client error (NoSuchKey) occurred when calling the UploadPartCopy operation: The specified key does not exist.
```
The problem is that the filename is not being quoted in the `x-amz-copy-source` header. For example, if the source filename is `foo++bar.txt` the header is:
```
x-amz-copy-source:fiebazfoobar/foo++bar.txt
```
but S3 expects it to be:
```
x-amz-copy-source:fiebazfoobar/foo%2B%2Bbar.txt
```
This only affects multipart COPY operations.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/s3/fileinfo.py`
Content:
```
1 import os
2 import sys
3 import time
4 from functools import partial
5 import hashlib
6
7 from dateutil.parser import parse
8 from dateutil.tz import tzlocal
9
10 from botocore.compat import quote
11 from awscli.customizations.s3.utils import find_bucket_key, \
12 check_etag, check_error, operate, uni_print, \
13 guess_content_type, MD5Error
14
15
16 def read_file(filename):
17 """
18 This reads the file into a form that can be sent to S3
19 """
20 with open(filename, 'rb') as in_file:
21 return in_file.read()
22
23
24 def save_file(filename, response_data, last_update):
25 """
26 This writes to the file upon downloading. It reads the data in the
27 response. Makes a new directory if needed and then writes the
28 data to the file. It also modifies the last modified time to that
29 of the S3 object.
30 """
31 body = response_data['Body']
32 etag = response_data['ETag'][1:-1]
33 d = os.path.dirname(filename)
34 try:
35 if not os.path.exists(d):
36 os.makedirs(d)
37 except Exception:
38 pass
39 md5 = hashlib.md5()
40 file_chunks = iter(partial(body.read, 1024 * 1024), b'')
41 with open(filename, 'wb') as out_file:
42 if not _is_multipart_etag(etag):
43 for chunk in file_chunks:
44 md5.update(chunk)
45 out_file.write(chunk)
46 else:
47 for chunk in file_chunks:
48 out_file.write(chunk)
49 if not _is_multipart_etag(etag):
50 if etag != md5.hexdigest():
51 os.remove(filename)
52 raise MD5Error(filename)
53 last_update_tuple = last_update.timetuple()
54 mod_timestamp = time.mktime(last_update_tuple)
55 os.utime(filename, (int(mod_timestamp), int(mod_timestamp)))
56
57
58 def _is_multipart_etag(etag):
59 return '-' in etag
60
61
62 class TaskInfo(object):
63 """
64 This class contains important details related to performing a task. This
65 object is usually only used for creating buckets, removing buckets, and
66 listing objects/buckets. This object contains the attributes and
67 functions needed to perform the task. Note that just instantiating one
68 of these objects will not be enough to run a listing or bucket command.
69 unless ``session`` and ``region`` are specified upon instantiation.
70
71 :param src: the source path
72 :type src: string
73 :param src_type: if the source file is s3 or local.
74 :type src_type: string
75 :param operation: the operation being performed.
76 :type operation: string
77 :param session: ``botocore.session`` object
78 :param region: The region for the endpoint
79
80 Note that a local file will always have its absolute path, and a s3 file
81 will have its path in the form of bucket/key
82 """
83 def __init__(self, src, src_type, operation_name, service, endpoint):
84 self.src = src
85 self.src_type = src_type
86 self.operation_name = operation_name
87 self.service = service
88 self.endpoint = endpoint
89
90 def make_bucket(self):
91 """
92 This opereation makes a bucket.
93 """
94 bucket, key = find_bucket_key(self.src)
95 bucket_config = {'LocationConstraint': self.endpoint.region_name}
96 params = {'endpoint': self.endpoint, 'bucket': bucket}
97 if self.endpoint.region_name != 'us-east-1':
98 params['create_bucket_configuration'] = bucket_config
99 response_data, http = operate(self.service, 'CreateBucket', params)
100
101 def remove_bucket(self):
102 """
103 This operation removes a bucket.
104 """
105 bucket, key = find_bucket_key(self.src)
106 params = {'endpoint': self.endpoint, 'bucket': bucket}
107 response_data, http = operate(self.service, 'DeleteBucket', params)
108
109
110 class FileInfo(TaskInfo):
111 """
112 This is a child object of the ``TaskInfo`` object. It can perform more
113 operations such as ``upload``, ``download``, ``copy``, ``delete``,
114 ``move``. Similiarly to
115 ``TaskInfo`` objects attributes like ``session`` need to be set in order
116 to perform operations.
117
118 :param dest: the destination path
119 :type dest: string
120 :param compare_key: the name of the file relative to the specified
121 directory/prefix. This variable is used when performing synching
122 or if the destination file is adopting the source file's name.
123 :type compare_key: string
124 :param size: The size of the file in bytes.
125 :type size: integer
126 :param last_update: the local time of last modification.
127 :type last_update: datetime object
128 :param dest_type: if the destination is s3 or local.
129 :param dest_type: string
130 :param parameters: a dictionary of important values this is assigned in
131 the ``BasicTask`` object.
132 """
133 def __init__(self, src, dest=None, compare_key=None, size=None,
134 last_update=None, src_type=None, dest_type=None,
135 operation_name=None, service=None, endpoint=None,
136 parameters=None):
137 super(FileInfo, self).__init__(src, src_type=src_type,
138 operation_name=operation_name,
139 service=service,
140 endpoint=endpoint)
141 self.dest = dest
142 self.dest_type = dest_type
143 self.compare_key = compare_key
144 self.size = size
145 self.last_update = last_update
146 # Usually inject ``parameters`` from ``BasicTask`` class.
147 if parameters is not None:
148 self.parameters = parameters
149 else:
150 self.parameters = {'acl': None,
151 'sse': None}
152
153 def _permission_to_param(self, permission):
154 if permission == 'read':
155 return 'grant_read'
156 if permission == 'full':
157 return 'grant_full_control'
158 if permission == 'readacl':
159 return 'grant_read_acp'
160 if permission == 'writeacl':
161 return 'grant_write_acp'
162 raise ValueError('permission must be one of: '
163 'read|readacl|writeacl|full')
164
165 def _handle_object_params(self, params):
166 if self.parameters['acl']:
167 params['acl'] = self.parameters['acl'][0]
168 if self.parameters['grants']:
169 for grant in self.parameters['grants']:
170 try:
171 permission, grantee = grant.split('=', 1)
172 except ValueError:
173 raise ValueError('grants should be of the form '
174 'permission=principal')
175 params[self._permission_to_param(permission)] = grantee
176 if self.parameters['sse']:
177 params['server_side_encryption'] = 'AES256'
178 if self.parameters['storage_class']:
179 params['storage_class'] = self.parameters['storage_class'][0]
180 if self.parameters['website_redirect']:
181 params['website_redirect_location'] = \
182 self.parameters['website_redirect'][0]
183 if self.parameters['guess_mime_type']:
184 self._inject_content_type(params, self.src)
185 if self.parameters['content_type']:
186 params['content_type'] = self.parameters['content_type'][0]
187 if self.parameters['cache_control']:
188 params['cache_control'] = self.parameters['cache_control'][0]
189 if self.parameters['content_disposition']:
190 params['content_disposition'] = \
191 self.parameters['content_disposition'][0]
192 if self.parameters['content_encoding']:
193 params['content_encoding'] = self.parameters['content_encoding'][0]
194 if self.parameters['content_language']:
195 params['content_language'] = self.parameters['content_language'][0]
196 if self.parameters['expires']:
197 params['expires'] = self.parameters['expires'][0]
198
199 def upload(self):
200 """
201 Redirects the file to the multipart upload function if the file is
202 large. If it is small enough, it puts the file as an object in s3.
203 """
204 with open(self.src, 'rb') as body:
205 bucket, key = find_bucket_key(self.dest)
206 params = {
207 'endpoint': self.endpoint,
208 'bucket': bucket,
209 'key': key,
210 'body': body,
211 }
212 self._handle_object_params(params)
213 response_data, http = operate(self.service, 'PutObject', params)
214 etag = response_data['ETag'][1:-1]
215 body.seek(0)
216 check_etag(etag, body)
217
218 def _inject_content_type(self, params, filename):
219 # Add a content type param if we can guess the type.
220 guessed_type = guess_content_type(filename)
221 if guessed_type is not None:
222 params['content_type'] = guessed_type
223
224 def download(self):
225 """
226 Redirects the file to the multipart download function if the file is
227 large. If it is small enough, it gets the file as an object from s3.
228 """
229 bucket, key = find_bucket_key(self.src)
230 params = {'endpoint': self.endpoint, 'bucket': bucket, 'key': key}
231 response_data, http = operate(self.service, 'GetObject', params)
232 save_file(self.dest, response_data, self.last_update)
233
234 def copy(self):
235 """
236 Copies a object in s3 to another location in s3.
237 """
238 copy_source = quote(self.src.encode('utf-8'), safe='/~')
239 bucket, key = find_bucket_key(self.dest)
240 params = {'endpoint': self.endpoint, 'bucket': bucket,
241 'copy_source': copy_source, 'key': key}
242 self._handle_object_params(params)
243 response_data, http = operate(self.service, 'CopyObject', params)
244
245 def delete(self):
246 """
247 Deletes the file from s3 or local. The src file and type is used
248 from the file info object.
249 """
250 if (self.src_type == 's3'):
251 bucket, key = find_bucket_key(self.src)
252 params = {'endpoint': self.endpoint, 'bucket': bucket, 'key': key}
253 response_data, http = operate(self.service, 'DeleteObject',
254 params)
255 else:
256 os.remove(self.src)
257
258 def move(self):
259 """
260 Implements a move command for s3.
261 """
262 src = self.src_type
263 dest = self.dest_type
264 if src == 'local' and dest == 's3':
265 self.upload()
266 elif src == 's3' and dest == 's3':
267 self.copy()
268 elif src == 's3' and dest == 'local':
269 self.download()
270 else:
271 raise Exception("Invalid path arguments for mv")
272 self.delete()
273
274 def create_multipart_upload(self):
275 bucket, key = find_bucket_key(self.dest)
276 params = {'endpoint': self.endpoint, 'bucket': bucket, 'key': key}
277 self._handle_object_params(params)
278 response_data, http = operate(self.service, 'CreateMultipartUpload',
279 params)
280 upload_id = response_data['UploadId']
281 return upload_id
282
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/customizations/s3/fileinfo.py b/awscli/customizations/s3/fileinfo.py
--- a/awscli/customizations/s3/fileinfo.py
+++ b/awscli/customizations/s3/fileinfo.py
@@ -235,7 +235,7 @@
"""
Copies a object in s3 to another location in s3.
"""
- copy_source = quote(self.src.encode('utf-8'), safe='/~')
+ copy_source = self.src
bucket, key = find_bucket_key(self.dest)
params = {'endpoint': self.endpoint, 'bucket': bucket,
'copy_source': copy_source, 'key': key}
| {"golden_diff": "diff --git a/awscli/customizations/s3/fileinfo.py b/awscli/customizations/s3/fileinfo.py\n--- a/awscli/customizations/s3/fileinfo.py\n+++ b/awscli/customizations/s3/fileinfo.py\n@@ -235,7 +235,7 @@\n \"\"\"\n Copies a object in s3 to another location in s3.\n \"\"\"\n- copy_source = quote(self.src.encode('utf-8'), safe='/~')\n+ copy_source = self.src\n bucket, key = find_bucket_key(self.dest)\n params = {'endpoint': self.endpoint, 'bucket': bucket,\n 'copy_source': copy_source, 'key': key}\n", "issue": "Multipart COPY operation fails when source filename contains + character\nIf you perform a bucket -> bucket cp or sync operation and one of the filenames in the source bucket contains a `+` character, you get this error:\n\n```\nClientError: A client error (NoSuchKey) occurred when calling the UploadPartCopy operation: The specified key does not exist.\n```\n\nThe problem is that the filename is not being quoted in the `x-amz-copy-source` header. For example, if the source filename is `foo++bar.txt` the header is:\n\n```\nx-amz-copy-source:fiebazfoobar/foo++bar.txt\n```\n\nbut S3 expects it to be:\n\n```\nx-amz-copy-source:fiebazfoobar/foo%2B%2Bbar.txt\n```\n\nThis only affects multipart COPY operations.\n\n", "before_files": [{"content": "import os\nimport sys\nimport time\nfrom functools import partial\nimport hashlib\n\nfrom dateutil.parser import parse\nfrom dateutil.tz import tzlocal\n\nfrom botocore.compat import quote\nfrom awscli.customizations.s3.utils import find_bucket_key, \\\n check_etag, check_error, operate, uni_print, \\\n guess_content_type, MD5Error\n\n\ndef read_file(filename):\n \"\"\"\n This reads the file into a form that can be sent to S3\n \"\"\"\n with open(filename, 'rb') as in_file:\n return in_file.read()\n\n\ndef save_file(filename, response_data, last_update):\n \"\"\"\n This writes to the file upon downloading. It reads the data in the\n response. Makes a new directory if needed and then writes the\n data to the file. It also modifies the last modified time to that\n of the S3 object.\n \"\"\"\n body = response_data['Body']\n etag = response_data['ETag'][1:-1]\n d = os.path.dirname(filename)\n try:\n if not os.path.exists(d):\n os.makedirs(d)\n except Exception:\n pass\n md5 = hashlib.md5()\n file_chunks = iter(partial(body.read, 1024 * 1024), b'')\n with open(filename, 'wb') as out_file:\n if not _is_multipart_etag(etag):\n for chunk in file_chunks:\n md5.update(chunk)\n out_file.write(chunk)\n else:\n for chunk in file_chunks:\n out_file.write(chunk)\n if not _is_multipart_etag(etag):\n if etag != md5.hexdigest():\n os.remove(filename)\n raise MD5Error(filename)\n last_update_tuple = last_update.timetuple()\n mod_timestamp = time.mktime(last_update_tuple)\n os.utime(filename, (int(mod_timestamp), int(mod_timestamp)))\n\n\ndef _is_multipart_etag(etag):\n return '-' in etag\n\n\nclass TaskInfo(object):\n \"\"\"\n This class contains important details related to performing a task. This\n object is usually only used for creating buckets, removing buckets, and\n listing objects/buckets. This object contains the attributes and\n functions needed to perform the task. 
Note that just instantiating one\n of these objects will not be enough to run a listing or bucket command.\n unless ``session`` and ``region`` are specified upon instantiation.\n\n :param src: the source path\n :type src: string\n :param src_type: if the source file is s3 or local.\n :type src_type: string\n :param operation: the operation being performed.\n :type operation: string\n :param session: ``botocore.session`` object\n :param region: The region for the endpoint\n\n Note that a local file will always have its absolute path, and a s3 file\n will have its path in the form of bucket/key\n \"\"\"\n def __init__(self, src, src_type, operation_name, service, endpoint):\n self.src = src\n self.src_type = src_type\n self.operation_name = operation_name\n self.service = service\n self.endpoint = endpoint\n\n def make_bucket(self):\n \"\"\"\n This opereation makes a bucket.\n \"\"\"\n bucket, key = find_bucket_key(self.src)\n bucket_config = {'LocationConstraint': self.endpoint.region_name}\n params = {'endpoint': self.endpoint, 'bucket': bucket}\n if self.endpoint.region_name != 'us-east-1':\n params['create_bucket_configuration'] = bucket_config\n response_data, http = operate(self.service, 'CreateBucket', params)\n\n def remove_bucket(self):\n \"\"\"\n This operation removes a bucket.\n \"\"\"\n bucket, key = find_bucket_key(self.src)\n params = {'endpoint': self.endpoint, 'bucket': bucket}\n response_data, http = operate(self.service, 'DeleteBucket', params)\n\n\nclass FileInfo(TaskInfo):\n \"\"\"\n This is a child object of the ``TaskInfo`` object. It can perform more\n operations such as ``upload``, ``download``, ``copy``, ``delete``,\n ``move``. Similiarly to\n ``TaskInfo`` objects attributes like ``session`` need to be set in order\n to perform operations.\n\n :param dest: the destination path\n :type dest: string\n :param compare_key: the name of the file relative to the specified\n directory/prefix. 
This variable is used when performing synching\n or if the destination file is adopting the source file's name.\n :type compare_key: string\n :param size: The size of the file in bytes.\n :type size: integer\n :param last_update: the local time of last modification.\n :type last_update: datetime object\n :param dest_type: if the destination is s3 or local.\n :param dest_type: string\n :param parameters: a dictionary of important values this is assigned in\n the ``BasicTask`` object.\n \"\"\"\n def __init__(self, src, dest=None, compare_key=None, size=None,\n last_update=None, src_type=None, dest_type=None,\n operation_name=None, service=None, endpoint=None,\n parameters=None):\n super(FileInfo, self).__init__(src, src_type=src_type,\n operation_name=operation_name,\n service=service,\n endpoint=endpoint)\n self.dest = dest\n self.dest_type = dest_type\n self.compare_key = compare_key\n self.size = size\n self.last_update = last_update\n # Usually inject ``parameters`` from ``BasicTask`` class.\n if parameters is not None:\n self.parameters = parameters\n else:\n self.parameters = {'acl': None,\n 'sse': None}\n\n def _permission_to_param(self, permission):\n if permission == 'read':\n return 'grant_read'\n if permission == 'full':\n return 'grant_full_control'\n if permission == 'readacl':\n return 'grant_read_acp'\n if permission == 'writeacl':\n return 'grant_write_acp'\n raise ValueError('permission must be one of: '\n 'read|readacl|writeacl|full')\n\n def _handle_object_params(self, params):\n if self.parameters['acl']:\n params['acl'] = self.parameters['acl'][0]\n if self.parameters['grants']:\n for grant in self.parameters['grants']:\n try:\n permission, grantee = grant.split('=', 1)\n except ValueError:\n raise ValueError('grants should be of the form '\n 'permission=principal')\n params[self._permission_to_param(permission)] = grantee\n if self.parameters['sse']:\n params['server_side_encryption'] = 'AES256'\n if self.parameters['storage_class']:\n params['storage_class'] = self.parameters['storage_class'][0]\n if self.parameters['website_redirect']:\n params['website_redirect_location'] = \\\n self.parameters['website_redirect'][0]\n if self.parameters['guess_mime_type']:\n self._inject_content_type(params, self.src)\n if self.parameters['content_type']:\n params['content_type'] = self.parameters['content_type'][0]\n if self.parameters['cache_control']:\n params['cache_control'] = self.parameters['cache_control'][0]\n if self.parameters['content_disposition']:\n params['content_disposition'] = \\\n self.parameters['content_disposition'][0]\n if self.parameters['content_encoding']:\n params['content_encoding'] = self.parameters['content_encoding'][0]\n if self.parameters['content_language']:\n params['content_language'] = self.parameters['content_language'][0]\n if self.parameters['expires']:\n params['expires'] = self.parameters['expires'][0]\n\n def upload(self):\n \"\"\"\n Redirects the file to the multipart upload function if the file is\n large. 
If it is small enough, it puts the file as an object in s3.\n \"\"\"\n with open(self.src, 'rb') as body:\n bucket, key = find_bucket_key(self.dest)\n params = {\n 'endpoint': self.endpoint,\n 'bucket': bucket,\n 'key': key,\n 'body': body,\n }\n self._handle_object_params(params)\n response_data, http = operate(self.service, 'PutObject', params)\n etag = response_data['ETag'][1:-1]\n body.seek(0)\n check_etag(etag, body)\n\n def _inject_content_type(self, params, filename):\n # Add a content type param if we can guess the type.\n guessed_type = guess_content_type(filename)\n if guessed_type is not None:\n params['content_type'] = guessed_type\n\n def download(self):\n \"\"\"\n Redirects the file to the multipart download function if the file is\n large. If it is small enough, it gets the file as an object from s3.\n \"\"\"\n bucket, key = find_bucket_key(self.src)\n params = {'endpoint': self.endpoint, 'bucket': bucket, 'key': key}\n response_data, http = operate(self.service, 'GetObject', params)\n save_file(self.dest, response_data, self.last_update)\n\n def copy(self):\n \"\"\"\n Copies a object in s3 to another location in s3.\n \"\"\"\n copy_source = quote(self.src.encode('utf-8'), safe='/~')\n bucket, key = find_bucket_key(self.dest)\n params = {'endpoint': self.endpoint, 'bucket': bucket,\n 'copy_source': copy_source, 'key': key}\n self._handle_object_params(params)\n response_data, http = operate(self.service, 'CopyObject', params)\n\n def delete(self):\n \"\"\"\n Deletes the file from s3 or local. The src file and type is used\n from the file info object.\n \"\"\"\n if (self.src_type == 's3'):\n bucket, key = find_bucket_key(self.src)\n params = {'endpoint': self.endpoint, 'bucket': bucket, 'key': key}\n response_data, http = operate(self.service, 'DeleteObject',\n params)\n else:\n os.remove(self.src)\n\n def move(self):\n \"\"\"\n Implements a move command for s3.\n \"\"\"\n src = self.src_type\n dest = self.dest_type\n if src == 'local' and dest == 's3':\n self.upload()\n elif src == 's3' and dest == 's3':\n self.copy()\n elif src == 's3' and dest == 'local':\n self.download()\n else:\n raise Exception(\"Invalid path arguments for mv\")\n self.delete()\n\n def create_multipart_upload(self):\n bucket, key = find_bucket_key(self.dest)\n params = {'endpoint': self.endpoint, 'bucket': bucket, 'key': key}\n self._handle_object_params(params)\n response_data, http = operate(self.service, 'CreateMultipartUpload',\n params)\n upload_id = response_data['UploadId']\n return upload_id\n", "path": "awscli/customizations/s3/fileinfo.py"}], "after_files": [{"content": "import os\nimport sys\nimport time\nfrom functools import partial\nimport hashlib\n\nfrom dateutil.parser import parse\nfrom dateutil.tz import tzlocal\n\nfrom botocore.compat import quote\nfrom awscli.customizations.s3.utils import find_bucket_key, \\\n check_etag, check_error, operate, uni_print, \\\n guess_content_type, MD5Error\n\n\ndef read_file(filename):\n \"\"\"\n This reads the file into a form that can be sent to S3\n \"\"\"\n with open(filename, 'rb') as in_file:\n return in_file.read()\n\n\ndef save_file(filename, response_data, last_update):\n \"\"\"\n This writes to the file upon downloading. It reads the data in the\n response. Makes a new directory if needed and then writes the\n data to the file. 
It also modifies the last modified time to that\n of the S3 object.\n \"\"\"\n body = response_data['Body']\n etag = response_data['ETag'][1:-1]\n d = os.path.dirname(filename)\n try:\n if not os.path.exists(d):\n os.makedirs(d)\n except Exception:\n pass\n md5 = hashlib.md5()\n file_chunks = iter(partial(body.read, 1024 * 1024), b'')\n with open(filename, 'wb') as out_file:\n if not _is_multipart_etag(etag):\n for chunk in file_chunks:\n md5.update(chunk)\n out_file.write(chunk)\n else:\n for chunk in file_chunks:\n out_file.write(chunk)\n if not _is_multipart_etag(etag):\n if etag != md5.hexdigest():\n os.remove(filename)\n raise MD5Error(filename)\n last_update_tuple = last_update.timetuple()\n mod_timestamp = time.mktime(last_update_tuple)\n os.utime(filename, (int(mod_timestamp), int(mod_timestamp)))\n\n\ndef _is_multipart_etag(etag):\n return '-' in etag\n\n\nclass TaskInfo(object):\n \"\"\"\n This class contains important details related to performing a task. This\n object is usually only used for creating buckets, removing buckets, and\n listing objects/buckets. This object contains the attributes and\n functions needed to perform the task. Note that just instantiating one\n of these objects will not be enough to run a listing or bucket command.\n unless ``session`` and ``region`` are specified upon instantiation.\n\n :param src: the source path\n :type src: string\n :param src_type: if the source file is s3 or local.\n :type src_type: string\n :param operation: the operation being performed.\n :type operation: string\n :param session: ``botocore.session`` object\n :param region: The region for the endpoint\n\n Note that a local file will always have its absolute path, and a s3 file\n will have its path in the form of bucket/key\n \"\"\"\n def __init__(self, src, src_type, operation_name, service, endpoint):\n self.src = src\n self.src_type = src_type\n self.operation_name = operation_name\n self.service = service\n self.endpoint = endpoint\n\n def make_bucket(self):\n \"\"\"\n This opereation makes a bucket.\n \"\"\"\n bucket, key = find_bucket_key(self.src)\n bucket_config = {'LocationConstraint': self.endpoint.region_name}\n params = {'endpoint': self.endpoint, 'bucket': bucket}\n if self.endpoint.region_name != 'us-east-1':\n params['create_bucket_configuration'] = bucket_config\n response_data, http = operate(self.service, 'CreateBucket', params)\n\n def remove_bucket(self):\n \"\"\"\n This operation removes a bucket.\n \"\"\"\n bucket, key = find_bucket_key(self.src)\n params = {'endpoint': self.endpoint, 'bucket': bucket}\n response_data, http = operate(self.service, 'DeleteBucket', params)\n\n\nclass FileInfo(TaskInfo):\n \"\"\"\n This is a child object of the ``TaskInfo`` object. It can perform more\n operations such as ``upload``, ``download``, ``copy``, ``delete``,\n ``move``. Similiarly to\n ``TaskInfo`` objects attributes like ``session`` need to be set in order\n to perform operations.\n\n :param dest: the destination path\n :type dest: string\n :param compare_key: the name of the file relative to the specified\n directory/prefix. 
This variable is used when performing synching\n or if the destination file is adopting the source file's name.\n :type compare_key: string\n :param size: The size of the file in bytes.\n :type size: integer\n :param last_update: the local time of last modification.\n :type last_update: datetime object\n :param dest_type: if the destination is s3 or local.\n :param dest_type: string\n :param parameters: a dictionary of important values this is assigned in\n the ``BasicTask`` object.\n \"\"\"\n def __init__(self, src, dest=None, compare_key=None, size=None,\n last_update=None, src_type=None, dest_type=None,\n operation_name=None, service=None, endpoint=None,\n parameters=None):\n super(FileInfo, self).__init__(src, src_type=src_type,\n operation_name=operation_name,\n service=service,\n endpoint=endpoint)\n self.dest = dest\n self.dest_type = dest_type\n self.compare_key = compare_key\n self.size = size\n self.last_update = last_update\n # Usually inject ``parameters`` from ``BasicTask`` class.\n if parameters is not None:\n self.parameters = parameters\n else:\n self.parameters = {'acl': None,\n 'sse': None}\n\n def _permission_to_param(self, permission):\n if permission == 'read':\n return 'grant_read'\n if permission == 'full':\n return 'grant_full_control'\n if permission == 'readacl':\n return 'grant_read_acp'\n if permission == 'writeacl':\n return 'grant_write_acp'\n raise ValueError('permission must be one of: '\n 'read|readacl|writeacl|full')\n\n def _handle_object_params(self, params):\n if self.parameters['acl']:\n params['acl'] = self.parameters['acl'][0]\n if self.parameters['grants']:\n for grant in self.parameters['grants']:\n try:\n permission, grantee = grant.split('=', 1)\n except ValueError:\n raise ValueError('grants should be of the form '\n 'permission=principal')\n params[self._permission_to_param(permission)] = grantee\n if self.parameters['sse']:\n params['server_side_encryption'] = 'AES256'\n if self.parameters['storage_class']:\n params['storage_class'] = self.parameters['storage_class'][0]\n if self.parameters['website_redirect']:\n params['website_redirect_location'] = \\\n self.parameters['website_redirect'][0]\n if self.parameters['guess_mime_type']:\n self._inject_content_type(params, self.src)\n if self.parameters['content_type']:\n params['content_type'] = self.parameters['content_type'][0]\n if self.parameters['cache_control']:\n params['cache_control'] = self.parameters['cache_control'][0]\n if self.parameters['content_disposition']:\n params['content_disposition'] = \\\n self.parameters['content_disposition'][0]\n if self.parameters['content_encoding']:\n params['content_encoding'] = self.parameters['content_encoding'][0]\n if self.parameters['content_language']:\n params['content_language'] = self.parameters['content_language'][0]\n if self.parameters['expires']:\n params['expires'] = self.parameters['expires'][0]\n\n def upload(self):\n \"\"\"\n Redirects the file to the multipart upload function if the file is\n large. 
If it is small enough, it puts the file as an object in s3.\n \"\"\"\n with open(self.src, 'rb') as body:\n bucket, key = find_bucket_key(self.dest)\n params = {\n 'endpoint': self.endpoint,\n 'bucket': bucket,\n 'key': key,\n 'body': body,\n }\n self._handle_object_params(params)\n response_data, http = operate(self.service, 'PutObject', params)\n etag = response_data['ETag'][1:-1]\n body.seek(0)\n check_etag(etag, body)\n\n def _inject_content_type(self, params, filename):\n # Add a content type param if we can guess the type.\n guessed_type = guess_content_type(filename)\n if guessed_type is not None:\n params['content_type'] = guessed_type\n\n def download(self):\n \"\"\"\n Redirects the file to the multipart download function if the file is\n large. If it is small enough, it gets the file as an object from s3.\n \"\"\"\n bucket, key = find_bucket_key(self.src)\n params = {'endpoint': self.endpoint, 'bucket': bucket, 'key': key}\n response_data, http = operate(self.service, 'GetObject', params)\n save_file(self.dest, response_data, self.last_update)\n\n def copy(self):\n \"\"\"\n Copies a object in s3 to another location in s3.\n \"\"\"\n copy_source = self.src\n bucket, key = find_bucket_key(self.dest)\n params = {'endpoint': self.endpoint, 'bucket': bucket,\n 'copy_source': copy_source, 'key': key}\n self._handle_object_params(params)\n response_data, http = operate(self.service, 'CopyObject', params)\n\n def delete(self):\n \"\"\"\n Deletes the file from s3 or local. The src file and type is used\n from the file info object.\n \"\"\"\n if (self.src_type == 's3'):\n bucket, key = find_bucket_key(self.src)\n params = {'endpoint': self.endpoint, 'bucket': bucket, 'key': key}\n response_data, http = operate(self.service, 'DeleteObject',\n params)\n else:\n os.remove(self.src)\n\n def move(self):\n \"\"\"\n Implements a move command for s3.\n \"\"\"\n src = self.src_type\n dest = self.dest_type\n if src == 'local' and dest == 's3':\n self.upload()\n elif src == 's3' and dest == 's3':\n self.copy()\n elif src == 's3' and dest == 'local':\n self.download()\n else:\n raise Exception(\"Invalid path arguments for mv\")\n self.delete()\n\n def create_multipart_upload(self):\n bucket, key = find_bucket_key(self.dest)\n params = {'endpoint': self.endpoint, 'bucket': bucket, 'key': key}\n self._handle_object_params(params)\n response_data, http = operate(self.service, 'CreateMultipartUpload',\n params)\n upload_id = response_data['UploadId']\n return upload_id\n", "path": "awscli/customizations/s3/fileinfo.py"}]} | 3,558 | 144 |
gh_patches_debug_42785 | rasdani/github-patches | git_diff | cornellius-gp__gpytorch-1299 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug]
# 🐛 Bug
Batched fixed noise GPs fail when the preconditioning threshold is reached. The concatenation in this line fails with a shape error.
https://github.com/cornellius-gp/gpytorch/blob/master/gpytorch/lazy/added_diag_lazy_tensor.py#L126
## To reproduce
** Code snippet to reproduce **
```python
import torch
import gpytorch
class BatchFixedNoiseGP(gpytorch.models.GP):
def __init__(self, init_x, init_y, noise, batch_shape):
super().__init__()
self.mean_module = gpytorch.means.ZeroMean()
self.covar_module = gpytorch.kernels.RBFKernel(batch_shape=batch_shape)
self.likelihood = gpytorch.likelihoods.FixedNoiseGaussianLikelihood(noise)
def forward(self, inputs):
mean = self.mean_module(inputs)
covar = self.covar_module(inputs)
return gpytorch.distributions.MultivariateNormal(mean, covar)
batch_shape = [2]
train_x = torch.randn(*batch_shape, 101, 3)
train_y = torch.randn(*batch_shape, 101)
train_noise = torch.rand(*batch_shape, 101)
gp = BatchFixedNoiseGP(train_x, train_y, train_noise, batch_shape)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(gp.likelihood, gp)
with gpytorch.settings.max_cholesky_size(100), gpytorch.settings.min_preconditioning_size(100):
train_dist = gp(train_x)
loss = -mll(train_dist, train_y).sum()
```
** Stack trace/error message **
```
RuntimeError Traceback (most recent call last)
<ipython-input-4-9e151e2de37a> in <module>
24 with gpytorch.settings.max_cholesky_size(100), gpytorch.settings.min_preconditioning_size(100):
25 train_dist = gp(train_x)
---> 26 loss = -mll(train_dist, train_y).sum()
~/Code/gpytorch/gpytorch/module.py in __call__(self, *inputs, **kwargs)
26
27 def __call__(self, *inputs, **kwargs):
---> 28 outputs = self.forward(*inputs, **kwargs)
29 if isinstance(outputs, list):
30 return [_validate_module_outputs(output) for output in outputs]
~/Code/gpytorch/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, function_dist, target, *params)
49 # Get the log prob of the marginal distribution
50 output = self.likelihood(function_dist, *params)
---> 51 res = output.log_prob(target)
52
53 # Add additional terms (SGPR / learned inducing points, heteroskedastic likelihood models)
~/Code/gpytorch/gpytorch/distributions/multivariate_normal.py in log_prob(self, value)
138
139 # Get log determininat and first part of quadratic form
--> 140 inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)
141
142 res = -0.5 * sum([inv_quad, logdet, diff.size(-1) * math.log(2 * math.pi)])
~/Code/gpytorch/gpytorch/lazy/lazy_tensor.py in inv_quad_logdet(self, inv_quad_rhs, logdet, reduce_inv_quad)
1069 probe_vectors,
1070 probe_vector_norms,
-> 1071 *args,
1072 )
1073
~/Code/gpytorch/gpytorch/functions/_inv_quad_log_det.py in forward(ctx, representation_tree, dtype, device, matrix_shape, batch_shape, inv_quad, logdet, probe_vectors, probe_vector_norms, *args)
65 lazy_tsr = ctx.representation_tree(*matrix_args)
66 with torch.no_grad():
---> 67 preconditioner, precond_lt, logdet_correction = lazy_tsr._preconditioner()
68
69 ctx.preconditioner = preconditioner
~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _preconditioner(self)
84 )
85 return None, None, None
---> 86 self._init_cache()
87
88 # NOTE: We cannot memoize this precondition closure as it causes a memory leak
~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _init_cache(self)
107 self._init_cache_for_constant_diag(eye, batch_shape, n, k)
108 else:
--> 109 self._init_cache_for_non_constant_diag(eye, batch_shape, n)
110
111 self._precond_lt = PsdSumLazyTensor(RootLazyTensor(self._piv_chol_self), self._diag_tensor)
~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _init_cache_for_non_constant_diag(self, eye, batch_shape, n)
125 # With non-constant diagonals, we cant factor out the noise as easily
126 # eye = eye.expand(*batch_shape, -1, -1)
--> 127 self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self / self._noise.sqrt(), eye)))
128 self._q_cache = self._q_cache[..., :n, :] / self._noise.sqrt()
129
RuntimeError: Tensors must have same number of dimensions: got 3 and 2
```
## Expected Behavior
Everything works fine until the preconditioning threshold is reached. Obviously one would hope that it would continue to work.
## System information
**Please complete the following information:**
GPyTorch Version: 1.2.0
PyTorch Version: '1.6.0.dev20200522'
OS: Ubuntu 16.04 LTS
## Additional context
This appears to fix the problem
```
def _init_cache_for_non_constant_diag(self, eye, batch_shape, n):
# With non-constant diagonals, we cant factor out the noise as easily
eye = eye.expand(*batch_shape, -1, -1)
self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self / self._noise.sqrt(), eye), dim=-2))
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gpytorch/lazy/added_diag_lazy_tensor.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import warnings
4 from typing import Optional, Tuple
5
6 import torch
7 from torch import Tensor
8
9 from .. import settings
10 from ..utils import broadcasting, pivoted_cholesky
11 from ..utils.memoize import cached
12 from ..utils.warnings import NumericalWarning
13 from .diag_lazy_tensor import ConstantDiagLazyTensor, DiagLazyTensor
14 from .lazy_tensor import LazyTensor
15 from .psd_sum_lazy_tensor import PsdSumLazyTensor
16 from .root_lazy_tensor import RootLazyTensor
17 from .sum_lazy_tensor import SumLazyTensor
18
19
20 class AddedDiagLazyTensor(SumLazyTensor):
21 """
22 A SumLazyTensor, but of only two lazy tensors, the second of which must be
23 a DiagLazyTensor.
24 """
25
26 def __init__(self, *lazy_tensors, preconditioner_override=None):
27 lazy_tensors = list(lazy_tensors)
28 super(AddedDiagLazyTensor, self).__init__(*lazy_tensors, preconditioner_override=preconditioner_override)
29 if len(lazy_tensors) > 2:
30 raise RuntimeError("An AddedDiagLazyTensor can only have two components")
31
32 broadcasting._mul_broadcast_shape(lazy_tensors[0].shape, lazy_tensors[1].shape)
33
34 if isinstance(lazy_tensors[0], DiagLazyTensor) and isinstance(lazy_tensors[1], DiagLazyTensor):
35 raise RuntimeError("Trying to lazily add two DiagLazyTensors. Create a single DiagLazyTensor instead.")
36 elif isinstance(lazy_tensors[0], DiagLazyTensor):
37 self._diag_tensor = lazy_tensors[0]
38 self._lazy_tensor = lazy_tensors[1]
39 elif isinstance(lazy_tensors[1], DiagLazyTensor):
40 self._diag_tensor = lazy_tensors[1]
41 self._lazy_tensor = lazy_tensors[0]
42 else:
43 raise RuntimeError("One of the LazyTensors input to AddedDiagLazyTensor must be a DiagLazyTensor!")
44
45 self.preconditioner_override = preconditioner_override
46
47 # Placeholders
48 self._constant_diag = None
49 self._noise = None
50 self._piv_chol_self = None # <- Doesn't need to be an attribute, but used for testing purposes
51 self._precond_lt = None
52 self._precond_logdet_cache = None
53 self._q_cache = None
54 self._r_cache = None
55
56 def _matmul(self, rhs):
57 return torch.addcmul(self._lazy_tensor._matmul(rhs), self._diag_tensor._diag.unsqueeze(-1), rhs)
58
59 def add_diag(self, added_diag):
60 return AddedDiagLazyTensor(self._lazy_tensor, self._diag_tensor.add_diag(added_diag))
61
62 def __add__(self, other):
63 from .diag_lazy_tensor import DiagLazyTensor
64
65 if isinstance(other, DiagLazyTensor):
66 return AddedDiagLazyTensor(self._lazy_tensor, self._diag_tensor + other)
67 else:
68 return AddedDiagLazyTensor(self._lazy_tensor + other, self._diag_tensor)
69
70 def _preconditioner(self):
71 if self.preconditioner_override is not None:
72 return self.preconditioner_override(self)
73
74 if settings.max_preconditioner_size.value() == 0 or self.size(-1) < settings.min_preconditioning_size.value():
75 return None, None, None
76
77 if self._q_cache is None:
78 max_iter = settings.max_preconditioner_size.value()
79 self._piv_chol_self = pivoted_cholesky.pivoted_cholesky(self._lazy_tensor, max_iter)
80 if torch.any(torch.isnan(self._piv_chol_self)).item():
81 warnings.warn(
82 "NaNs encountered in preconditioner computation. Attempting to continue without preconditioning.",
83 NumericalWarning,
84 )
85 return None, None, None
86 self._init_cache()
87
88 # NOTE: We cannot memoize this precondition closure as it causes a memory leak
89 def precondition_closure(tensor):
90 qqt = self._q_cache.matmul(self._q_cache.transpose(-2, -1).matmul(tensor))
91 if self._constant_diag:
92 return (1 / self._noise) * (tensor - qqt)
93 return (tensor / self._noise) - qqt
94
95 return (precondition_closure, self._precond_lt, self._precond_logdet_cache)
96
97 def _init_cache(self):
98 *batch_shape, n, k = self._piv_chol_self.shape
99 self._noise = self._diag_tensor.diag().unsqueeze(-1)
100
101 # the check for constant diag needs to be done carefully for batches.
102 noise_first_element = self._noise[..., :1, :]
103 self._constant_diag = torch.equal(self._noise, noise_first_element * torch.ones_like(self._noise))
104 eye = torch.eye(k, dtype=self._piv_chol_self.dtype, device=self._piv_chol_self.device)
105
106 if self._constant_diag:
107 self._init_cache_for_constant_diag(eye, batch_shape, n, k)
108 else:
109 self._init_cache_for_non_constant_diag(eye, batch_shape, n)
110
111 self._precond_lt = PsdSumLazyTensor(RootLazyTensor(self._piv_chol_self), self._diag_tensor)
112
113 def _init_cache_for_constant_diag(self, eye, batch_shape, n, k):
114 # We can factor out the noise for for both QR and solves.
115 self._noise = self._noise.narrow(-2, 0, 1)
116 self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self, self._noise.sqrt() * eye), dim=-2))
117 self._q_cache = self._q_cache[..., :n, :]
118
119 # Use the matrix determinant lemma for the logdet, using the fact that R'R = L_k'L_k + s*I
120 logdet = self._r_cache.diagonal(dim1=-1, dim2=-2).abs().log().sum(-1).mul(2)
121 logdet = logdet + (n - k) * self._noise.squeeze(-2).squeeze(-1).log()
122 self._precond_logdet_cache = logdet.view(*batch_shape) if len(batch_shape) else logdet.squeeze()
123
124 def _init_cache_for_non_constant_diag(self, eye, batch_shape, n):
125 # With non-constant diagonals, we cant factor out the noise as easily
126 self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self / self._noise.sqrt(), eye)))
127 self._q_cache = self._q_cache[..., :n, :] / self._noise.sqrt()
128
129 logdet = self._r_cache.diagonal(dim1=-1, dim2=-2).abs().log().sum(-1).mul(2)
130 logdet -= (1.0 / self._noise).log().sum([-1, -2])
131 self._precond_logdet_cache = logdet.view(*batch_shape) if len(batch_shape) else logdet.squeeze()
132
133 @cached(name="svd")
134 def _svd(self) -> Tuple["LazyTensor", Tensor, "LazyTensor"]:
135 if isinstance(self._diag_tensor, ConstantDiagLazyTensor):
136 U, S_, V = self._lazy_tensor.svd()
137 S = S_ + self._diag_tensor.diag()
138 return U, S, V
139 return super()._svd()
140
141 def _symeig(self, eigenvectors: bool = False) -> Tuple[Tensor, Optional[LazyTensor]]:
142 if isinstance(self._diag_tensor, ConstantDiagLazyTensor):
143 evals_, evecs = self._lazy_tensor.symeig(eigenvectors=eigenvectors)
144 evals = evals_ + self._diag_tensor.diag()
145 return evals, evecs
146 return super()._symeig(eigenvectors=eigenvectors)
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gpytorch/lazy/added_diag_lazy_tensor.py b/gpytorch/lazy/added_diag_lazy_tensor.py
--- a/gpytorch/lazy/added_diag_lazy_tensor.py
+++ b/gpytorch/lazy/added_diag_lazy_tensor.py
@@ -68,12 +68,33 @@
return AddedDiagLazyTensor(self._lazy_tensor + other, self._diag_tensor)
def _preconditioner(self):
+ r"""
+ Here we use a partial pivoted Cholesky preconditioner:
+
+ K \approx L L^T + D
+
+ where L L^T is a low rank approximation, and D is a diagonal.
+ We can compute the preconditioner's inverse using Woodbury
+
+ (L L^T + D)^{-1} = D^{-1} - D^{-1} L (I + L D^{-1} L^T)^{-1} L^T D^{-1}
+
+ This function returns:
+ - A function `precondition_closure` that computes the solve (L L^T + D)^{-1} x
+ - A LazyTensor `precondition_lt` that represents (L L^T + D)
+ - The log determinant of (L L^T + D)
+ """
+
if self.preconditioner_override is not None:
return self.preconditioner_override(self)
if settings.max_preconditioner_size.value() == 0 or self.size(-1) < settings.min_preconditioning_size.value():
return None, None, None
+ # Cache a QR decomposition [Q; Q'] R = [D^{-1/2}; L]
+ # This makes it fast to compute solves and log determinants with it
+ #
+ # Through woodbury, (L L^T + D)^{-1} reduces down to (D^{-1} - D^{-1/2} Q Q^T D^{-1/2})
+ # Through matrix determinant lemma, log |L L^T + D| reduces down to 2 log |R|
if self._q_cache is None:
max_iter = settings.max_preconditioner_size.value()
self._piv_chol_self = pivoted_cholesky.pivoted_cholesky(self._lazy_tensor, max_iter)
@@ -87,6 +108,7 @@
# NOTE: We cannot memoize this precondition closure as it causes a memory leak
def precondition_closure(tensor):
+ # This makes it fast to compute solves with it
qqt = self._q_cache.matmul(self._q_cache.transpose(-2, -1).matmul(tensor))
if self._constant_diag:
return (1 / self._noise) * (tensor - qqt)
@@ -102,6 +124,7 @@
noise_first_element = self._noise[..., :1, :]
self._constant_diag = torch.equal(self._noise, noise_first_element * torch.ones_like(self._noise))
eye = torch.eye(k, dtype=self._piv_chol_self.dtype, device=self._piv_chol_self.device)
+ eye = eye.expand(*batch_shape, k, k)
if self._constant_diag:
self._init_cache_for_constant_diag(eye, batch_shape, n, k)
@@ -123,9 +146,10 @@
def _init_cache_for_non_constant_diag(self, eye, batch_shape, n):
# With non-constant diagonals, we cant factor out the noise as easily
- self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self / self._noise.sqrt(), eye)))
+ self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self / self._noise.sqrt(), eye), dim=-2))
self._q_cache = self._q_cache[..., :n, :] / self._noise.sqrt()
+ # Use the matrix determinant lemma for the logdet, using the fact that R'R = L_k'L_k + s*I
logdet = self._r_cache.diagonal(dim1=-1, dim2=-2).abs().log().sum(-1).mul(2)
logdet -= (1.0 / self._noise).log().sum([-1, -2])
self._precond_logdet_cache = logdet.view(*batch_shape) if len(batch_shape) else logdet.squeeze()
| {"golden_diff": "diff --git a/gpytorch/lazy/added_diag_lazy_tensor.py b/gpytorch/lazy/added_diag_lazy_tensor.py\n--- a/gpytorch/lazy/added_diag_lazy_tensor.py\n+++ b/gpytorch/lazy/added_diag_lazy_tensor.py\n@@ -68,12 +68,33 @@\n return AddedDiagLazyTensor(self._lazy_tensor + other, self._diag_tensor)\n \n def _preconditioner(self):\n+ r\"\"\"\n+ Here we use a partial pivoted Cholesky preconditioner:\n+\n+ K \\approx L L^T + D\n+\n+ where L L^T is a low rank approximation, and D is a diagonal.\n+ We can compute the preconditioner's inverse using Woodbury\n+\n+ (L L^T + D)^{-1} = D^{-1} - D^{-1} L (I + L D^{-1} L^T)^{-1} L^T D^{-1}\n+\n+ This function returns:\n+ - A function `precondition_closure` that computes the solve (L L^T + D)^{-1} x\n+ - A LazyTensor `precondition_lt` that represents (L L^T + D)\n+ - The log determinant of (L L^T + D)\n+ \"\"\"\n+\n if self.preconditioner_override is not None:\n return self.preconditioner_override(self)\n \n if settings.max_preconditioner_size.value() == 0 or self.size(-1) < settings.min_preconditioning_size.value():\n return None, None, None\n \n+ # Cache a QR decomposition [Q; Q'] R = [D^{-1/2}; L]\n+ # This makes it fast to compute solves and log determinants with it\n+ #\n+ # Through woodbury, (L L^T + D)^{-1} reduces down to (D^{-1} - D^{-1/2} Q Q^T D^{-1/2})\n+ # Through matrix determinant lemma, log |L L^T + D| reduces down to 2 log |R|\n if self._q_cache is None:\n max_iter = settings.max_preconditioner_size.value()\n self._piv_chol_self = pivoted_cholesky.pivoted_cholesky(self._lazy_tensor, max_iter)\n@@ -87,6 +108,7 @@\n \n # NOTE: We cannot memoize this precondition closure as it causes a memory leak\n def precondition_closure(tensor):\n+ # This makes it fast to compute solves with it\n qqt = self._q_cache.matmul(self._q_cache.transpose(-2, -1).matmul(tensor))\n if self._constant_diag:\n return (1 / self._noise) * (tensor - qqt)\n@@ -102,6 +124,7 @@\n noise_first_element = self._noise[..., :1, :]\n self._constant_diag = torch.equal(self._noise, noise_first_element * torch.ones_like(self._noise))\n eye = torch.eye(k, dtype=self._piv_chol_self.dtype, device=self._piv_chol_self.device)\n+ eye = eye.expand(*batch_shape, k, k)\n \n if self._constant_diag:\n self._init_cache_for_constant_diag(eye, batch_shape, n, k)\n@@ -123,9 +146,10 @@\n \n def _init_cache_for_non_constant_diag(self, eye, batch_shape, n):\n # With non-constant diagonals, we cant factor out the noise as easily\n- self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self / self._noise.sqrt(), eye)))\n+ self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self / self._noise.sqrt(), eye), dim=-2))\n self._q_cache = self._q_cache[..., :n, :] / self._noise.sqrt()\n \n+ # Use the matrix determinant lemma for the logdet, using the fact that R'R = L_k'L_k + s*I\n logdet = self._r_cache.diagonal(dim1=-1, dim2=-2).abs().log().sum(-1).mul(2)\n logdet -= (1.0 / self._noise).log().sum([-1, -2])\n self._precond_logdet_cache = logdet.view(*batch_shape) if len(batch_shape) else logdet.squeeze()\n", "issue": "[Bug]\n# \ud83d\udc1b Bug\r\n\r\nBatched fixed noise GPs fail when the preconditioning threshold is reached. 
The concatenation in this line fails with a shape error.\r\n\r\nhttps://github.com/cornellius-gp/gpytorch/blob/master/gpytorch/lazy/added_diag_lazy_tensor.py#L126\r\n\r\n## To reproduce\r\n\r\n** Code snippet to reproduce **\r\n```python\r\nimport torch\r\nimport gpytorch\r\n\r\nclass BatchFixedNoiseGP(gpytorch.models.GP):\r\n def __init__(self, init_x, init_y, noise, batch_shape):\r\n super().__init__()\r\n self.mean_module = gpytorch.means.ZeroMean()\r\n self.covar_module = gpytorch.kernels.RBFKernel(batch_shape=batch_shape)\r\n self.likelihood = gpytorch.likelihoods.FixedNoiseGaussianLikelihood(noise)\r\n \r\n def forward(self, inputs):\r\n mean = self.mean_module(inputs)\r\n covar = self.covar_module(inputs)\r\n return gpytorch.distributions.MultivariateNormal(mean, covar)\r\n\r\nbatch_shape = [2]\r\ntrain_x = torch.randn(*batch_shape, 101, 3)\r\ntrain_y = torch.randn(*batch_shape, 101)\r\ntrain_noise = torch.rand(*batch_shape, 101)\r\n\r\ngp = BatchFixedNoiseGP(train_x, train_y, train_noise, batch_shape)\r\nmll = gpytorch.mlls.ExactMarginalLogLikelihood(gp.likelihood, gp)\r\n\r\nwith gpytorch.settings.max_cholesky_size(100), gpytorch.settings.min_preconditioning_size(100):\r\n train_dist = gp(train_x)\r\n loss = -mll(train_dist, train_y).sum()\r\n```\r\n\r\n** Stack trace/error message **\r\n```\r\nRuntimeError Traceback (most recent call last)\r\n<ipython-input-4-9e151e2de37a> in <module>\r\n 24 with gpytorch.settings.max_cholesky_size(100), gpytorch.settings.min_preconditioning_size(100):\r\n 25 train_dist = gp(train_x)\r\n---> 26 loss = -mll(train_dist, train_y).sum()\r\n\r\n~/Code/gpytorch/gpytorch/module.py in __call__(self, *inputs, **kwargs)\r\n 26 \r\n 27 def __call__(self, *inputs, **kwargs):\r\n---> 28 outputs = self.forward(*inputs, **kwargs)\r\n 29 if isinstance(outputs, list):\r\n 30 return [_validate_module_outputs(output) for output in outputs]\r\n\r\n~/Code/gpytorch/gpytorch/mlls/exact_marginal_log_likelihood.py in forward(self, function_dist, target, *params)\r\n 49 # Get the log prob of the marginal distribution\r\n 50 output = self.likelihood(function_dist, *params)\r\n---> 51 res = output.log_prob(target)\r\n 52 \r\n 53 # Add additional terms (SGPR / learned inducing points, heteroskedastic likelihood models)\r\n\r\n~/Code/gpytorch/gpytorch/distributions/multivariate_normal.py in log_prob(self, value)\r\n 138 \r\n 139 # Get log determininat and first part of quadratic form\r\n--> 140 inv_quad, logdet = covar.inv_quad_logdet(inv_quad_rhs=diff.unsqueeze(-1), logdet=True)\r\n 141 \r\n 142 res = -0.5 * sum([inv_quad, logdet, diff.size(-1) * math.log(2 * math.pi)])\r\n\r\n~/Code/gpytorch/gpytorch/lazy/lazy_tensor.py in inv_quad_logdet(self, inv_quad_rhs, logdet, reduce_inv_quad)\r\n 1069 probe_vectors,\r\n 1070 probe_vector_norms,\r\n-> 1071 *args,\r\n 1072 )\r\n 1073 \r\n\r\n~/Code/gpytorch/gpytorch/functions/_inv_quad_log_det.py in forward(ctx, representation_tree, dtype, device, matrix_shape, batch_shape, inv_quad, logdet, probe_vectors, probe_vector_norms, *args)\r\n 65 lazy_tsr = ctx.representation_tree(*matrix_args)\r\n 66 with torch.no_grad():\r\n---> 67 preconditioner, precond_lt, logdet_correction = lazy_tsr._preconditioner()\r\n 68 \r\n 69 ctx.preconditioner = preconditioner\r\n\r\n~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _preconditioner(self)\r\n 84 )\r\n 85 return None, None, None\r\n---> 86 self._init_cache()\r\n 87 \r\n 88 # NOTE: We cannot memoize this precondition closure as it causes a memory 
leak\r\n\r\n~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _init_cache(self)\r\n 107 self._init_cache_for_constant_diag(eye, batch_shape, n, k)\r\n 108 else:\r\n--> 109 self._init_cache_for_non_constant_diag(eye, batch_shape, n)\r\n 110 \r\n 111 self._precond_lt = PsdSumLazyTensor(RootLazyTensor(self._piv_chol_self), self._diag_tensor)\r\n\r\n~/Code/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py in _init_cache_for_non_constant_diag(self, eye, batch_shape, n)\r\n 125 # With non-constant diagonals, we cant factor out the noise as easily\r\n 126 # eye = eye.expand(*batch_shape, -1, -1)\r\n--> 127 self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self / self._noise.sqrt(), eye)))\r\n 128 self._q_cache = self._q_cache[..., :n, :] / self._noise.sqrt()\r\n 129 \r\n\r\nRuntimeError: Tensors must have same number of dimensions: got 3 and 2\r\n```\r\n\r\n## Expected Behavior\r\n\r\nEverything works fine until the preconditioning threshold is reached. Obviously one would hope that it would continue to work.\r\n\r\n## System information\r\n\r\n**Please complete the following information:**\r\nGPyTorch Version: 1.2.0\r\nPyTorch Version: '1.6.0.dev20200522'\r\nOS: Ubuntu 16.04 LTS\r\n\r\n## Additional context\r\nThis appears to fix the problem\r\n```\r\n def _init_cache_for_non_constant_diag(self, eye, batch_shape, n):\r\n # With non-constant diagonals, we cant factor out the noise as easily\r\n eye = eye.expand(*batch_shape, -1, -1)\r\n self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self / self._noise.sqrt(), eye), dim=-2))\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport warnings\nfrom typing import Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom .. import settings\nfrom ..utils import broadcasting, pivoted_cholesky\nfrom ..utils.memoize import cached\nfrom ..utils.warnings import NumericalWarning\nfrom .diag_lazy_tensor import ConstantDiagLazyTensor, DiagLazyTensor\nfrom .lazy_tensor import LazyTensor\nfrom .psd_sum_lazy_tensor import PsdSumLazyTensor\nfrom .root_lazy_tensor import RootLazyTensor\nfrom .sum_lazy_tensor import SumLazyTensor\n\n\nclass AddedDiagLazyTensor(SumLazyTensor):\n \"\"\"\n A SumLazyTensor, but of only two lazy tensors, the second of which must be\n a DiagLazyTensor.\n \"\"\"\n\n def __init__(self, *lazy_tensors, preconditioner_override=None):\n lazy_tensors = list(lazy_tensors)\n super(AddedDiagLazyTensor, self).__init__(*lazy_tensors, preconditioner_override=preconditioner_override)\n if len(lazy_tensors) > 2:\n raise RuntimeError(\"An AddedDiagLazyTensor can only have two components\")\n\n broadcasting._mul_broadcast_shape(lazy_tensors[0].shape, lazy_tensors[1].shape)\n\n if isinstance(lazy_tensors[0], DiagLazyTensor) and isinstance(lazy_tensors[1], DiagLazyTensor):\n raise RuntimeError(\"Trying to lazily add two DiagLazyTensors. 
Create a single DiagLazyTensor instead.\")\n elif isinstance(lazy_tensors[0], DiagLazyTensor):\n self._diag_tensor = lazy_tensors[0]\n self._lazy_tensor = lazy_tensors[1]\n elif isinstance(lazy_tensors[1], DiagLazyTensor):\n self._diag_tensor = lazy_tensors[1]\n self._lazy_tensor = lazy_tensors[0]\n else:\n raise RuntimeError(\"One of the LazyTensors input to AddedDiagLazyTensor must be a DiagLazyTensor!\")\n\n self.preconditioner_override = preconditioner_override\n\n # Placeholders\n self._constant_diag = None\n self._noise = None\n self._piv_chol_self = None # <- Doesn't need to be an attribute, but used for testing purposes\n self._precond_lt = None\n self._precond_logdet_cache = None\n self._q_cache = None\n self._r_cache = None\n\n def _matmul(self, rhs):\n return torch.addcmul(self._lazy_tensor._matmul(rhs), self._diag_tensor._diag.unsqueeze(-1), rhs)\n\n def add_diag(self, added_diag):\n return AddedDiagLazyTensor(self._lazy_tensor, self._diag_tensor.add_diag(added_diag))\n\n def __add__(self, other):\n from .diag_lazy_tensor import DiagLazyTensor\n\n if isinstance(other, DiagLazyTensor):\n return AddedDiagLazyTensor(self._lazy_tensor, self._diag_tensor + other)\n else:\n return AddedDiagLazyTensor(self._lazy_tensor + other, self._diag_tensor)\n\n def _preconditioner(self):\n if self.preconditioner_override is not None:\n return self.preconditioner_override(self)\n\n if settings.max_preconditioner_size.value() == 0 or self.size(-1) < settings.min_preconditioning_size.value():\n return None, None, None\n\n if self._q_cache is None:\n max_iter = settings.max_preconditioner_size.value()\n self._piv_chol_self = pivoted_cholesky.pivoted_cholesky(self._lazy_tensor, max_iter)\n if torch.any(torch.isnan(self._piv_chol_self)).item():\n warnings.warn(\n \"NaNs encountered in preconditioner computation. 
Attempting to continue without preconditioning.\",\n NumericalWarning,\n )\n return None, None, None\n self._init_cache()\n\n # NOTE: We cannot memoize this precondition closure as it causes a memory leak\n def precondition_closure(tensor):\n qqt = self._q_cache.matmul(self._q_cache.transpose(-2, -1).matmul(tensor))\n if self._constant_diag:\n return (1 / self._noise) * (tensor - qqt)\n return (tensor / self._noise) - qqt\n\n return (precondition_closure, self._precond_lt, self._precond_logdet_cache)\n\n def _init_cache(self):\n *batch_shape, n, k = self._piv_chol_self.shape\n self._noise = self._diag_tensor.diag().unsqueeze(-1)\n\n # the check for constant diag needs to be done carefully for batches.\n noise_first_element = self._noise[..., :1, :]\n self._constant_diag = torch.equal(self._noise, noise_first_element * torch.ones_like(self._noise))\n eye = torch.eye(k, dtype=self._piv_chol_self.dtype, device=self._piv_chol_self.device)\n\n if self._constant_diag:\n self._init_cache_for_constant_diag(eye, batch_shape, n, k)\n else:\n self._init_cache_for_non_constant_diag(eye, batch_shape, n)\n\n self._precond_lt = PsdSumLazyTensor(RootLazyTensor(self._piv_chol_self), self._diag_tensor)\n\n def _init_cache_for_constant_diag(self, eye, batch_shape, n, k):\n # We can factor out the noise for for both QR and solves.\n self._noise = self._noise.narrow(-2, 0, 1)\n self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self, self._noise.sqrt() * eye), dim=-2))\n self._q_cache = self._q_cache[..., :n, :]\n\n # Use the matrix determinant lemma for the logdet, using the fact that R'R = L_k'L_k + s*I\n logdet = self._r_cache.diagonal(dim1=-1, dim2=-2).abs().log().sum(-1).mul(2)\n logdet = logdet + (n - k) * self._noise.squeeze(-2).squeeze(-1).log()\n self._precond_logdet_cache = logdet.view(*batch_shape) if len(batch_shape) else logdet.squeeze()\n\n def _init_cache_for_non_constant_diag(self, eye, batch_shape, n):\n # With non-constant diagonals, we cant factor out the noise as easily\n self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self / self._noise.sqrt(), eye)))\n self._q_cache = self._q_cache[..., :n, :] / self._noise.sqrt()\n\n logdet = self._r_cache.diagonal(dim1=-1, dim2=-2).abs().log().sum(-1).mul(2)\n logdet -= (1.0 / self._noise).log().sum([-1, -2])\n self._precond_logdet_cache = logdet.view(*batch_shape) if len(batch_shape) else logdet.squeeze()\n\n @cached(name=\"svd\")\n def _svd(self) -> Tuple[\"LazyTensor\", Tensor, \"LazyTensor\"]:\n if isinstance(self._diag_tensor, ConstantDiagLazyTensor):\n U, S_, V = self._lazy_tensor.svd()\n S = S_ + self._diag_tensor.diag()\n return U, S, V\n return super()._svd()\n\n def _symeig(self, eigenvectors: bool = False) -> Tuple[Tensor, Optional[LazyTensor]]:\n if isinstance(self._diag_tensor, ConstantDiagLazyTensor):\n evals_, evecs = self._lazy_tensor.symeig(eigenvectors=eigenvectors)\n evals = evals_ + self._diag_tensor.diag()\n return evals, evecs\n return super()._symeig(eigenvectors=eigenvectors)\n", "path": "gpytorch/lazy/added_diag_lazy_tensor.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport warnings\nfrom typing import Optional, Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom .. 
import settings\nfrom ..utils import broadcasting, pivoted_cholesky\nfrom ..utils.memoize import cached\nfrom ..utils.warnings import NumericalWarning\nfrom .diag_lazy_tensor import ConstantDiagLazyTensor, DiagLazyTensor\nfrom .lazy_tensor import LazyTensor\nfrom .psd_sum_lazy_tensor import PsdSumLazyTensor\nfrom .root_lazy_tensor import RootLazyTensor\nfrom .sum_lazy_tensor import SumLazyTensor\n\n\nclass AddedDiagLazyTensor(SumLazyTensor):\n \"\"\"\n A SumLazyTensor, but of only two lazy tensors, the second of which must be\n a DiagLazyTensor.\n \"\"\"\n\n def __init__(self, *lazy_tensors, preconditioner_override=None):\n lazy_tensors = list(lazy_tensors)\n super(AddedDiagLazyTensor, self).__init__(*lazy_tensors, preconditioner_override=preconditioner_override)\n if len(lazy_tensors) > 2:\n raise RuntimeError(\"An AddedDiagLazyTensor can only have two components\")\n\n broadcasting._mul_broadcast_shape(lazy_tensors[0].shape, lazy_tensors[1].shape)\n\n if isinstance(lazy_tensors[0], DiagLazyTensor) and isinstance(lazy_tensors[1], DiagLazyTensor):\n raise RuntimeError(\"Trying to lazily add two DiagLazyTensors. Create a single DiagLazyTensor instead.\")\n elif isinstance(lazy_tensors[0], DiagLazyTensor):\n self._diag_tensor = lazy_tensors[0]\n self._lazy_tensor = lazy_tensors[1]\n elif isinstance(lazy_tensors[1], DiagLazyTensor):\n self._diag_tensor = lazy_tensors[1]\n self._lazy_tensor = lazy_tensors[0]\n else:\n raise RuntimeError(\"One of the LazyTensors input to AddedDiagLazyTensor must be a DiagLazyTensor!\")\n\n self.preconditioner_override = preconditioner_override\n\n # Placeholders\n self._constant_diag = None\n self._noise = None\n self._piv_chol_self = None # <- Doesn't need to be an attribute, but used for testing purposes\n self._precond_lt = None\n self._precond_logdet_cache = None\n self._q_cache = None\n self._r_cache = None\n\n def _matmul(self, rhs):\n return torch.addcmul(self._lazy_tensor._matmul(rhs), self._diag_tensor._diag.unsqueeze(-1), rhs)\n\n def add_diag(self, added_diag):\n return AddedDiagLazyTensor(self._lazy_tensor, self._diag_tensor.add_diag(added_diag))\n\n def __add__(self, other):\n from .diag_lazy_tensor import DiagLazyTensor\n\n if isinstance(other, DiagLazyTensor):\n return AddedDiagLazyTensor(self._lazy_tensor, self._diag_tensor + other)\n else:\n return AddedDiagLazyTensor(self._lazy_tensor + other, self._diag_tensor)\n\n def _preconditioner(self):\n r\"\"\"\n Here we use a partial pivoted Cholesky preconditioner:\n\n K \\approx L L^T + D\n\n where L L^T is a low rank approximation, and D is a diagonal.\n We can compute the preconditioner's inverse using Woodbury\n\n (L L^T + D)^{-1} = D^{-1} - D^{-1} L (I + L D^{-1} L^T)^{-1} L^T D^{-1}\n\n This function returns:\n - A function `precondition_closure` that computes the solve (L L^T + D)^{-1} x\n - A LazyTensor `precondition_lt` that represents (L L^T + D)\n - The log determinant of (L L^T + D)\n \"\"\"\n\n if self.preconditioner_override is not None:\n return self.preconditioner_override(self)\n\n if settings.max_preconditioner_size.value() == 0 or self.size(-1) < settings.min_preconditioning_size.value():\n return None, None, None\n\n # Cache a QR decomposition [Q; Q'] R = [D^{-1/2}; L]\n # This makes it fast to compute solves and log determinants with it\n #\n # Through woodbury, (L L^T + D)^{-1} reduces down to (D^{-1} - D^{-1/2} Q Q^T D^{-1/2})\n # Through matrix determinant lemma, log |L L^T + D| reduces down to 2 log |R|\n if self._q_cache is None:\n max_iter = 
settings.max_preconditioner_size.value()\n self._piv_chol_self = pivoted_cholesky.pivoted_cholesky(self._lazy_tensor, max_iter)\n if torch.any(torch.isnan(self._piv_chol_self)).item():\n warnings.warn(\n \"NaNs encountered in preconditioner computation. Attempting to continue without preconditioning.\",\n NumericalWarning,\n )\n return None, None, None\n self._init_cache()\n\n # NOTE: We cannot memoize this precondition closure as it causes a memory leak\n def precondition_closure(tensor):\n # This makes it fast to compute solves with it\n qqt = self._q_cache.matmul(self._q_cache.transpose(-2, -1).matmul(tensor))\n if self._constant_diag:\n return (1 / self._noise) * (tensor - qqt)\n return (tensor / self._noise) - qqt\n\n return (precondition_closure, self._precond_lt, self._precond_logdet_cache)\n\n def _init_cache(self):\n *batch_shape, n, k = self._piv_chol_self.shape\n self._noise = self._diag_tensor.diag().unsqueeze(-1)\n\n # the check for constant diag needs to be done carefully for batches.\n noise_first_element = self._noise[..., :1, :]\n self._constant_diag = torch.equal(self._noise, noise_first_element * torch.ones_like(self._noise))\n eye = torch.eye(k, dtype=self._piv_chol_self.dtype, device=self._piv_chol_self.device)\n eye = eye.expand(*batch_shape, k, k)\n\n if self._constant_diag:\n self._init_cache_for_constant_diag(eye, batch_shape, n, k)\n else:\n self._init_cache_for_non_constant_diag(eye, batch_shape, n)\n\n self._precond_lt = PsdSumLazyTensor(RootLazyTensor(self._piv_chol_self), self._diag_tensor)\n\n def _init_cache_for_constant_diag(self, eye, batch_shape, n, k):\n # We can factor out the noise for for both QR and solves.\n self._noise = self._noise.narrow(-2, 0, 1)\n self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self, self._noise.sqrt() * eye), dim=-2))\n self._q_cache = self._q_cache[..., :n, :]\n\n # Use the matrix determinant lemma for the logdet, using the fact that R'R = L_k'L_k + s*I\n logdet = self._r_cache.diagonal(dim1=-1, dim2=-2).abs().log().sum(-1).mul(2)\n logdet = logdet + (n - k) * self._noise.squeeze(-2).squeeze(-1).log()\n self._precond_logdet_cache = logdet.view(*batch_shape) if len(batch_shape) else logdet.squeeze()\n\n def _init_cache_for_non_constant_diag(self, eye, batch_shape, n):\n # With non-constant diagonals, we cant factor out the noise as easily\n self._q_cache, self._r_cache = torch.qr(torch.cat((self._piv_chol_self / self._noise.sqrt(), eye), dim=-2))\n self._q_cache = self._q_cache[..., :n, :] / self._noise.sqrt()\n\n # Use the matrix determinant lemma for the logdet, using the fact that R'R = L_k'L_k + s*I\n logdet = self._r_cache.diagonal(dim1=-1, dim2=-2).abs().log().sum(-1).mul(2)\n logdet -= (1.0 / self._noise).log().sum([-1, -2])\n self._precond_logdet_cache = logdet.view(*batch_shape) if len(batch_shape) else logdet.squeeze()\n\n @cached(name=\"svd\")\n def _svd(self) -> Tuple[\"LazyTensor\", Tensor, \"LazyTensor\"]:\n if isinstance(self._diag_tensor, ConstantDiagLazyTensor):\n U, S_, V = self._lazy_tensor.svd()\n S = S_ + self._diag_tensor.diag()\n return U, S, V\n return super()._svd()\n\n def _symeig(self, eigenvectors: bool = False) -> Tuple[Tensor, Optional[LazyTensor]]:\n if isinstance(self._diag_tensor, ConstantDiagLazyTensor):\n evals_, evecs = self._lazy_tensor.symeig(eigenvectors=eigenvectors)\n evals = evals_ + self._diag_tensor.diag()\n return evals, evecs\n return super()._symeig(eigenvectors=eigenvectors)\n", "path": "gpytorch/lazy/added_diag_lazy_tensor.py"}]} | 3,844 | 972 |
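The GPyTorch record above ends with the patched `AddedDiagLazyTensor` preconditioner cache, whose batching fix boils down to giving the stacked identity block the same batch dimensions as the pivoted-Cholesky factor before the QR. The stand-alone sketch below shows that bookkeeping in isolation; the shapes, variable names, and noise values are illustrative assumptions, not the library's actual code.

```python
import torch

# Hypothetical sizes: a batch of 3 systems, n = 5 points, rank k = 2 factor.
batch_shape, n, k = (3,), 5, 2
piv_chol = torch.randn(*batch_shape, n, k)    # stand-in for the pivoted-Cholesky factor L_k
noise = torch.rand(*batch_shape, n, 1) + 0.1  # per-point noise diagonal, strictly positive

# The identity block must carry the batch dimensions, otherwise the
# concatenation below misbehaves for batched inputs.
eye = torch.eye(k).expand(*batch_shape, k, k)

# Stack [L_k / sqrt(noise); I] along the row dimension and take a QR.
stacked = torch.cat((piv_chol / noise.sqrt(), eye), dim=-2)
q, r = torch.linalg.qr(stacked)
q = q[..., :n, :] / noise.sqrt()

# Matrix determinant lemma, mirroring the patched cache:
# log|L_k L_k^T + D| = 2 * log|diag(R)| - sum(log(1 / noise))
logdet = r.diagonal(dim1=-2, dim2=-1).abs().log().sum(-1).mul(2)
logdet = logdet - (1.0 / noise).log().sum([-1, -2])
print(logdet.shape)  # torch.Size([3]), one log-determinant per batch element
```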
gh_patches_debug_21567 | rasdani/github-patches | git_diff | lutris__lutris-766 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[vice] Plus/4 crt loading broken
Tried to load a game in CRT format, but Vice throws an error and exits:
```
:Launching Pirate Adventure: {'command': ['/home/user01/.local/share/lutris/runners/vice/bin/xplus4', '-chdir', '/Games/Romz/Commodore/plus4', '-TEDdsize', '-sdl2renderer', 'opengl', '-cartgeneric', '/Games/Romz/Commodore/plus4/Pirate Adventure.crt']}
DEBUG 2017-03-10 10:26:58,891 [thread]:Command env: STEAM_RUNTIME="/home/user01/.local/share/lutris/runtime/steam" LD_PRELOAD="" SDL_VIDEO_FULLSCREEN_DISPLAY="off" LD_LIBRARY_PATH="/home/user01/.local/share/lutris/runtime/lib32:/home/user01/.local/share/lutris/runtime/steam/i386/lib/i386-linux-gnu:/home/user01/.local/share/lutris/runtime/steam/i386/lib:/home/user01/.local/share/lutris/runtime/steam/i386/usr/lib/i386-linux-gnu:/home/user01/.local/share/lutris/runtime/steam/i386/usr/lib:/home/user01/.local/share/lutris/runtime/lib64:/home/user01/.local/share/lutris/runtime/steam/amd64/lib/x86_64-linux-gnu:/home/user01/.local/share/lutris/runtime/steam/amd64/lib:/home/user01/.local/share/lutris/runtime/steam/amd64/usr/lib/x86_64-linux-gnu:/home/user01/.local/share/lutris/runtime/steam/amd64/usr/lib:$LD_LIBRARY_PATH"
DEBUG 2017-03-10 10:26:58,891 [thread]:Running command: "/home/user01/.local/share/lutris/runners/vice/bin/xplus4" "-chdir" "/Games/Romz/Commodore/plus4" "-TEDdsize" "-sdl2renderer" "opengl" "-cartgeneric" "/Games/Romz/Commodore/plus4/Pirate Adventure.crt"
DEBUG 2017-03-10 10:26:58,891 [thread]:Storing environment variable STEAM_RUNTIME to /home/user01/.local/share/lutris/runtime/steam
DEBUG 2017-03-10 10:26:58,892 [thread]:Storing environment variable LD_PRELOAD to
DEBUG 2017-03-10 10:26:58,892 [thread]:Storing environment variable SDL_VIDEO_FULLSCREEN_DISPLAY to off
DEBUG 2017-03-10 10:26:58,893 [thread]:Storing environment variable LD_LIBRARY_PATH to /home/user01/.local/share/lutris/runtime/lib32:/home/user01/.local/share/lutris/runtime/steam/i386/lib/i386-linux-gnu:/home/user01/.local/share/lutris/runtime/steam/i386/lib:/home/user01/.local/share/lutris/runtime/steam/i386/usr/lib/i386-linux-gnu:/home/user01/.local/share/lutris/runtime/steam/i386/usr/lib:/home/user01/.local/share/lutris/runtime/lib64:/home/user01/.local/share/lutris/runtime/steam/amd64/lib/x86_64-linux-gnu:/home/user01/.local/share/lutris/runtime/steam/amd64/lib:/home/user01/.local/share/lutris/runtime/steam/amd64/usr/lib/x86_64-linux-gnu:/home/user01/.local/share/lutris/runtime/steam/amd64/usr/lib:$LD_LIBRARY_PATH
Error - Default keymap not found, this should be fixed. Going on anyway...
Error - Default keymap not found, this should be fixed. Going on anyway...
Detecting ISA HardSID boards.
Could not open '/dev/port'.
Cannot get permission to access $300.
Detecting PCI HardSID boards.
No PCI HardSID boards found.
Unknown option '-cartgeneric'.
Error parsing command-line options, bailing out. For help use '-help'
Exiting...
```
According to the `-help` output, the correct option for the Plus/4 is just `-cart`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/runners/vice.py`
Content:
```
1 import os
2 from lutris import settings
3 from lutris.util import system
4 from lutris.util.log import logger
5 from lutris.runners.runner import Runner
6
7
8 class vice(Runner):
9 description = "Commodore Emulator"
10 human_name = "Vice"
11 platforms = [
12 'Commodore 64',
13 'Commodore 128',
14 'Commodore VIC20',
15 'Commodore PET',
16 'Commodore Plus/4',
17 'Commodore CBM II',
18 ]
19
20 game_options = [{
21 "option": "main_file",
22 "type": "file",
23 "label": "ROM file",
24 'help': ("The game data, commonly called a ROM image.\n"
25 "Supported formats: X64, D64, G64, P64, D67, D71, D81, "
26 "D80, D82, D1M, D2M, D4M, T46, P00 and CRT.")
27 }]
28
29 runner_options = [
30 {
31 "option": "joy",
32 "type": "bool",
33 "label": "Use joysticks",
34 'default': False,
35 },
36 {
37 "option": "fullscreen",
38 "type": "bool",
39 "label": "Fullscreen",
40 'default': False,
41 },
42 {
43 "option": "double",
44 "type": "bool",
45 "label": "Scale up display by 2",
46 'default': True,
47 },
48 {
49 'option': 'aspect_ratio',
50 'type': 'bool',
51 'label': 'Keep aspect ratio',
52 'default': True
53 },
54 {
55 'option': 'drivesound',
56 'type': 'bool',
57 'label': 'Enable sound emulation of disk drives',
58 'default': False
59 },
60 {
61 'option': 'renderer',
62 'type': 'choice',
63 'label': 'Graphics renderer',
64 'choices': [
65 ('OpenGL', 'opengl'),
66 ('Software', 'software')
67 ],
68 'default': 'opengl'
69 },
70 {
71 "option": "machine",
72 "type": "choice",
73 "label": "Machine",
74 "choices": [
75 ("C64", "c64"),
76 ("C128", "c128"),
77 ("vic20", "vic20"),
78 ("PET", "pet"),
79 ("Plus/4", "plus4"),
80 ("CBM-II", "cbmii")
81 ],
82 "default": "c64"
83 }
84 ]
85
86 def get_platform(self):
87 machine = self.game_config.get('machine')
88 if machine:
89 for index, choice in enumerate(self.machine_choices):
90 if choice[1] == machine:
91 return self.platforms[index]
92 return ''
93
94 def get_executable(self, machine=None):
95 if not machine:
96 machine = "c64"
97 executables = {
98 "c64": "x64",
99 "c128": "x128",
100 "vic20": "xvic",
101 "pet": "xpet",
102 "plus4": "xplus4",
103 "cbmii": "xcbm2"
104 }
105 try:
106 executable = executables[machine]
107 except KeyError:
108 raise ValueError("Invalid machine '%s'" % machine)
109 return os.path.join(settings.RUNNER_DIR, "vice/bin/%s" % executable)
110
111 def install(self, version=None, downloader=None, callback=None):
112 def on_runner_installed(*args):
113 config_path = system.create_folder('~/.vice')
114 lib_dir = os.path.join(settings.RUNNER_DIR, 'vice/lib/vice')
115 if not os.path.exists(lib_dir):
116 lib_dir = os.path.join(settings.RUNNER_DIR, 'vice/lib64/vice')
117 if not os.path.exists(lib_dir):
118 logger.error('Missing lib folder in the Vice runner')
119 else:
120 system.merge_folders(lib_dir, config_path)
121 if callback:
122 callback()
123
124 super(vice, self).install(version, downloader, on_runner_installed)
125
126 def get_roms_path(self, machine=None):
127 if not machine:
128 machine = "c64"
129 paths = {
130 "c64": "C64",
131 "c128": "C128",
132 "vic20": "VIC20",
133 "pet": "PET",
134 "plus4": "PLUS4",
135 "cmbii": "CBM-II"
136 }
137 root_dir = os.path.dirname(os.path.dirname(self.get_executable()))
138 return os.path.join(root_dir, 'lib64/vice', paths[machine])
139
140 def get_option_prefix(self, machine):
141 prefixes = {
142 'c64': 'VICII',
143 'c128': 'VICII',
144 'vic20': 'VIC',
145 'pet': 'CRTC',
146 'plus4': 'TED',
147 'cmbii': 'CRTC'
148 }
149 return prefixes[machine]
150
151 def get_joydevs(self, machine):
152 joydevs = {
153 'c64': 2,
154 'c128': 2,
155 'vic20': 1,
156 'pet': 0,
157 'plus4': 2,
158 'cmbii': 0
159 }
160 return joydevs[machine]
161
162 def play(self):
163 machine = self.runner_config.get("machine")
164
165 rom = self.game_config.get('main_file')
166 if not rom:
167 return {'error': 'CUSTOM', 'text': 'No rom provided'}
168 if not os.path.exists(rom):
169 return {'error': 'FILE_NOT_FOUND', 'file': rom}
170
171 params = [self.get_executable(machine)]
172 rom_dir = os.path.dirname(rom)
173 params.append('-chdir')
174 params.append(rom_dir)
175 option_prefix = self.get_option_prefix(machine)
176
177 if self.runner_config.get("fullscreen"):
178 params.append('-{}full'.format(option_prefix))
179
180 if self.runner_config.get("double"):
181 params.append("-{}dsize".format(option_prefix))
182
183 if self.runner_config.get('renderer'):
184 params.append('-sdl2renderer')
185 params.append(self.runner_config['renderer'])
186
187 if not self.runner_config.get('aspect_ratio', True):
188 params.append('-sdlaspectmode')
189 params.append('0')
190
191 if self.runner_config.get('drivesound'):
192 params.append('-drivesound')
193
194 if self.runner_config.get("joy"):
195 for dev in range(self.get_joydevs(machine)):
196 params += ["-joydev{}".format(dev + 1), "4"]
197 if rom.endswith('.crt'):
198 params.append('-cartgeneric')
199 params.append(rom)
200 return {'command': params}
201
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/runners/vice.py b/lutris/runners/vice.py
--- a/lutris/runners/vice.py
+++ b/lutris/runners/vice.py
@@ -159,6 +159,24 @@
}
return joydevs[machine]
+ def get_rom_args(self, machine, rom):
+ args = []
+
+ if rom.endswith('.crt'):
+ crt_option = {
+ 'c64': "-cartcrt",
+ 'c128': "-cartcrt",
+ 'vic20': "-cartgeneric",
+ 'pet': None,
+ 'plus4': "-cart",
+ 'cmbii': None,
+ }
+ if (crt_option[machine]):
+ args.append(crt_option[machine])
+
+ args.append(rom)
+ return args
+
def play(self):
machine = self.runner_config.get("machine")
@@ -194,7 +212,6 @@
if self.runner_config.get("joy"):
for dev in range(self.get_joydevs(machine)):
params += ["-joydev{}".format(dev + 1), "4"]
- if rom.endswith('.crt'):
- params.append('-cartgeneric')
- params.append(rom)
+
+ params.extend(self.get_rom_args(machine, rom))
return {'command': params}
| {"golden_diff": "diff --git a/lutris/runners/vice.py b/lutris/runners/vice.py\n--- a/lutris/runners/vice.py\n+++ b/lutris/runners/vice.py\n@@ -159,6 +159,24 @@\n }\n return joydevs[machine]\n \n+ def get_rom_args(self, machine, rom):\n+ args = []\n+\n+ if rom.endswith('.crt'):\n+ crt_option = {\n+ 'c64': \"-cartcrt\",\n+ 'c128': \"-cartcrt\",\n+ 'vic20': \"-cartgeneric\",\n+ 'pet': None,\n+ 'plus4': \"-cart\",\n+ 'cmbii': None,\n+ }\n+ if (crt_option[machine]):\n+ args.append(crt_option[machine])\n+\n+ args.append(rom)\n+ return args\n+\n def play(self):\n machine = self.runner_config.get(\"machine\")\n \n@@ -194,7 +212,6 @@\n if self.runner_config.get(\"joy\"):\n for dev in range(self.get_joydevs(machine)):\n params += [\"-joydev{}\".format(dev + 1), \"4\"]\n- if rom.endswith('.crt'):\n- params.append('-cartgeneric')\n- params.append(rom)\n+\n+ params.extend(self.get_rom_args(machine, rom))\n return {'command': params}\n", "issue": "[vice] Plus/4 crt loading broken\nTried to load a game in crt format, but vice throws a error and exit:\r\n\r\n```\r\n:Launching Pirate Adventure: {'command': ['/home/user01/.local/share/lutris/runners/vice/bin/xplus4', '-chdir', '/Games/Romz/Commodore/plus4', '-TEDdsize', '-sdl2renderer', 'opengl', '-cartgeneric', '/Games/Romz/Commodore/plus4/Pirate Adventure.crt']}\r\nDEBUG 2017-03-10 10:26:58,891 [thread]:Command env: STEAM_RUNTIME=\"/home/user01/.local/share/lutris/runtime/steam\" LD_PRELOAD=\"\" SDL_VIDEO_FULLSCREEN_DISPLAY=\"off\" LD_LIBRARY_PATH=\"/home/user01/.local/share/lutris/runtime/lib32:/home/user01/.local/share/lutris/runtime/steam/i386/lib/i386-linux-gnu:/home/user01/.local/share/lutris/runtime/steam/i386/lib:/home/user01/.local/share/lutris/runtime/steam/i386/usr/lib/i386-linux-gnu:/home/user01/.local/share/lutris/runtime/steam/i386/usr/lib:/home/user01/.local/share/lutris/runtime/lib64:/home/user01/.local/share/lutris/runtime/steam/amd64/lib/x86_64-linux-gnu:/home/user01/.local/share/lutris/runtime/steam/amd64/lib:/home/user01/.local/share/lutris/runtime/steam/amd64/usr/lib/x86_64-linux-gnu:/home/user01/.local/share/lutris/runtime/steam/amd64/usr/lib:$LD_LIBRARY_PATH\" \r\nDEBUG 2017-03-10 10:26:58,891 [thread]:Running command: \"/home/user01/.local/share/lutris/runners/vice/bin/xplus4\" \"-chdir\" \"/Games/Romz/Commodore/plus4\" \"-TEDdsize\" \"-sdl2renderer\" \"opengl\" \"-cartgeneric\" \"/Games/Romz/Commodore/plus4/Pirate Adventure.crt\"\r\nDEBUG 2017-03-10 10:26:58,891 [thread]:Storing environment variable STEAM_RUNTIME to /home/user01/.local/share/lutris/runtime/steam\r\nDEBUG 2017-03-10 10:26:58,892 [thread]:Storing environment variable LD_PRELOAD to \r\nDEBUG 2017-03-10 10:26:58,892 [thread]:Storing environment variable SDL_VIDEO_FULLSCREEN_DISPLAY to off\r\nDEBUG 2017-03-10 10:26:58,893 [thread]:Storing environment variable LD_LIBRARY_PATH to /home/user01/.local/share/lutris/runtime/lib32:/home/user01/.local/share/lutris/runtime/steam/i386/lib/i386-linux-gnu:/home/user01/.local/share/lutris/runtime/steam/i386/lib:/home/user01/.local/share/lutris/runtime/steam/i386/usr/lib/i386-linux-gnu:/home/user01/.local/share/lutris/runtime/steam/i386/usr/lib:/home/user01/.local/share/lutris/runtime/lib64:/home/user01/.local/share/lutris/runtime/steam/amd64/lib/x86_64-linux-gnu:/home/user01/.local/share/lutris/runtime/steam/amd64/lib:/home/user01/.local/share/lutris/runtime/steam/amd64/usr/lib/x86_64-linux-gnu:/home/user01/.local/share/lutris/runtime/steam/amd64/usr/lib:$LD_LIBRARY_PATH\r\nError - Default keymap not found, this should be fixed. 
Going on anyway...\r\nError - Default keymap not found, this should be fixed. Going on anyway...\r\nDetecting ISA HardSID boards.\r\nCould not open '/dev/port'.\r\nCannot get permission to access $300.\r\nDetecting PCI HardSID boards.\r\nNo PCI HardSID boards found.\r\nUnknown option '-cartgeneric'.\r\nError parsing command-line options, bailing out. For help use '-help'\r\nExiting...\r\n```\r\n\r\naccording to the -help it's just -cart for the plus/4\n", "before_files": [{"content": "import os\nfrom lutris import settings\nfrom lutris.util import system\nfrom lutris.util.log import logger\nfrom lutris.runners.runner import Runner\n\n\nclass vice(Runner):\n description = \"Commodore Emulator\"\n human_name = \"Vice\"\n platforms = [\n 'Commodore 64',\n 'Commodore 128',\n 'Commodore VIC20',\n 'Commodore PET',\n 'Commodore Plus/4',\n 'Commodore CBM II',\n ]\n\n game_options = [{\n \"option\": \"main_file\",\n \"type\": \"file\",\n \"label\": \"ROM file\",\n 'help': (\"The game data, commonly called a ROM image.\\n\"\n \"Supported formats: X64, D64, G64, P64, D67, D71, D81, \"\n \"D80, D82, D1M, D2M, D4M, T46, P00 and CRT.\")\n }]\n\n runner_options = [\n {\n \"option\": \"joy\",\n \"type\": \"bool\",\n \"label\": \"Use joysticks\",\n 'default': False,\n },\n {\n \"option\": \"fullscreen\",\n \"type\": \"bool\",\n \"label\": \"Fullscreen\",\n 'default': False,\n },\n {\n \"option\": \"double\",\n \"type\": \"bool\",\n \"label\": \"Scale up display by 2\",\n 'default': True,\n },\n {\n 'option': 'aspect_ratio',\n 'type': 'bool',\n 'label': 'Keep aspect ratio',\n 'default': True\n },\n {\n 'option': 'drivesound',\n 'type': 'bool',\n 'label': 'Enable sound emulation of disk drives',\n 'default': False\n },\n {\n 'option': 'renderer',\n 'type': 'choice',\n 'label': 'Graphics renderer',\n 'choices': [\n ('OpenGL', 'opengl'),\n ('Software', 'software')\n ],\n 'default': 'opengl'\n },\n {\n \"option\": \"machine\",\n \"type\": \"choice\",\n \"label\": \"Machine\",\n \"choices\": [\n (\"C64\", \"c64\"),\n (\"C128\", \"c128\"),\n (\"vic20\", \"vic20\"),\n (\"PET\", \"pet\"),\n (\"Plus/4\", \"plus4\"),\n (\"CBM-II\", \"cbmii\")\n ],\n \"default\": \"c64\"\n }\n ]\n\n def get_platform(self):\n machine = self.game_config.get('machine')\n if machine:\n for index, choice in enumerate(self.machine_choices):\n if choice[1] == machine:\n return self.platforms[index]\n return ''\n\n def get_executable(self, machine=None):\n if not machine:\n machine = \"c64\"\n executables = {\n \"c64\": \"x64\",\n \"c128\": \"x128\",\n \"vic20\": \"xvic\",\n \"pet\": \"xpet\",\n \"plus4\": \"xplus4\",\n \"cbmii\": \"xcbm2\"\n }\n try:\n executable = executables[machine]\n except KeyError:\n raise ValueError(\"Invalid machine '%s'\" % machine)\n return os.path.join(settings.RUNNER_DIR, \"vice/bin/%s\" % executable)\n\n def install(self, version=None, downloader=None, callback=None):\n def on_runner_installed(*args):\n config_path = system.create_folder('~/.vice')\n lib_dir = os.path.join(settings.RUNNER_DIR, 'vice/lib/vice')\n if not os.path.exists(lib_dir):\n lib_dir = os.path.join(settings.RUNNER_DIR, 'vice/lib64/vice')\n if not os.path.exists(lib_dir):\n logger.error('Missing lib folder in the Vice runner')\n else:\n system.merge_folders(lib_dir, config_path)\n if callback:\n callback()\n\n super(vice, self).install(version, downloader, on_runner_installed)\n\n def get_roms_path(self, machine=None):\n if not machine:\n machine = \"c64\"\n paths = {\n \"c64\": \"C64\",\n \"c128\": \"C128\",\n \"vic20\": \"VIC20\",\n \"pet\": 
\"PET\",\n \"plus4\": \"PLUS4\",\n \"cmbii\": \"CBM-II\"\n }\n root_dir = os.path.dirname(os.path.dirname(self.get_executable()))\n return os.path.join(root_dir, 'lib64/vice', paths[machine])\n\n def get_option_prefix(self, machine):\n prefixes = {\n 'c64': 'VICII',\n 'c128': 'VICII',\n 'vic20': 'VIC',\n 'pet': 'CRTC',\n 'plus4': 'TED',\n 'cmbii': 'CRTC'\n }\n return prefixes[machine]\n\n def get_joydevs(self, machine):\n joydevs = {\n 'c64': 2,\n 'c128': 2,\n 'vic20': 1,\n 'pet': 0,\n 'plus4': 2,\n 'cmbii': 0\n }\n return joydevs[machine]\n\n def play(self):\n machine = self.runner_config.get(\"machine\")\n\n rom = self.game_config.get('main_file')\n if not rom:\n return {'error': 'CUSTOM', 'text': 'No rom provided'}\n if not os.path.exists(rom):\n return {'error': 'FILE_NOT_FOUND', 'file': rom}\n\n params = [self.get_executable(machine)]\n rom_dir = os.path.dirname(rom)\n params.append('-chdir')\n params.append(rom_dir)\n option_prefix = self.get_option_prefix(machine)\n\n if self.runner_config.get(\"fullscreen\"):\n params.append('-{}full'.format(option_prefix))\n\n if self.runner_config.get(\"double\"):\n params.append(\"-{}dsize\".format(option_prefix))\n\n if self.runner_config.get('renderer'):\n params.append('-sdl2renderer')\n params.append(self.runner_config['renderer'])\n\n if not self.runner_config.get('aspect_ratio', True):\n params.append('-sdlaspectmode')\n params.append('0')\n\n if self.runner_config.get('drivesound'):\n params.append('-drivesound')\n\n if self.runner_config.get(\"joy\"):\n for dev in range(self.get_joydevs(machine)):\n params += [\"-joydev{}\".format(dev + 1), \"4\"]\n if rom.endswith('.crt'):\n params.append('-cartgeneric')\n params.append(rom)\n return {'command': params}\n", "path": "lutris/runners/vice.py"}], "after_files": [{"content": "import os\nfrom lutris import settings\nfrom lutris.util import system\nfrom lutris.util.log import logger\nfrom lutris.runners.runner import Runner\n\n\nclass vice(Runner):\n description = \"Commodore Emulator\"\n human_name = \"Vice\"\n platforms = [\n 'Commodore 64',\n 'Commodore 128',\n 'Commodore VIC20',\n 'Commodore PET',\n 'Commodore Plus/4',\n 'Commodore CBM II',\n ]\n\n game_options = [{\n \"option\": \"main_file\",\n \"type\": \"file\",\n \"label\": \"ROM file\",\n 'help': (\"The game data, commonly called a ROM image.\\n\"\n \"Supported formats: X64, D64, G64, P64, D67, D71, D81, \"\n \"D80, D82, D1M, D2M, D4M, T46, P00 and CRT.\")\n }]\n\n runner_options = [\n {\n \"option\": \"joy\",\n \"type\": \"bool\",\n \"label\": \"Use joysticks\",\n 'default': False,\n },\n {\n \"option\": \"fullscreen\",\n \"type\": \"bool\",\n \"label\": \"Fullscreen\",\n 'default': False,\n },\n {\n \"option\": \"double\",\n \"type\": \"bool\",\n \"label\": \"Scale up display by 2\",\n 'default': True,\n },\n {\n 'option': 'aspect_ratio',\n 'type': 'bool',\n 'label': 'Keep aspect ratio',\n 'default': True\n },\n {\n 'option': 'drivesound',\n 'type': 'bool',\n 'label': 'Enable sound emulation of disk drives',\n 'default': False\n },\n {\n 'option': 'renderer',\n 'type': 'choice',\n 'label': 'Graphics renderer',\n 'choices': [\n ('OpenGL', 'opengl'),\n ('Software', 'software')\n ],\n 'default': 'opengl'\n },\n {\n \"option\": \"machine\",\n \"type\": \"choice\",\n \"label\": \"Machine\",\n \"choices\": [\n (\"C64\", \"c64\"),\n (\"C128\", \"c128\"),\n (\"vic20\", \"vic20\"),\n (\"PET\", \"pet\"),\n (\"Plus/4\", \"plus4\"),\n (\"CBM-II\", \"cbmii\")\n ],\n \"default\": \"c64\"\n }\n ]\n\n def get_platform(self):\n machine = 
self.game_config.get('machine')\n if machine:\n for index, choice in enumerate(self.machine_choices):\n if choice[1] == machine:\n return self.platforms[index]\n return ''\n\n def get_executable(self, machine=None):\n if not machine:\n machine = \"c64\"\n executables = {\n \"c64\": \"x64\",\n \"c128\": \"x128\",\n \"vic20\": \"xvic\",\n \"pet\": \"xpet\",\n \"plus4\": \"xplus4\",\n \"cbmii\": \"xcbm2\"\n }\n try:\n executable = executables[machine]\n except KeyError:\n raise ValueError(\"Invalid machine '%s'\" % machine)\n return os.path.join(settings.RUNNER_DIR, \"vice/bin/%s\" % executable)\n\n def install(self, version=None, downloader=None, callback=None):\n def on_runner_installed(*args):\n config_path = system.create_folder('~/.vice')\n lib_dir = os.path.join(settings.RUNNER_DIR, 'vice/lib/vice')\n if not os.path.exists(lib_dir):\n lib_dir = os.path.join(settings.RUNNER_DIR, 'vice/lib64/vice')\n if not os.path.exists(lib_dir):\n logger.error('Missing lib folder in the Vice runner')\n else:\n system.merge_folders(lib_dir, config_path)\n if callback:\n callback()\n\n super(vice, self).install(version, downloader, on_runner_installed)\n\n def get_roms_path(self, machine=None):\n if not machine:\n machine = \"c64\"\n paths = {\n \"c64\": \"C64\",\n \"c128\": \"C128\",\n \"vic20\": \"VIC20\",\n \"pet\": \"PET\",\n \"plus4\": \"PLUS4\",\n \"cmbii\": \"CBM-II\"\n }\n root_dir = os.path.dirname(os.path.dirname(self.get_executable()))\n return os.path.join(root_dir, 'lib64/vice', paths[machine])\n\n def get_option_prefix(self, machine):\n prefixes = {\n 'c64': 'VICII',\n 'c128': 'VICII',\n 'vic20': 'VIC',\n 'pet': 'CRTC',\n 'plus4': 'TED',\n 'cmbii': 'CRTC'\n }\n return prefixes[machine]\n\n def get_joydevs(self, machine):\n joydevs = {\n 'c64': 2,\n 'c128': 2,\n 'vic20': 1,\n 'pet': 0,\n 'plus4': 2,\n 'cmbii': 0\n }\n return joydevs[machine]\n\n def get_rom_args(self, machine, rom):\n args = []\n\n if rom.endswith('.crt'):\n crt_option = {\n 'c64': \"-cartcrt\",\n 'c128': \"-cartcrt\",\n 'vic20': \"-cartgeneric\",\n 'pet': None,\n 'plus4': \"-cart\",\n 'cmbii': None,\n }\n if (crt_option[machine]):\n args.append(crt_option[machine])\n\n args.append(rom)\n return args\n\n def play(self):\n machine = self.runner_config.get(\"machine\")\n\n rom = self.game_config.get('main_file')\n if not rom:\n return {'error': 'CUSTOM', 'text': 'No rom provided'}\n if not os.path.exists(rom):\n return {'error': 'FILE_NOT_FOUND', 'file': rom}\n\n params = [self.get_executable(machine)]\n rom_dir = os.path.dirname(rom)\n params.append('-chdir')\n params.append(rom_dir)\n option_prefix = self.get_option_prefix(machine)\n\n if self.runner_config.get(\"fullscreen\"):\n params.append('-{}full'.format(option_prefix))\n\n if self.runner_config.get(\"double\"):\n params.append(\"-{}dsize\".format(option_prefix))\n\n if self.runner_config.get('renderer'):\n params.append('-sdl2renderer')\n params.append(self.runner_config['renderer'])\n\n if not self.runner_config.get('aspect_ratio', True):\n params.append('-sdlaspectmode')\n params.append('0')\n\n if self.runner_config.get('drivesound'):\n params.append('-drivesound')\n\n if self.runner_config.get(\"joy\"):\n for dev in range(self.get_joydevs(machine)):\n params += [\"-joydev{}\".format(dev + 1), \"4\"]\n\n params.extend(self.get_rom_args(machine, rom))\n return {'command': params}\n", "path": "lutris/runners/vice.py"}]} | 3,248 | 313 |
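The essence of the patch in the record above is a per-machine table of cartridge options, so that `xplus4` is invoked with `-cart` instead of the unsupported `-cartgeneric`. Here is a stand-alone sketch of that mapping; the option strings are taken from the diff, while the helper name and the example ROM filename are hypothetical.

```python
# Cartridge option per machine; None means no cartridge flag is added.
CRT_OPTIONS = {
    "c64": "-cartcrt",
    "c128": "-cartcrt",
    "vic20": "-cartgeneric",
    "pet": None,
    "plus4": "-cart",   # xplus4 rejects -cartgeneric, which caused the original error
    "cmbii": None,
}


def rom_args(machine, rom):
    """Build the ROM-related tail of the emulator command line."""
    args = []
    if rom.endswith(".crt") and CRT_OPTIONS.get(machine):
        args.append(CRT_OPTIONS[machine])
    args.append(rom)
    return args


print(rom_args("plus4", "Pirate Adventure.crt"))
# ['-cart', 'Pirate Adventure.crt']
```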
gh_patches_debug_32597 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-2401 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add version command/flag to the Insights CLI
Add a `version` command and a `--version` flag/argument that print version info from `insights.get_nvr()`. 
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `insights/command_parser.py`
Content:
```
1 #!/usr/bin/env python
2 """
3 Command Parser module
4 ---------------------
5 Implements the ``insights`` command line. Each function is the first
6 argument followed by the function specific arguments. See USAGE text
7 below.
8 """
9 from __future__ import print_function
10
11 import argparse
12 import sys
13
14 USAGE = """insights <command> [<args>]
15 Available commands:
16 cat Execute a spec and show the output
17 collect Collect all specs against the client and create an Insights archive.
18 inspect Execute component and shell out to ipython for evaluation.
19 info View info and docs for Insights Core components.
20 ocpshell Interactive evaluation of archives, directories, or individual yaml files.
21 run Run insights-core against host or an archive.
22 """
23
24
25 class InsightsCli(object):
26 """
27 Class to implement the cli module.
28 Each command is called as a method of this class and all
29 arg parsing is performed in the separate module that
30 actually implements the command. the args "insights command"
31 are not passed to the submodule.
32 """
33
34 def __init__(self):
35 parser = argparse.ArgumentParser(
36 description="Insights Core command line execution",
37 usage=USAGE)
38 parser.add_argument('command', help='Insights Core command to run')
39 args = parser.parse_args(sys.argv[1:2])
40 if not hasattr(self, args.command):
41 print('Unrecognized command')
42 parser.print_help()
43 sys.exit(1)
44 # remove the sub-command arg from sys.argv
45 sys.argv.pop(1)
46 # Use dispatch pattern to execute command
47 getattr(self, args.command)()
48
49 def cat(self):
50 from .tools.cat import main as cat_main
51 cat_main()
52
53 def collect(self):
54 from .collect import main as collect_main
55 collect_main()
56
57 def info(self):
58 from .tools.query import main as query_main
59 query_main()
60
61 def inspect(self):
62 from .tools.insights_inspect import main as inspect_main
63 inspect_main()
64
65 def ocpshell(self):
66 from .ocpshell import main as ocpshell_main
67 ocpshell_main()
68
69 def run(self):
70 from insights import run
71 if "" not in sys.path:
72 sys.path.insert(0, "")
73 run(print_summary=True)
74
75
76 def fix_arg_dashes():
77
78 en_dash = '\u2013'
79 em_dash = '\u2014'
80
81 # replace unicode (en dash and em dash) dashes from argument definitions that may have been copy
82 # and pasted from another source
83 i = 1
84 for a in sys.argv[1:]:
85 first = list(a)
86 first[0] = first[0].replace(em_dash, "--").replace(en_dash, "-")
87 sys.argv[i] = "".join(first)
88 i += 1
89
90
91 def main():
92 fix_arg_dashes()
93 try:
94 InsightsCli()
95 except SystemExit:
96 raise
97 except BaseException as ex:
98 print(ex)
99
100
101 if __name__ == "__main__":
102 main()
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/insights/command_parser.py b/insights/command_parser.py
--- a/insights/command_parser.py
+++ b/insights/command_parser.py
@@ -19,6 +19,7 @@
info View info and docs for Insights Core components.
ocpshell Interactive evaluation of archives, directories, or individual yaml files.
run Run insights-core against host or an archive.
+ version Show Insights Core version information and exit.
"""
@@ -36,6 +37,9 @@
description="Insights Core command line execution",
usage=USAGE)
parser.add_argument('command', help='Insights Core command to run')
+ parser.add_argument('--version', action='store_true', help='show Insights Core version information and exit')
+ if self._parse_version_arg():
+ self.version()
args = parser.parse_args(sys.argv[1:2])
if not hasattr(self, args.command):
print('Unrecognized command')
@@ -46,6 +50,12 @@
# Use dispatch pattern to execute command
getattr(self, args.command)()
+ def _parse_version_arg(self):
+ """
+ Manually check for version argument/flag in cases when command is not provided.
+ """
+ return '--version' in sys.argv[1:3]
+
def cat(self):
from .tools.cat import main as cat_main
cat_main()
@@ -72,6 +82,14 @@
sys.path.insert(0, "")
run(print_summary=True)
+ def version(self):
+ """
+ Print version information (NVR) and exit.
+ """
+ from insights import get_nvr
+ print(get_nvr())
+ sys.exit()
+
def fix_arg_dashes():
| {"golden_diff": "diff --git a/insights/command_parser.py b/insights/command_parser.py\n--- a/insights/command_parser.py\n+++ b/insights/command_parser.py\n@@ -19,6 +19,7 @@\n info View info and docs for Insights Core components.\n ocpshell Interactive evaluation of archives, directories, or individual yaml files.\n run Run insights-core against host or an archive.\n+ version Show Insights Core version information and exit.\n \"\"\"\n \n \n@@ -36,6 +37,9 @@\n description=\"Insights Core command line execution\",\n usage=USAGE)\n parser.add_argument('command', help='Insights Core command to run')\n+ parser.add_argument('--version', action='store_true', help='show Insights Core version information and exit')\n+ if self._parse_version_arg():\n+ self.version()\n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print('Unrecognized command')\n@@ -46,6 +50,12 @@\n # Use dispatch pattern to execute command\n getattr(self, args.command)()\n \n+ def _parse_version_arg(self):\n+ \"\"\"\n+ Manually check for version argument/flag in cases when command is not provided.\n+ \"\"\"\n+ return '--version' in sys.argv[1:3]\n+\n def cat(self):\n from .tools.cat import main as cat_main\n cat_main()\n@@ -72,6 +82,14 @@\n sys.path.insert(0, \"\")\n run(print_summary=True)\n \n+ def version(self):\n+ \"\"\"\n+ Print version information (NVR) and exit.\n+ \"\"\"\n+ from insights import get_nvr\n+ print(get_nvr())\n+ sys.exit()\n+\n \n def fix_arg_dashes():\n", "issue": "Add version command/flag to the Insights CLI \nAdd `version` command and `--version` flag/arg to print version info from `insights.get_nvr()`. \n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nCommand Parser module\n---------------------\nImplements the ``insights`` command line. Each function is the first\nargument followed by the function specific arguments. See USAGE text\nbelow.\n\"\"\"\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nUSAGE = \"\"\"insights <command> [<args>]\nAvailable commands:\n cat Execute a spec and show the output\n collect Collect all specs against the client and create an Insights archive.\n inspect Execute component and shell out to ipython for evaluation.\n info View info and docs for Insights Core components.\n ocpshell Interactive evaluation of archives, directories, or individual yaml files.\n run Run insights-core against host or an archive.\n\"\"\"\n\n\nclass InsightsCli(object):\n \"\"\"\n Class to implement the cli module.\n Each command is called as a method of this class and all\n arg parsing is performed in the separate module that\n actually implements the command. 
the args \"insights command\"\n are not passed to the submodule.\n \"\"\"\n\n def __init__(self):\n parser = argparse.ArgumentParser(\n description=\"Insights Core command line execution\",\n usage=USAGE)\n parser.add_argument('command', help='Insights Core command to run')\n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print('Unrecognized command')\n parser.print_help()\n sys.exit(1)\n # remove the sub-command arg from sys.argv\n sys.argv.pop(1)\n # Use dispatch pattern to execute command\n getattr(self, args.command)()\n\n def cat(self):\n from .tools.cat import main as cat_main\n cat_main()\n\n def collect(self):\n from .collect import main as collect_main\n collect_main()\n\n def info(self):\n from .tools.query import main as query_main\n query_main()\n\n def inspect(self):\n from .tools.insights_inspect import main as inspect_main\n inspect_main()\n\n def ocpshell(self):\n from .ocpshell import main as ocpshell_main\n ocpshell_main()\n\n def run(self):\n from insights import run\n if \"\" not in sys.path:\n sys.path.insert(0, \"\")\n run(print_summary=True)\n\n\ndef fix_arg_dashes():\n\n en_dash = '\\u2013'\n em_dash = '\\u2014'\n\n # replace unicode (en dash and em dash) dashes from argument definitions that may have been copy\n # and pasted from another source\n i = 1\n for a in sys.argv[1:]:\n first = list(a)\n first[0] = first[0].replace(em_dash, \"--\").replace(en_dash, \"-\")\n sys.argv[i] = \"\".join(first)\n i += 1\n\n\ndef main():\n fix_arg_dashes()\n try:\n InsightsCli()\n except SystemExit:\n raise\n except BaseException as ex:\n print(ex)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "insights/command_parser.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nCommand Parser module\n---------------------\nImplements the ``insights`` command line. Each function is the first\nargument followed by the function specific arguments. See USAGE text\nbelow.\n\"\"\"\nfrom __future__ import print_function\n\nimport argparse\nimport sys\n\nUSAGE = \"\"\"insights <command> [<args>]\nAvailable commands:\n cat Execute a spec and show the output\n collect Collect all specs against the client and create an Insights archive.\n inspect Execute component and shell out to ipython for evaluation.\n info View info and docs for Insights Core components.\n ocpshell Interactive evaluation of archives, directories, or individual yaml files.\n run Run insights-core against host or an archive.\n version Show Insights Core version information and exit.\n\"\"\"\n\n\nclass InsightsCli(object):\n \"\"\"\n Class to implement the cli module.\n Each command is called as a method of this class and all\n arg parsing is performed in the separate module that\n actually implements the command. 
the args \"insights command\"\n are not passed to the submodule.\n \"\"\"\n\n def __init__(self):\n parser = argparse.ArgumentParser(\n description=\"Insights Core command line execution\",\n usage=USAGE)\n parser.add_argument('command', help='Insights Core command to run')\n parser.add_argument('--version', action='store_true', help='show Insights Core version information and exit')\n if self._parse_version_arg():\n self.version()\n args = parser.parse_args(sys.argv[1:2])\n if not hasattr(self, args.command):\n print('Unrecognized command')\n parser.print_help()\n sys.exit(1)\n # remove the sub-command arg from sys.argv\n sys.argv.pop(1)\n # Use dispatch pattern to execute command\n getattr(self, args.command)()\n\n def _parse_version_arg(self):\n \"\"\"\n Manually check for version argument/flag in cases when command is not provided.\n \"\"\"\n return '--version' in sys.argv[1:3]\n\n def cat(self):\n from .tools.cat import main as cat_main\n cat_main()\n\n def collect(self):\n from .collect import main as collect_main\n collect_main()\n\n def info(self):\n from .tools.query import main as query_main\n query_main()\n\n def inspect(self):\n from .tools.insights_inspect import main as inspect_main\n inspect_main()\n\n def ocpshell(self):\n from .ocpshell import main as ocpshell_main\n ocpshell_main()\n\n def run(self):\n from insights import run\n if \"\" not in sys.path:\n sys.path.insert(0, \"\")\n run(print_summary=True)\n\n def version(self):\n \"\"\"\n Print version information (NVR) and exit.\n \"\"\"\n from insights import get_nvr\n print(get_nvr())\n sys.exit()\n\n\ndef fix_arg_dashes():\n\n en_dash = '\\u2013'\n em_dash = '\\u2014'\n\n # replace unicode (en dash and em dash) dashes from argument definitions that may have been copy\n # and pasted from another source\n i = 1\n for a in sys.argv[1:]:\n first = list(a)\n first[0] = first[0].replace(em_dash, \"--\").replace(en_dash, \"-\")\n sys.argv[i] = \"\".join(first)\n i += 1\n\n\ndef main():\n fix_arg_dashes()\n try:\n InsightsCli()\n except SystemExit:\n raise\n except BaseException as ex:\n print(ex)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "insights/command_parser.py"}]} | 1,161 | 392 |
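Outside the insights code base, the behaviour added by the patch above can be reproduced with a few lines of `argparse`: a `--version` flag (or a `version` command) is checked before sub-command dispatch so it works even when no command is supplied. The sketch below is only an illustration; `get_nvr()` here is a placeholder for `insights.get_nvr()` and the returned string is invented.

```python
import argparse
import sys


def get_nvr():
    return "insights-core-3.0.8-1"  # placeholder value


def main(argv=None):
    argv = sys.argv[1:] if argv is None else argv
    # Honour the version request before any sub-command parsing or dispatch.
    if "--version" in argv[:2] or argv[:1] == ["version"]:
        print(get_nvr())
        return
    parser = argparse.ArgumentParser(usage="tool <command> [<args>]")
    parser.add_argument("command", help="sub-command to run")
    args = parser.parse_args(argv[:1])
    print("dispatching to", args.command)


if __name__ == "__main__":
    main(["--version"])  # prints the placeholder NVR and returns
```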
gh_patches_debug_865 | rasdani/github-patches | git_diff | kubeflow__pipelines-1666 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`pip install kfp` does not install CLI
**What happened:**
```
$ virtualenv .venv
...
$ pip install kfp==0.1.23
...
$ kfp
Traceback (most recent call last):
File "/private/tmp/.venv/bin/kfp", line 6, in <module>
from kfp.__main__ import main
File "/private/tmp/.venv/lib/python3.7/site-packages/kfp/__main__.py", line 15, in <module>
from .cli.cli import main
ModuleNotFoundError: No module named 'kfp.cli'
```
**What did you expect to happen:**
To run the CLI.
**Anything else you would like to add:**
I may be confused about what is expected to be available after installing the `kfp` package from pip: `setup.py` mentions an entry point named `kfp` in
https://github.com/kubeflow/pipelines/blob/812ca7f8836c47039c3b1f3daf23e68fbcee1a92/sdk/python/setup.py#L74
but `kfp/__main__.py` imports a `kfp.cli` package https://github.com/kubeflow/pipelines/blob/812ca7f8836c47039c3b1f3daf23e68fbcee1a92/sdk/python/kfp/__main__.py#L15
which is not included in the distribution https://github.com/kubeflow/pipelines/blob/812ca7f8836c47039c3b1f3daf23e68fbcee1a92/sdk/python/setup.py#L46-L54
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/setup.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from setuptools import setup
16
17 NAME = 'kfp'
18 VERSION = '0.1.24'
19
20 REQUIRES = [
21 'urllib3>=1.15,<1.25', #Fixing the version conflict with the "requests" package
22 'six >= 1.10',
23 'certifi',
24 'python-dateutil',
25 'PyYAML',
26 'google-cloud-storage>=1.13.0',
27 'kubernetes>=8.0.0, <=9.0.0',
28 'PyJWT>=1.6.4',
29 'cryptography>=2.4.2',
30 'google-auth>=1.6.1',
31 'requests_toolbelt>=0.8.0',
32 'cloudpickle',
33 'kfp-server-api >= 0.1.18, < 0.1.19', #Update the upper version whenever a new version of the kfp-server-api package is released. Update the lower version when there is a breaking change in kfp-server-api.
34 'argo-models == 2.2.1a', #2.2.1a is equivalent to argo 2.2.1
35 'jsonschema >= 3.0.1',
36 'tabulate == 0.8.3',
37 'click == 7.0'
38 ]
39
40 setup(
41 name=NAME,
42 version=VERSION,
43 description='KubeFlow Pipelines SDK',
44 author='google',
45 install_requires=REQUIRES,
46 packages=[
47 'kfp',
48 'kfp.compiler',
49 'kfp.components',
50 'kfp.components.structures',
51 'kfp.components.structures.kubernetes',
52 'kfp.dsl',
53 'kfp.notebook',
54 ],
55 classifiers=[
56 'Intended Audience :: Developers',
57 'Intended Audience :: Education',
58 'Intended Audience :: Science/Research',
59 'License :: OSI Approved :: Apache Software License',
60 'Programming Language :: Python :: 3',
61 'Programming Language :: Python :: 3.5',
62 'Programming Language :: Python :: 3.6',
63 'Programming Language :: Python :: 3.7',
64 'Topic :: Scientific/Engineering',
65 'Topic :: Scientific/Engineering :: Artificial Intelligence',
66 'Topic :: Software Development',
67 'Topic :: Software Development :: Libraries',
68 'Topic :: Software Development :: Libraries :: Python Modules',
69 ],
70 python_requires='>=3.5.3',
71 include_package_data=True,
72 entry_points={'console_scripts': [
73 'dsl-compile = kfp.compiler.main:main',
74 'kfp=kfp.__main__:main']})
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -45,6 +45,7 @@
install_requires=REQUIRES,
packages=[
'kfp',
+ 'kfp.cli',
'kfp.compiler',
'kfp.components',
'kfp.components.structures',
| {"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -45,6 +45,7 @@\n install_requires=REQUIRES,\n packages=[\n 'kfp',\n+ 'kfp.cli',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n", "issue": "`pip install kfp` does not install CLI\n**What happened:**\r\n\r\n```\r\n$ virtualenv .venv\r\n...\r\n$ pip install kfp==0.1.23\r\n...\r\n$ kfp\r\nTraceback (most recent call last):\r\n File \"/private/tmp/.venv/bin/kfp\", line 6, in <module>\r\n from kfp.__main__ import main\r\n File \"/private/tmp/.venv/lib/python3.7/site-packages/kfp/__main__.py\", line 15, in <module>\r\n from .cli.cli import main\r\nModuleNotFoundError: No module named 'kfp.cli'\r\n```\r\n\r\n**What did you expect to happen:**\r\nTo run the CLI.\r\n\r\n**Anything else you would like to add:**\r\nI could be confused about what is expected to be available after installing the kfp package from pip - setup.py mentions an entrypoint named kfp in \r\n\r\nhttps://github.com/kubeflow/pipelines/blob/812ca7f8836c47039c3b1f3daf23e68fbcee1a92/sdk/python/setup.py#L74\r\n\r\nbut main.py imports a `kfp.cli` package https://github.com/kubeflow/pipelines/blob/812ca7f8836c47039c3b1f3daf23e68fbcee1a92/sdk/python/kfp/__main__.py#L15\r\n\r\nwhich is not included in the distribution https://github.com/kubeflow/pipelines/blob/812ca7f8836c47039c3b1f3daf23e68fbcee1a92/sdk/python/setup.py#L46-L54\r\n\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup\n\nNAME = 'kfp'\nVERSION = '0.1.24'\n\nREQUIRES = [\n 'urllib3>=1.15,<1.25', #Fixing the version conflict with the \"requests\" package\n 'six >= 1.10',\n 'certifi',\n 'python-dateutil',\n 'PyYAML',\n 'google-cloud-storage>=1.13.0',\n 'kubernetes>=8.0.0, <=9.0.0',\n 'PyJWT>=1.6.4',\n 'cryptography>=2.4.2',\n 'google-auth>=1.6.1',\n 'requests_toolbelt>=0.8.0',\n 'cloudpickle',\n 'kfp-server-api >= 0.1.18, < 0.1.19', #Update the upper version whenever a new version of the kfp-server-api package is released. 
Update the lower version when there is a breaking change in kfp-server-api.\n 'argo-models == 2.2.1a', #2.2.1a is equivalent to argo 2.2.1\n 'jsonschema >= 3.0.1',\n 'tabulate == 0.8.3',\n 'click == 7.0'\n]\n\nsetup(\n name=NAME,\n version=VERSION,\n description='KubeFlow Pipelines SDK',\n author='google',\n install_requires=REQUIRES,\n packages=[\n 'kfp',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.components.structures.kubernetes',\n 'kfp.dsl',\n 'kfp.notebook',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.5.3',\n include_package_data=True,\n entry_points={'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main',\n 'kfp=kfp.__main__:main']})\n", "path": "sdk/python/setup.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup\n\nNAME = 'kfp'\nVERSION = '0.1.24'\n\nREQUIRES = [\n 'urllib3>=1.15,<1.25', #Fixing the version conflict with the \"requests\" package\n 'six >= 1.10',\n 'certifi',\n 'python-dateutil',\n 'PyYAML',\n 'google-cloud-storage>=1.13.0',\n 'kubernetes>=8.0.0, <=9.0.0',\n 'PyJWT>=1.6.4',\n 'cryptography>=2.4.2',\n 'google-auth>=1.6.1',\n 'requests_toolbelt>=0.8.0',\n 'cloudpickle',\n 'kfp-server-api >= 0.1.18, < 0.1.19', #Update the upper version whenever a new version of the kfp-server-api package is released. 
Update the lower version when there is a breaking change in kfp-server-api.\n 'argo-models == 2.2.1a', #2.2.1a is equivalent to argo 2.2.1\n 'jsonschema >= 3.0.1',\n 'tabulate == 0.8.3',\n 'click == 7.0'\n]\n\nsetup(\n name=NAME,\n version=VERSION,\n description='KubeFlow Pipelines SDK',\n author='google',\n install_requires=REQUIRES,\n packages=[\n 'kfp',\n 'kfp.cli',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.components.structures.kubernetes',\n 'kfp.dsl',\n 'kfp.notebook',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.5.3',\n include_package_data=True,\n entry_points={'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main',\n 'kfp=kfp.__main__:main']})\n", "path": "sdk/python/setup.py"}]} | 1,474 | 82 |
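The actual fix above simply adds `'kfp.cli'` to the hand-maintained `packages=` list. A common way to prevent this class of omission altogether is to let setuptools discover sub-packages itself; the sketch below illustrates that alternative only and is not the project's real `setup.py` (the version string and excludes are placeholders).

```python
from setuptools import find_packages, setup

setup(
    name="kfp",
    version="0.1.24",  # placeholder
    description="KubeFlow Pipelines SDK",
    # find_packages() picks up kfp.cli (and any future sub-package)
    # automatically, as long as each directory contains an __init__.py.
    packages=find_packages(exclude=["tests", "tests.*"]),
    entry_points={
        "console_scripts": [
            "dsl-compile = kfp.compiler.main:main",
            "kfp = kfp.__main__:main",
        ]
    },
)
```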
gh_patches_debug_48074 | rasdani/github-patches | git_diff | OpenMined__PySyft-1792 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Modify documentation generation code to use napoleon
Napoleon (https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html) allows us to use Google-style docstrings with Sphinx. This issue enables #1784.
--- END ISSUE ---
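For orientation before the file listing that follows: enabling Napoleon is normally just one more entry in the Sphinx `extensions` list, optionally followed by a few `napoleon_*` toggles, after which Google-style docstrings render cleanly. The fragment below is a hedged sketch, not the project's actual `conf.py`; the toggle values and the example function are illustrative.

```python
# Sketch of a conf.py fragment with Napoleon enabled.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.napoleon",   # parses Google/NumPy style docstrings
    "sphinx.ext.viewcode",
]

napoleon_google_docstring = True        # accept Google-style sections
napoleon_numpy_docstring = False
napoleon_include_init_with_doc = True


# Example of a Google-style docstring that Napoleon will parse:
def scale(value, factor=2.0):
    """Scale a value by a factor.

    Args:
        value (float): Quantity to scale.
        factor (float): Multiplier applied to ``value``.

    Returns:
        float: The scaled quantity.
    """
    return value * factor
```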
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # PySyft documentation build configuration file, created by
5 # sphinx-quickstart on Sat Jun 2 20:20:03 2018.
6 #
7 # This file is execfile()d with the current directory set to its
8 # containing dir.
9 #
10 # Note that not all possible configuration values are present in this
11 # autogenerated file.
12 #
13 # All configuration values have a default; values that are commented out
14 # serve to show the default.
15
16 # If extensions (or modules to document with autodoc) are in another directory,
17 # add these directories to sys.path here. If the directory is relative to the
18 # documentation root, use os.path.abspath to make it absolute, like shown here.
19
20 #
21 import os
22 import sys
23
24 sys.path.insert(0, os.path.abspath("../.."))
25
26
27 # -- General configuration ------------------------------------------------
28
29 # If your documentation needs a minimal Sphinx version, state it here.
30 #
31 # needs_sphinx = '1.0'
32
33 # Add any Sphinx extension module names here, as strings. They can be
34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 # ones.
36 extensions = [
37 "sphinx.ext.autodoc",
38 "sphinx.ext.todo",
39 "sphinx.ext.coverage",
40 "sphinx.ext.mathjax",
41 "sphinx.ext.viewcode",
42 "sphinx.ext.githubpages",
43 "sphinx.ext.autosummary",
44 ]
45
46 # Add any paths that contain templates here, relative to this directory.
47 templates_path = ["_templates"]
48
49 # The suffix(es) of source filenames.
50 # You can specify multiple suffix as a list of string:
51 #
52 # source_suffix = ['.rst', '.md']
53 source_suffix = ".rst"
54
55 # The master toctree document.
56 master_doc = "index"
57
58 # General information about the project.
59 project = "PySyft"
60 copyright = "2018, OpenMined Contributors"
61 author = "Andrew Trask"
62
63 # The version info for the project you're documenting, acts as replacement for
64 # |version| and |release|, also used in various other places throughout the
65 # built documents.
66 #
67 # The short X.Y version.
68 version = "0.1"
69 # The full version, including alpha/beta/rc tags.
70 release = "0.1"
71
72 # The language for content autogenerated by Sphinx. Refer to documentation
73 # for a list of supported languages.
74 #
75 # This is also used if you do content translation via gettext catalogs.
76 # Usually you set "language" from the command line for these cases.
77 language = "python"
78
79 # List of patterns, relative to source directory, that match files and
80 # directories to ignore when looking for source files.
81 # This patterns also effect to html_static_path and html_extra_path
82 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
83
84 # The name of the Pygments (syntax highlighting) style to use.
85 pygments_style = "sphinx"
86
87 # If true, `todo` and `todoList` produce output, else they produce nothing.
88 todo_include_todos = True
89
90 autosummary_generate = True
91
92
93 # -- Options for HTML output ----------------------------------------------
94
95 # The theme to use for HTML and HTML Help pages. See the documentation for
96 # a list of builtin themes.
97 #
98 html_theme = "sphinx_rtd_theme"
99 html_logo = "_static/img/logo.png"
100
101 # Theme options are theme-specific and customize the look and feel of a theme
102 # further. For a list of options available for each theme, see the
103 # documentation.
104 #
105 # html_theme_options = {}
106
107 # Add any paths that contain custom static files (such as style sheets) here,
108 # relative to this directory. They are copied after the builtin static files,
109 # so a file named "default.css" will overwrite the builtin "default.css".
110 html_static_path = ["_static"]
111
112
113 # -- Options for HTMLHelp output ------------------------------------------
114
115 # Output file base name for HTML help builder.
116 htmlhelp_basename = "PySyftdoc"
117
118 html_context = {"css_files": ["_static/css/PySyft_docs.css"]}
119
120 favicon = "_static/img/logo.png"
121
122
123 # -- Options for LaTeX output ---------------------------------------------
124
125 latex_elements = {
126 # The paper size ('letterpaper' or 'a4paper').
127 #
128 # 'papersize': 'letterpaper',
129 # The font size ('10pt', '11pt' or '12pt').
130 #
131 # 'pointsize': '10pt',
132 # Additional stuff for the LaTeX preamble.
133 #
134 # 'preamble': '',
135 # Latex figure (float) alignment
136 #
137 # 'figure_align': 'htbp',
138 }
139
140 # Grouping the document tree into LaTeX files. List of tuples
141 # (source start file, target name, title,
142 # author, documentclass [howto, manual, or own class]).
143 latex_documents = [(master_doc, "PySyft.tex", "PySyft Documentation", "Andrew Trask", "manual")]
144
145
146 # -- Options for manual page output ---------------------------------------
147
148 # One entry per manual page. List of tuples
149 # (source start file, name, description, authors, manual section).
150 man_pages = [(master_doc, "pysyft", "PySyft Documentation", [author], 1)]
151
152
153 # -- Options for Texinfo output -------------------------------------------
154
155 # Grouping the document tree into Texinfo files. List of tuples
156 # (source start file, target name, title, author,
157 # dir menu entry, description, category)
158 texinfo_documents = [
159 (
160 master_doc,
161 "PySyft",
162 "PySyft Documentation",
163 author,
164 "PySyft",
165 "One line description of project.",
166 "Miscellaneous",
167 )
168 ]
169
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -35,12 +35,13 @@
# ones.
extensions = [
"sphinx.ext.autodoc",
- "sphinx.ext.todo",
+ "sphinx.ext.autosummary",
"sphinx.ext.coverage",
+ "sphinx.ext.githubpages",
"sphinx.ext.mathjax",
+ "sphinx.ext.napoleon",
+ "sphinx.ext.todo",
"sphinx.ext.viewcode",
- "sphinx.ext.githubpages",
- "sphinx.ext.autosummary",
]
# Add any paths that contain templates here, relative to this directory.
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -35,12 +35,13 @@\n # ones.\n extensions = [\n \"sphinx.ext.autodoc\",\n- \"sphinx.ext.todo\",\n+ \"sphinx.ext.autosummary\",\n \"sphinx.ext.coverage\",\n+ \"sphinx.ext.githubpages\",\n \"sphinx.ext.mathjax\",\n+ \"sphinx.ext.napoleon\",\n+ \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n- \"sphinx.ext.githubpages\",\n- \"sphinx.ext.autosummary\",\n ]\n \n # Add any paths that contain templates here, relative to this directory.\n", "issue": "Modify documentation generation code to use napoleon\nNapoleon https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html allows for us to use google style documentation with sphinx. This issue enables #1784 \n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# PySyft documentation build configuration file, created by\n# sphinx-quickstart on Sat Jun 2 20:20:03 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../..\"))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.githubpages\",\n \"sphinx.ext.autosummary\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"PySyft\"\ncopyright = \"2018, OpenMined Contributors\"\nauthor = \"Andrew Trask\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"0.1\"\n# The full version, including alpha/beta/rc tags.\nrelease = \"0.1\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"python\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\nautosummary_generate = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_logo = \"_static/img/logo.png\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"PySyftdoc\"\n\nhtml_context = {\"css_files\": [\"_static/css/PySyft_docs.css\"]}\n\nfavicon = \"_static/img/logo.png\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [(master_doc, \"PySyft.tex\", \"PySyft Documentation\", \"Andrew Trask\", \"manual\")]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"pysyft\", \"PySyft Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"PySyft\",\n \"PySyft Documentation\",\n author,\n \"PySyft\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n", "path": "docs/conf.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# PySyft documentation build configuration file, created by\n# sphinx-quickstart on Sat Jun 2 20:20:03 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../..\"))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.githubpages\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"PySyft\"\ncopyright = \"2018, OpenMined Contributors\"\nauthor = \"Andrew Trask\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \"0.1\"\n# The full version, including alpha/beta/rc tags.\nrelease = \"0.1\"\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"python\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\nautosummary_generate = True\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_logo = \"_static/img/logo.png\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"PySyftdoc\"\n\nhtml_context = {\"css_files\": [\"_static/css/PySyft_docs.css\"]}\n\nfavicon = \"_static/img/logo.png\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [(master_doc, \"PySyft.tex\", \"PySyft Documentation\", \"Andrew Trask\", \"manual\")]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"pysyft\", \"PySyft Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"PySyft\",\n \"PySyft Documentation\",\n author,\n \"PySyft\",\n \"One line description of project.\",\n \"Miscellaneous\",\n )\n]\n", "path": "docs/conf.py"}]} | 1,952 | 159 |
gh_patches_debug_3094 | rasdani/github-patches | git_diff | streamlit__streamlit-2611 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
st.expander cuts off sliders
# Summary
Expander in sidebar cuts off sliders and potentially other stuff
# Steps to reproduce
Run the following code
```
import streamlit as st
sidebar_expander = st.sidebar.beta_expander("Bad sliders underneath!")
with sidebar_expander:
st.slider("Bad layout slider 1", 0, 100, value=0)
st.slider("Bad layout slider 2", 0, 100, value=(0,100))
st.sidebar.slider("Good layout slider")
```
## Expected behavior:
Sliders appear nicely under the expander
## Actual behavior:
Sliders ends are cut off under the expander (see picture)

## Is this a regression?
No
# Debug info
- Streamlit version: 0.69.1
- Python version: 3.8.2
- Using PipEnv
- OS version: Windows 10
- Browser version: Firefox and Chrome, recent versions
# Additional information
May also happen in containers outside of sidebar? Have not checked it though..
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `e2e/scripts/st_expander.py`
Content:
```
1 # Copyright 2018-2020 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import streamlit as st
16
17 container = st.beta_container()
18 container.write("I cannot collapse")
19
20 expander = st.beta_expander("Collapse me!", expanded=True)
21 expander.write("I can collapse")
22
23 collapsed = st.beta_expander("Expand me!")
24 collapsed.write("I am already collapsed")
25
26 sidebar = st.sidebar.beta_expander("Expand me!")
27 sidebar.write("I am in the sidebar")
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/e2e/scripts/st_expander.py b/e2e/scripts/st_expander.py
--- a/e2e/scripts/st_expander.py
+++ b/e2e/scripts/st_expander.py
@@ -19,6 +19,8 @@
expander = st.beta_expander("Collapse me!", expanded=True)
expander.write("I can collapse")
+expander.slider("I don't get cut off")
+expander.button("I'm also not cut off (while focused)")
collapsed = st.beta_expander("Expand me!")
collapsed.write("I am already collapsed")
| {"golden_diff": "diff --git a/e2e/scripts/st_expander.py b/e2e/scripts/st_expander.py\n--- a/e2e/scripts/st_expander.py\n+++ b/e2e/scripts/st_expander.py\n@@ -19,6 +19,8 @@\n \n expander = st.beta_expander(\"Collapse me!\", expanded=True)\n expander.write(\"I can collapse\")\n+expander.slider(\"I don't get cut off\")\n+expander.button(\"I'm also not cut off (while focused)\")\n \n collapsed = st.beta_expander(\"Expand me!\")\n collapsed.write(\"I am already collapsed\")\n", "issue": "st.expander cuts off sliders\n# Summary\r\n\r\nExpander in sidebar cuts off sliders and potentially other stuff\r\n\r\n# Steps to reproduce\r\nRun the following code\r\n```\r\nimport streamlit as st\r\n\r\nsidebar_expander = st.sidebar.beta_expander(\"Bad sliders underneath!\")\r\nwith sidebar_expander:\r\n st.slider(\"Bad layout slider 1\", 0, 100, value=0)\r\n st.slider(\"Bad layout slider 2\", 0, 100, value=(0,100))\r\n\r\nst.sidebar.slider(\"Good layout slider\")\r\n```\r\n## Expected behavior:\r\n\r\nSliders appear nicely under the expander\r\n\r\n## Actual behavior:\r\n\r\nSliders ends are cut off under the expander (see picture)\r\n\r\n\r\n\r\n\r\n## Is this a regression?\r\n\r\nNo\r\n\r\n# Debug info\r\n\r\n- Streamlit version: 0.69.1\r\n- Python version: 3.8.2\r\n- Using PipEnv\r\n- OS version: Windows 10\r\n- Browser version: Firefox and Chrome, recent versions\r\n\r\n# Additional information\r\n\r\nMay also happen in containers outside of sidebar? Have not checked it though..\n", "before_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\ncontainer = st.beta_container()\ncontainer.write(\"I cannot collapse\")\n\nexpander = st.beta_expander(\"Collapse me!\", expanded=True)\nexpander.write(\"I can collapse\")\n\ncollapsed = st.beta_expander(\"Expand me!\")\ncollapsed.write(\"I am already collapsed\")\n\nsidebar = st.sidebar.beta_expander(\"Expand me!\")\nsidebar.write(\"I am in the sidebar\")\n", "path": "e2e/scripts/st_expander.py"}], "after_files": [{"content": "# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\ncontainer = st.beta_container()\ncontainer.write(\"I cannot collapse\")\n\nexpander = st.beta_expander(\"Collapse me!\", expanded=True)\nexpander.write(\"I can collapse\")\nexpander.slider(\"I don't get cut off\")\nexpander.button(\"I'm also not cut off (while focused)\")\n\ncollapsed = st.beta_expander(\"Expand me!\")\ncollapsed.write(\"I am 
already collapsed\")\n\nsidebar = st.sidebar.beta_expander(\"Expand me!\")\nsidebar.write(\"I am in the sidebar\")\n", "path": "e2e/scripts/st_expander.py"}]} | 821 | 125 |
gh_patches_debug_4371 | rasdani/github-patches | git_diff | beeware__toga-2475 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Winforms stack trace dialog buttons are unreliable
### Steps to reproduce
Open the `dialogs` example, and click "Stack Trace". The dialog appears, and then 1 second later, this appears in the console:
```
Task was destroyed but it is pending!
task: <Task pending name='Task-1' coro=<handler_with_cleanup() done, defined at C:\Users\smith\cygwin\git\beeware\toga\core\src\toga\handlers.py:36> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x000002612A605880>()]>>
```
Click the OK button. Nothing happens.
Click the close button. The dialog closes.
This only happens the first time you open the dialog in a process. In all subsequent dialogs, the OK button closes the dialog but the close button does not.
### Expected behavior
Based on the code, it looks like the intention is that the OK button closes the dialog but the close button does not.
### Environment
- Operating System: Windows 10
- Python version: 3.8
- Software versions:
- Toga: 4a27e6e79b39aa3120ee255b2eec1da27bdf30be
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `winforms/src/toga_winforms/dialogs.py`
Content:
```
1 import asyncio
2 from abc import ABC
3 from pathlib import Path
4
5 import System.Windows.Forms as WinForms
6 from System.Drawing import (
7 ContentAlignment,
8 Font as WinFont,
9 FontFamily,
10 FontStyle,
11 SystemFonts,
12 )
13 from System.Windows.Forms import DialogResult, MessageBoxButtons, MessageBoxIcon
14
15 from .libs.wrapper import WeakrefCallable
16
17
18 class BaseDialog(ABC):
19 def __init__(self, interface):
20 self.interface = interface
21 self.interface._impl = self
22
23 # See libs/proactor.py
24 def start_inner_loop(self, callback, *args):
25 asyncio.get_event_loop().start_inner_loop(callback, *args)
26
27 def set_result(self, result):
28 self.interface.set_result(result)
29
30
31 class MessageDialog(BaseDialog):
32 def __init__(
33 self,
34 interface,
35 title,
36 message,
37 buttons,
38 icon,
39 success_result=None,
40 ):
41 super().__init__(interface)
42
43 def show():
44 return_value = WinForms.MessageBox.Show(message, title, buttons, icon)
45 if success_result:
46 self.set_result(return_value == success_result)
47 else:
48 self.set_result(None)
49
50 self.start_inner_loop(show)
51
52
53 class InfoDialog(MessageDialog):
54 def __init__(self, interface, title, message):
55 super().__init__(
56 interface,
57 title,
58 message,
59 MessageBoxButtons.OK,
60 MessageBoxIcon.Information,
61 )
62
63
64 class QuestionDialog(MessageDialog):
65 def __init__(self, interface, title, message):
66 super().__init__(
67 interface,
68 title,
69 message,
70 MessageBoxButtons.YesNo,
71 MessageBoxIcon.Information,
72 success_result=DialogResult.Yes,
73 )
74
75
76 class ConfirmDialog(MessageDialog):
77 def __init__(self, interface, title, message):
78 super().__init__(
79 interface,
80 title,
81 message,
82 MessageBoxButtons.OKCancel,
83 MessageBoxIcon.Warning,
84 success_result=DialogResult.OK,
85 )
86
87
88 class ErrorDialog(MessageDialog):
89 def __init__(self, interface, title, message=None):
90 super().__init__(
91 interface,
92 title,
93 message,
94 WinForms.MessageBoxButtons.OK,
95 WinForms.MessageBoxIcon.Error,
96 )
97
98
99 class StackTraceDialog(BaseDialog):
100 def __init__(self, interface, title, message, content, retry):
101 super().__init__(interface)
102
103 self.native = WinForms.Form()
104 self.native.MinimizeBox = False
105 self.native.FormBorderStyle = self.native.FormBorderStyle.FixedSingle
106 self.native.MaximizeBox = False
107 self.native.FormClosing += WeakrefCallable(self.winforms_FormClosing)
108 self.native.Width = 540
109 self.native.Height = 320
110 self.native.Text = title
111
112 # The top-of-page introductory message
113 textLabel = WinForms.Label()
114 textLabel.Left = 10
115 textLabel.Top = 10
116 textLabel.Width = 520
117 textLabel.Alignment = ContentAlignment.MiddleCenter
118 textLabel.Text = message
119
120 self.native.Controls.Add(textLabel)
121
122 # A scrolling text box for the stack trace.
123 trace = WinForms.RichTextBox()
124 trace.Left = 10
125 trace.Top = 30
126 trace.Width = 504
127 trace.Height = 210
128 trace.Multiline = True
129 trace.ReadOnly = True
130 trace.Font = WinFont(
131 FontFamily.GenericMonospace,
132 float(SystemFonts.DefaultFont.Size),
133 FontStyle.Regular,
134 )
135 trace.Text = content
136
137 self.native.Controls.Add(trace)
138
139 # Add acceptance/close buttons
140 if retry:
141 retry = WinForms.Button()
142 retry.Left = 290
143 retry.Top = 250
144 retry.Width = 100
145 retry.Text = "&Retry"
146 retry.Click += WeakrefCallable(self.winforms_Click_retry)
147
148 self.native.Controls.Add(retry)
149
150 quit = WinForms.Button()
151 quit.Left = 400
152 quit.Top = 250
153 quit.Width = 100
154 quit.Text = "&Quit"
155 quit.Click += WeakrefCallable(self.winforms_Click_quit)
156
157 self.native.Controls.Add(quit)
158 else:
159 accept = WinForms.Button()
160 accept.Left = 400
161 accept.Top = 250
162 accept.Width = 100
163 accept.Text = "&OK"
164 accept.Click += WeakrefCallable(self.winforms_Click_accept)
165
166 self.native.Controls.Add(accept)
167
168 self.start_inner_loop(self.native.ShowDialog)
169
170 def winforms_FormClosing(self, sender, event):
171 # If the close button is pressed, there won't be a future yet.
172 # We cancel this event to prevent the dialog from closing.
173 # If a button is pressed, the future will be set, and a close
174 # event will be triggered.
175 try:
176 self.interface.future.result()
177 except asyncio.InvalidStateError: # pragma: no cover
178 event.Cancel = True
179
180 def set_result(self, result):
181 super().set_result(result)
182 self.native.Close()
183
184 def winforms_Click_quit(self, sender, event):
185 self.set_result(False)
186
187 def winforms_Click_retry(self, sender, event):
188 self.set_result(True)
189
190 def winforms_Click_accept(self, sender, event):
191 self.set_result(None)
192
193
194 class FileDialog(BaseDialog):
195 def __init__(
196 self,
197 native,
198 interface,
199 title,
200 initial_directory,
201 *,
202 filename=None,
203 file_types=None,
204 ):
205 super().__init__(interface)
206 self.native = native
207
208 self._set_title(title)
209 if filename is not None:
210 native.FileName = filename
211
212 if initial_directory is not None:
213 self._set_initial_directory(str(initial_directory))
214
215 if file_types is not None:
216 filters = [f"{ext} files (*.{ext})|*.{ext}" for ext in file_types] + [
217 "All files (*.*)|*.*"
218 ]
219
220 if len(file_types) > 1:
221 pattern = ";".join([f"*.{ext}" for ext in file_types])
222 filters.insert(0, f"All matching files ({pattern})|{pattern}")
223
224 native.Filter = "|".join(filters)
225
226 def show():
227 response = native.ShowDialog()
228 if response == DialogResult.OK:
229 self.set_result(self._get_filenames())
230 else:
231 self.set_result(None)
232
233 self.start_inner_loop(show)
234
235 def _set_title(self, title):
236 self.native.Title = title
237
238 def _set_initial_directory(self, initial_directory):
239 self.native.InitialDirectory = initial_directory
240
241
242 class SaveFileDialog(FileDialog):
243 def __init__(self, interface, title, filename, initial_directory, file_types):
244 super().__init__(
245 WinForms.SaveFileDialog(),
246 interface,
247 title,
248 initial_directory,
249 filename=filename,
250 file_types=file_types,
251 )
252
253 def _get_filenames(self):
254 return Path(self.native.FileName)
255
256
257 class OpenFileDialog(FileDialog):
258 def __init__(
259 self,
260 interface,
261 title,
262 initial_directory,
263 file_types,
264 multiple_select,
265 ):
266 super().__init__(
267 WinForms.OpenFileDialog(),
268 interface,
269 title,
270 initial_directory,
271 file_types=file_types,
272 )
273 if multiple_select:
274 self.native.Multiselect = True
275
276 def _get_filenames(self):
277 if self.native.Multiselect:
278 return [Path(filename) for filename in self.native.FileNames]
279 else:
280 return Path(self.native.FileName)
281
282
283 class SelectFolderDialog(FileDialog):
284 def __init__(self, interface, title, initial_directory, multiple_select):
285 super().__init__(
286 WinForms.FolderBrowserDialog(),
287 interface,
288 title,
289 initial_directory,
290 )
291
292 # The native dialog doesn't support multiple selection, so the only effect
293 # this has is to change whether we return a list.
294 self.multiple_select = multiple_select
295
296 def _get_filenames(self):
297 filename = Path(self.native.SelectedPath)
298 return [filename] if self.multiple_select else filename
299
300 def _set_title(self, title):
301 self.native.Description = title
302
303 def _set_initial_directory(self, initial_directory):
304 self.native.SelectedPath = initial_directory
305
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/winforms/src/toga_winforms/dialogs.py b/winforms/src/toga_winforms/dialogs.py
--- a/winforms/src/toga_winforms/dialogs.py
+++ b/winforms/src/toga_winforms/dialogs.py
@@ -165,7 +165,10 @@
self.native.Controls.Add(accept)
- self.start_inner_loop(self.native.ShowDialog)
+ def show():
+ self.native.ShowDialog()
+
+ self.start_inner_loop(show)
def winforms_FormClosing(self, sender, event):
# If the close button is pressed, there won't be a future yet.
| {"golden_diff": "diff --git a/winforms/src/toga_winforms/dialogs.py b/winforms/src/toga_winforms/dialogs.py\n--- a/winforms/src/toga_winforms/dialogs.py\n+++ b/winforms/src/toga_winforms/dialogs.py\n@@ -165,7 +165,10 @@\n \n self.native.Controls.Add(accept)\n \n- self.start_inner_loop(self.native.ShowDialog)\n+ def show():\n+ self.native.ShowDialog()\n+\n+ self.start_inner_loop(show)\n \n def winforms_FormClosing(self, sender, event):\n # If the close button is pressed, there won't be a future yet.\n", "issue": "Winforms stack trace dialog buttons are unreliable\n### Steps to reproduce\r\n\r\nOpen the `dialogs` example, and click \"Stack Trace\". The dialog appears, and then 1 second later, this appears in the console:\r\n```\r\nTask was destroyed but it is pending!\r\ntask: <Task pending name='Task-1' coro=<handler_with_cleanup() done, defined at C:\\Users\\smith\\cygwin\\git\\beeware\\toga\\core\\src\\toga\\handlers.py:36> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x000002612A605880>()]>>\r\n```\r\n\r\nClick the OK button. Nothing happens.\r\n\r\nClick the close button. The dialog closes.\r\n\r\nThis only happens the first time you open the dialog in a process. In all subsequent dialogs, the OK button closes the dialog but the close button does not.\r\n\r\n### Expected behavior\r\n\r\nBased on the code, it looks like the intention is that the OK button closes the dialog but the close button does not.\r\n\r\n### Environment\r\n\r\n- Operating System: Windows 10\r\n- Python version: 3.8\r\n- Software versions:\r\n - Toga: 4a27e6e79b39aa3120ee255b2eec1da27bdf30be\n", "before_files": [{"content": "import asyncio\nfrom abc import ABC\nfrom pathlib import Path\n\nimport System.Windows.Forms as WinForms\nfrom System.Drawing import (\n ContentAlignment,\n Font as WinFont,\n FontFamily,\n FontStyle,\n SystemFonts,\n)\nfrom System.Windows.Forms import DialogResult, MessageBoxButtons, MessageBoxIcon\n\nfrom .libs.wrapper import WeakrefCallable\n\n\nclass BaseDialog(ABC):\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n\n # See libs/proactor.py\n def start_inner_loop(self, callback, *args):\n asyncio.get_event_loop().start_inner_loop(callback, *args)\n\n def set_result(self, result):\n self.interface.set_result(result)\n\n\nclass MessageDialog(BaseDialog):\n def __init__(\n self,\n interface,\n title,\n message,\n buttons,\n icon,\n success_result=None,\n ):\n super().__init__(interface)\n\n def show():\n return_value = WinForms.MessageBox.Show(message, title, buttons, icon)\n if success_result:\n self.set_result(return_value == success_result)\n else:\n self.set_result(None)\n\n self.start_inner_loop(show)\n\n\nclass InfoDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface,\n title,\n message,\n MessageBoxButtons.OK,\n MessageBoxIcon.Information,\n )\n\n\nclass QuestionDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface,\n title,\n message,\n MessageBoxButtons.YesNo,\n MessageBoxIcon.Information,\n success_result=DialogResult.Yes,\n )\n\n\nclass ConfirmDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface,\n title,\n message,\n MessageBoxButtons.OKCancel,\n MessageBoxIcon.Warning,\n success_result=DialogResult.OK,\n )\n\n\nclass ErrorDialog(MessageDialog):\n def __init__(self, interface, title, message=None):\n super().__init__(\n interface,\n title,\n message,\n 
WinForms.MessageBoxButtons.OK,\n WinForms.MessageBoxIcon.Error,\n )\n\n\nclass StackTraceDialog(BaseDialog):\n def __init__(self, interface, title, message, content, retry):\n super().__init__(interface)\n\n self.native = WinForms.Form()\n self.native.MinimizeBox = False\n self.native.FormBorderStyle = self.native.FormBorderStyle.FixedSingle\n self.native.MaximizeBox = False\n self.native.FormClosing += WeakrefCallable(self.winforms_FormClosing)\n self.native.Width = 540\n self.native.Height = 320\n self.native.Text = title\n\n # The top-of-page introductory message\n textLabel = WinForms.Label()\n textLabel.Left = 10\n textLabel.Top = 10\n textLabel.Width = 520\n textLabel.Alignment = ContentAlignment.MiddleCenter\n textLabel.Text = message\n\n self.native.Controls.Add(textLabel)\n\n # A scrolling text box for the stack trace.\n trace = WinForms.RichTextBox()\n trace.Left = 10\n trace.Top = 30\n trace.Width = 504\n trace.Height = 210\n trace.Multiline = True\n trace.ReadOnly = True\n trace.Font = WinFont(\n FontFamily.GenericMonospace,\n float(SystemFonts.DefaultFont.Size),\n FontStyle.Regular,\n )\n trace.Text = content\n\n self.native.Controls.Add(trace)\n\n # Add acceptance/close buttons\n if retry:\n retry = WinForms.Button()\n retry.Left = 290\n retry.Top = 250\n retry.Width = 100\n retry.Text = \"&Retry\"\n retry.Click += WeakrefCallable(self.winforms_Click_retry)\n\n self.native.Controls.Add(retry)\n\n quit = WinForms.Button()\n quit.Left = 400\n quit.Top = 250\n quit.Width = 100\n quit.Text = \"&Quit\"\n quit.Click += WeakrefCallable(self.winforms_Click_quit)\n\n self.native.Controls.Add(quit)\n else:\n accept = WinForms.Button()\n accept.Left = 400\n accept.Top = 250\n accept.Width = 100\n accept.Text = \"&OK\"\n accept.Click += WeakrefCallable(self.winforms_Click_accept)\n\n self.native.Controls.Add(accept)\n\n self.start_inner_loop(self.native.ShowDialog)\n\n def winforms_FormClosing(self, sender, event):\n # If the close button is pressed, there won't be a future yet.\n # We cancel this event to prevent the dialog from closing.\n # If a button is pressed, the future will be set, and a close\n # event will be triggered.\n try:\n self.interface.future.result()\n except asyncio.InvalidStateError: # pragma: no cover\n event.Cancel = True\n\n def set_result(self, result):\n super().set_result(result)\n self.native.Close()\n\n def winforms_Click_quit(self, sender, event):\n self.set_result(False)\n\n def winforms_Click_retry(self, sender, event):\n self.set_result(True)\n\n def winforms_Click_accept(self, sender, event):\n self.set_result(None)\n\n\nclass FileDialog(BaseDialog):\n def __init__(\n self,\n native,\n interface,\n title,\n initial_directory,\n *,\n filename=None,\n file_types=None,\n ):\n super().__init__(interface)\n self.native = native\n\n self._set_title(title)\n if filename is not None:\n native.FileName = filename\n\n if initial_directory is not None:\n self._set_initial_directory(str(initial_directory))\n\n if file_types is not None:\n filters = [f\"{ext} files (*.{ext})|*.{ext}\" for ext in file_types] + [\n \"All files (*.*)|*.*\"\n ]\n\n if len(file_types) > 1:\n pattern = \";\".join([f\"*.{ext}\" for ext in file_types])\n filters.insert(0, f\"All matching files ({pattern})|{pattern}\")\n\n native.Filter = \"|\".join(filters)\n\n def show():\n response = native.ShowDialog()\n if response == DialogResult.OK:\n self.set_result(self._get_filenames())\n else:\n self.set_result(None)\n\n self.start_inner_loop(show)\n\n def _set_title(self, title):\n 
self.native.Title = title\n\n def _set_initial_directory(self, initial_directory):\n self.native.InitialDirectory = initial_directory\n\n\nclass SaveFileDialog(FileDialog):\n def __init__(self, interface, title, filename, initial_directory, file_types):\n super().__init__(\n WinForms.SaveFileDialog(),\n interface,\n title,\n initial_directory,\n filename=filename,\n file_types=file_types,\n )\n\n def _get_filenames(self):\n return Path(self.native.FileName)\n\n\nclass OpenFileDialog(FileDialog):\n def __init__(\n self,\n interface,\n title,\n initial_directory,\n file_types,\n multiple_select,\n ):\n super().__init__(\n WinForms.OpenFileDialog(),\n interface,\n title,\n initial_directory,\n file_types=file_types,\n )\n if multiple_select:\n self.native.Multiselect = True\n\n def _get_filenames(self):\n if self.native.Multiselect:\n return [Path(filename) for filename in self.native.FileNames]\n else:\n return Path(self.native.FileName)\n\n\nclass SelectFolderDialog(FileDialog):\n def __init__(self, interface, title, initial_directory, multiple_select):\n super().__init__(\n WinForms.FolderBrowserDialog(),\n interface,\n title,\n initial_directory,\n )\n\n # The native dialog doesn't support multiple selection, so the only effect\n # this has is to change whether we return a list.\n self.multiple_select = multiple_select\n\n def _get_filenames(self):\n filename = Path(self.native.SelectedPath)\n return [filename] if self.multiple_select else filename\n\n def _set_title(self, title):\n self.native.Description = title\n\n def _set_initial_directory(self, initial_directory):\n self.native.SelectedPath = initial_directory\n", "path": "winforms/src/toga_winforms/dialogs.py"}], "after_files": [{"content": "import asyncio\nfrom abc import ABC\nfrom pathlib import Path\n\nimport System.Windows.Forms as WinForms\nfrom System.Drawing import (\n ContentAlignment,\n Font as WinFont,\n FontFamily,\n FontStyle,\n SystemFonts,\n)\nfrom System.Windows.Forms import DialogResult, MessageBoxButtons, MessageBoxIcon\n\nfrom .libs.wrapper import WeakrefCallable\n\n\nclass BaseDialog(ABC):\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n\n # See libs/proactor.py\n def start_inner_loop(self, callback, *args):\n asyncio.get_event_loop().start_inner_loop(callback, *args)\n\n def set_result(self, result):\n self.interface.set_result(result)\n\n\nclass MessageDialog(BaseDialog):\n def __init__(\n self,\n interface,\n title,\n message,\n buttons,\n icon,\n success_result=None,\n ):\n super().__init__(interface)\n\n def show():\n return_value = WinForms.MessageBox.Show(message, title, buttons, icon)\n if success_result:\n self.set_result(return_value == success_result)\n else:\n self.set_result(None)\n\n self.start_inner_loop(show)\n\n\nclass InfoDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface,\n title,\n message,\n MessageBoxButtons.OK,\n MessageBoxIcon.Information,\n )\n\n\nclass QuestionDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface,\n title,\n message,\n MessageBoxButtons.YesNo,\n MessageBoxIcon.Information,\n success_result=DialogResult.Yes,\n )\n\n\nclass ConfirmDialog(MessageDialog):\n def __init__(self, interface, title, message):\n super().__init__(\n interface,\n title,\n message,\n MessageBoxButtons.OKCancel,\n MessageBoxIcon.Warning,\n success_result=DialogResult.OK,\n )\n\n\nclass ErrorDialog(MessageDialog):\n def __init__(self, interface, title, 
message=None):\n super().__init__(\n interface,\n title,\n message,\n WinForms.MessageBoxButtons.OK,\n WinForms.MessageBoxIcon.Error,\n )\n\n\nclass StackTraceDialog(BaseDialog):\n def __init__(self, interface, title, message, content, retry):\n super().__init__(interface)\n\n self.native = WinForms.Form()\n self.native.MinimizeBox = False\n self.native.FormBorderStyle = self.native.FormBorderStyle.FixedSingle\n self.native.MaximizeBox = False\n self.native.FormClosing += WeakrefCallable(self.winforms_FormClosing)\n self.native.Width = 540\n self.native.Height = 320\n self.native.Text = title\n\n # The top-of-page introductory message\n textLabel = WinForms.Label()\n textLabel.Left = 10\n textLabel.Top = 10\n textLabel.Width = 520\n textLabel.Alignment = ContentAlignment.MiddleCenter\n textLabel.Text = message\n\n self.native.Controls.Add(textLabel)\n\n # A scrolling text box for the stack trace.\n trace = WinForms.RichTextBox()\n trace.Left = 10\n trace.Top = 30\n trace.Width = 504\n trace.Height = 210\n trace.Multiline = True\n trace.ReadOnly = True\n trace.Font = WinFont(\n FontFamily.GenericMonospace,\n float(SystemFonts.DefaultFont.Size),\n FontStyle.Regular,\n )\n trace.Text = content\n\n self.native.Controls.Add(trace)\n\n # Add acceptance/close buttons\n if retry:\n retry = WinForms.Button()\n retry.Left = 290\n retry.Top = 250\n retry.Width = 100\n retry.Text = \"&Retry\"\n retry.Click += WeakrefCallable(self.winforms_Click_retry)\n\n self.native.Controls.Add(retry)\n\n quit = WinForms.Button()\n quit.Left = 400\n quit.Top = 250\n quit.Width = 100\n quit.Text = \"&Quit\"\n quit.Click += WeakrefCallable(self.winforms_Click_quit)\n\n self.native.Controls.Add(quit)\n else:\n accept = WinForms.Button()\n accept.Left = 400\n accept.Top = 250\n accept.Width = 100\n accept.Text = \"&OK\"\n accept.Click += WeakrefCallable(self.winforms_Click_accept)\n\n self.native.Controls.Add(accept)\n\n def show():\n self.native.ShowDialog()\n\n self.start_inner_loop(show)\n\n def winforms_FormClosing(self, sender, event):\n # If the close button is pressed, there won't be a future yet.\n # We cancel this event to prevent the dialog from closing.\n # If a button is pressed, the future will be set, and a close\n # event will be triggered.\n try:\n self.interface.future.result()\n except asyncio.InvalidStateError: # pragma: no cover\n event.Cancel = True\n\n def set_result(self, result):\n super().set_result(result)\n self.native.Close()\n\n def winforms_Click_quit(self, sender, event):\n self.set_result(False)\n\n def winforms_Click_retry(self, sender, event):\n self.set_result(True)\n\n def winforms_Click_accept(self, sender, event):\n self.set_result(None)\n\n\nclass FileDialog(BaseDialog):\n def __init__(\n self,\n native,\n interface,\n title,\n initial_directory,\n *,\n filename=None,\n file_types=None,\n ):\n super().__init__(interface)\n self.native = native\n\n self._set_title(title)\n if filename is not None:\n native.FileName = filename\n\n if initial_directory is not None:\n self._set_initial_directory(str(initial_directory))\n\n if file_types is not None:\n filters = [f\"{ext} files (*.{ext})|*.{ext}\" for ext in file_types] + [\n \"All files (*.*)|*.*\"\n ]\n\n if len(file_types) > 1:\n pattern = \";\".join([f\"*.{ext}\" for ext in file_types])\n filters.insert(0, f\"All matching files ({pattern})|{pattern}\")\n\n native.Filter = \"|\".join(filters)\n\n def show():\n response = native.ShowDialog()\n if response == DialogResult.OK:\n self.set_result(self._get_filenames())\n else:\n 
self.set_result(None)\n\n self.start_inner_loop(show)\n\n def _set_title(self, title):\n self.native.Title = title\n\n def _set_initial_directory(self, initial_directory):\n self.native.InitialDirectory = initial_directory\n\n\nclass SaveFileDialog(FileDialog):\n def __init__(self, interface, title, filename, initial_directory, file_types):\n super().__init__(\n WinForms.SaveFileDialog(),\n interface,\n title,\n initial_directory,\n filename=filename,\n file_types=file_types,\n )\n\n def _get_filenames(self):\n return Path(self.native.FileName)\n\n\nclass OpenFileDialog(FileDialog):\n def __init__(\n self,\n interface,\n title,\n initial_directory,\n file_types,\n multiple_select,\n ):\n super().__init__(\n WinForms.OpenFileDialog(),\n interface,\n title,\n initial_directory,\n file_types=file_types,\n )\n if multiple_select:\n self.native.Multiselect = True\n\n def _get_filenames(self):\n if self.native.Multiselect:\n return [Path(filename) for filename in self.native.FileNames]\n else:\n return Path(self.native.FileName)\n\n\nclass SelectFolderDialog(FileDialog):\n def __init__(self, interface, title, initial_directory, multiple_select):\n super().__init__(\n WinForms.FolderBrowserDialog(),\n interface,\n title,\n initial_directory,\n )\n\n # The native dialog doesn't support multiple selection, so the only effect\n # this has is to change whether we return a list.\n self.multiple_select = multiple_select\n\n def _get_filenames(self):\n filename = Path(self.native.SelectedPath)\n return [filename] if self.multiple_select else filename\n\n def _set_title(self, title):\n self.native.Description = title\n\n def _set_initial_directory(self, initial_directory):\n self.native.SelectedPath = initial_directory\n", "path": "winforms/src/toga_winforms/dialogs.py"}]} | 3,119 | 137 |
gh_patches_debug_57166 | rasdani/github-patches | git_diff | unionai-oss__pandera-69 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
update readme on release of next version
suggest replacing
```**Supports:** python 2.7, 3.5, 3.6```
with:
```[](https://pypi.python.org/pypi/pandera/)```
when the new metadata is available on pypi.
It will be 'missing' before pypi is updated:
[](https://pypi.python.org/pypi/pandera/)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup
2
3 with open('README.md') as f:
4 long_description = f.read()
5
6 setup(
7 name="pandera",
8 version="0.1.3",
9 author="Niels Bantilan",
10 author_email="[email protected]",
11 description='A light-weight and flexible validation package for pandas data structures.',
12 long_description=long_description,
13 long_description_content_type='text/markdown',
14 url="https://github.com/cosmicBboy/pandera",
15 project_urls={
16 'Documentation': 'https://pandera.readthedocs.io',
17 'Issue Tracker': 'https://github.com/cosmicBboy/pandera/issues'
18 },
19 keywords=["pandas", "validation", "data-structures"],
20 license="MIT",
21 packages=[
22 "pandera",
23 ],
24 install_requires=[
25 "enum34 ; python_version<'3.4'",
26 "numpy >= 1.9.0",
27 "pandas >= 0.23.0",
28 "wrapt",
29 "scipy ; python_version<'2.7'",
30 ],
31 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
32 platforms='any',
33 classifiers=[
34 'Development Status :: 5 - Production/Stable',
35 'Operating System :: OS Independent',
36 'Intended Audience :: Science/Research',
37 'Programming Language :: Python',
38 'Programming Language :: Python :: 3',
39 'Programming Language :: Python :: 3.5',
40 'Programming Language :: Python :: 3.6',
41 'Programming Language :: Python :: 3.7',
42 'Topic :: Scientific/Engineering'
43 ],
44
45 )
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -35,6 +35,7 @@
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
+ 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -35,6 +35,7 @@\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python',\n+ 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n", "issue": "update readme on release of next version\nsuggest replacing\r\n\r\n```**Supports:** python 2.7, 3.5, 3.6```\r\n\r\nwith:\r\n\r\n```[](https://pypi.python.org/pypi/pandera/)```\r\n\r\nwhen the new metadata is available on pypi.\r\n\r\nIt will be 'missing' before pypi is updated:\r\n[](https://pypi.python.org/pypi/pandera/)\n", "before_files": [{"content": "from setuptools import setup\n\nwith open('README.md') as f:\n long_description = f.read()\n\nsetup(\n name=\"pandera\",\n version=\"0.1.3\",\n author=\"Niels Bantilan\",\n author_email=\"[email protected]\",\n description='A light-weight and flexible validation package for pandas data structures.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url=\"https://github.com/cosmicBboy/pandera\",\n project_urls={\n 'Documentation': 'https://pandera.readthedocs.io',\n 'Issue Tracker': 'https://github.com/cosmicBboy/pandera/issues'\n },\n keywords=[\"pandas\", \"validation\", \"data-structures\"],\n license=\"MIT\",\n packages=[\n \"pandera\",\n ],\n install_requires=[\n \"enum34 ; python_version<'3.4'\",\n \"numpy >= 1.9.0\",\n \"pandas >= 0.23.0\",\n \"wrapt\",\n \"scipy ; python_version<'2.7'\",\n ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n platforms='any',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering'\n ],\n\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup\n\nwith open('README.md') as f:\n long_description = f.read()\n\nsetup(\n name=\"pandera\",\n version=\"0.1.3\",\n author=\"Niels Bantilan\",\n author_email=\"[email protected]\",\n description='A light-weight and flexible validation package for pandas data structures.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url=\"https://github.com/cosmicBboy/pandera\",\n project_urls={\n 'Documentation': 'https://pandera.readthedocs.io',\n 'Issue Tracker': 'https://github.com/cosmicBboy/pandera/issues'\n },\n keywords=[\"pandas\", \"validation\", \"data-structures\"],\n license=\"MIT\",\n packages=[\n \"pandera\",\n ],\n install_requires=[\n \"enum34 ; python_version<'3.4'\",\n \"numpy >= 1.9.0\",\n \"pandas >= 0.23.0\",\n \"wrapt\",\n \"scipy ; python_version<'2.7'\",\n ],\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n platforms='any',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering'\n ],\n\n)\n", "path": "setup.py"}]} | 
844 | 104 |
gh_patches_debug_61695 | rasdani/github-patches | git_diff | mdn__kuma-7256 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"Report a content problem" gets wrong title
**Summary**
You end up on https://github.com/mdn/sprints/issues/new?template=issue-template.md&projects=mdn/sprints/2&labels=user-report&title=%2Fen-US
That prefills the title only to `/en-US`.
**Steps To Reproduce (STR)**
1. Go to https://developer.mozilla.org/en-US/docs/Web/JavaScript
2. Click "Report a content problem" in the "Feedback" menu
**Actual behavior**
GitHub issue form title is just `/en-US`
**Expected behavior**
`/en-US/docs/Web/JavaScript`
**Additional context**
Should it be more than just the document URI?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/wiki/templatetags/ssr.py`
Content:
```
1 import json
2 import os
3 from functools import lru_cache
4
5 import requests
6 import requests.exceptions
7 from django.conf import settings
8 from django_jinja import library
9
10
11 @lru_cache()
12 def get_localization_data(locale):
13 """
14 Read the frontend string catalog for the specified locale, parse
15 it as JSON, and return the resulting dict. The returned values
16 are cached so that we don't have to read files all the time.
17 """
18 path = os.path.join(settings.BASE_DIR, "static", "jsi18n", locale, "react.json")
19 with open(path, "r") as f:
20 return json.load(f)
21
22
23 @library.global_function
24 def render_react(component_name, locale, url, document_data, ssr=True):
25 """
26 Render a script tag to define the data and any other HTML tags needed
27 to enable the display of a React-based UI. By default, this does
28 server side rendering, falling back to client-side rendering if
29 the SSR attempt fails. Pass False as the second argument to do
30 client-side rendering unconditionally.
31
32 Note that we are not defining a generic Jinja template tag here.
33 The code in this file is specific to Kuma's React-based UI.
34 """
35 localization_data = get_localization_data(locale)
36
37 data = {
38 "locale": locale,
39 "stringCatalog": localization_data["catalog"],
40 "pluralExpression": localization_data["plural"],
41 "url": url,
42 "documentData": document_data,
43 }
44
45 if ssr:
46 return server_side_render(component_name, data)
47 else:
48 return client_side_render(component_name, data)
49
50
51 def _render(component_name, html, script, needs_serialization=False):
52 """A utility function used by both client side and server side rendering.
53 Returns a string that includes the specified HTML and a serialized
54 form of the state dict, in the format expected by the client-side code
55 in kuma/javascript/src/index.jsx.
56 """
57 if needs_serialization:
58 assert isinstance(script, dict), type(script)
59 script = json.dumps(script).replace("</", "<\\/")
60 else:
61 script = "JSON.parse({})".format(script)
62
63 return (
64 '<div id="react-container" data-component-name="{}">{}</div>\n'
65 "<script>window._react_data = {};</script>\n"
66 ).format(component_name, html, script)
67
68
69 def client_side_render(component_name, data):
70 """
71 Output an empty <div> and a script with complete state so that
72 the UI can be rendered on the client-side.
73 """
74 return _render(component_name, "", data, needs_serialization=True)
75
76
77 def server_side_render(component_name, data):
78 """
79 Pre-render the React UI to HTML and output it in a <div>, and then
80 also pass the necessary serialized state in a <script> so that
81 React on the client side can sync itself with the pre-rendred HTML.
82
83 If any exceptions are thrown during the server-side rendering, we
84 fall back to client-side rendering instead.
85 """
86 url = "{}/{}".format(settings.SSR_URL, component_name)
87 timeout = settings.SSR_TIMEOUT
88 # Try server side rendering
89 try:
90 # POST the document data as JSON to the SSR server and we
91 # should get HTML text (encoded as plain text) in the body
92 # of the response
93 response = requests.post(
94 url,
95 headers={"Content-Type": "application/json"},
96 data=json.dumps(data).encode("utf8"),
97 timeout=timeout,
98 )
99
100 # Even though we've got fully rendered HTML now, we still need to
101 # send the document data along with it so that React can sync its
102 # state on the client side with what is in the HTML. When rendering
103 # a document page, the data includes long strings of HTML that
104 # we can get away without duplicating. So as an optimization when
105 # component_name is "document", we're going to make a copy of the
106 # data (because the original belongs to our caller) and delete those
107 # strings from the copy.
108 #
109 # WARNING: This optimization can save 20kb in data transfer
110 # for typical pages, but it requires us to be very careful on
111 # the frontend. If any components render conditionally based on
112 # the state of bodyHTML, tocHTML or quickLinkHTML, then they will
113 # render differently on the client than during SSR, and the hydrate
114 # will not just work cleanly, and those components will re-render
115 # with empty strings. This has already caused Bug 1558308, and
116 # I've commented it out because the benefit in file size doesn't
117 # seem worth the risk of client-side bugs.
118 #
119 # As an alternative, it ought to be possible to extract the HTML
120 # strings from the SSR'ed document and rebuild the document object
121 # on the client right before we call hydrate(). So if you uncomment
122 # the lines below, you should also edit kuma/javascript/src/index.jsx
123 # to extract the HTML from the document as well.
124 #
125 # if component_name == 'document':
126 # data = data.copy()
127 # data['documentData'] = data['documentData'].copy()
128 # data['documentData'].update(bodyHTML='',
129 # tocHTML='',
130 # quickLinksHTML='')
131 response.raise_for_status()
132 result = response.json()
133 return _render(component_name, result["html"], result["script"])
134
135 except requests.exceptions.RequestException as exception:
136 print(f"{exception.__class__} error contacting SSR server.")
137 print("Falling back to client side rendering.")
138 return client_side_render(component_name, data)
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/wiki/templatetags/ssr.py b/kuma/wiki/templatetags/ssr.py
--- a/kuma/wiki/templatetags/ssr.py
+++ b/kuma/wiki/templatetags/ssr.py
@@ -41,7 +41,6 @@
"url": url,
"documentData": document_data,
}
-
if ssr:
return server_side_render(component_name, data)
else:
| {"golden_diff": "diff --git a/kuma/wiki/templatetags/ssr.py b/kuma/wiki/templatetags/ssr.py\n--- a/kuma/wiki/templatetags/ssr.py\n+++ b/kuma/wiki/templatetags/ssr.py\n@@ -41,7 +41,6 @@\n \"url\": url,\n \"documentData\": document_data,\n }\n-\n if ssr:\n return server_side_render(component_name, data)\n else:\n", "issue": "\"Report a content problem\" gets wrong title\n**Summary**\r\nYou end up on https://github.com/mdn/sprints/issues/new?template=issue-template.md&projects=mdn/sprints/2&labels=user-report&title=%2Fen-US\r\n\r\nThat prefills the title only to `/en-US`.\r\n\r\n\r\n**Steps To Reproduce (STR)**\r\n\r\n1. Go to https://developer.mozilla.org/en-US/docs/Web/JavaScript\r\n2. Click \"Report a content problem\" in the \"Feedback\" menu\r\n\r\n\r\n**Actual behavior**\r\nGitHub issue form title is just `/en-US`\r\n\r\n\r\n**Expected behavior**\r\n`/en-US/docs/Web/JavaScript`\r\n\r\n\r\n**Additional context**\r\nShould it be more than just the document URI?\r\n\n", "before_files": [{"content": "import json\nimport os\nfrom functools import lru_cache\n\nimport requests\nimport requests.exceptions\nfrom django.conf import settings\nfrom django_jinja import library\n\n\n@lru_cache()\ndef get_localization_data(locale):\n \"\"\"\n Read the frontend string catalog for the specified locale, parse\n it as JSON, and return the resulting dict. The returned values\n are cached so that we don't have to read files all the time.\n \"\"\"\n path = os.path.join(settings.BASE_DIR, \"static\", \"jsi18n\", locale, \"react.json\")\n with open(path, \"r\") as f:\n return json.load(f)\n\n\[email protected]_function\ndef render_react(component_name, locale, url, document_data, ssr=True):\n \"\"\"\n Render a script tag to define the data and any other HTML tags needed\n to enable the display of a React-based UI. By default, this does\n server side rendering, falling back to client-side rendering if\n the SSR attempt fails. 
Pass False as the second argument to do\n client-side rendering unconditionally.\n\n Note that we are not defining a generic Jinja template tag here.\n The code in this file is specific to Kuma's React-based UI.\n \"\"\"\n localization_data = get_localization_data(locale)\n\n data = {\n \"locale\": locale,\n \"stringCatalog\": localization_data[\"catalog\"],\n \"pluralExpression\": localization_data[\"plural\"],\n \"url\": url,\n \"documentData\": document_data,\n }\n\n if ssr:\n return server_side_render(component_name, data)\n else:\n return client_side_render(component_name, data)\n\n\ndef _render(component_name, html, script, needs_serialization=False):\n \"\"\"A utility function used by both client side and server side rendering.\n Returns a string that includes the specified HTML and a serialized\n form of the state dict, in the format expected by the client-side code\n in kuma/javascript/src/index.jsx.\n \"\"\"\n if needs_serialization:\n assert isinstance(script, dict), type(script)\n script = json.dumps(script).replace(\"</\", \"<\\\\/\")\n else:\n script = \"JSON.parse({})\".format(script)\n\n return (\n '<div id=\"react-container\" data-component-name=\"{}\">{}</div>\\n'\n \"<script>window._react_data = {};</script>\\n\"\n ).format(component_name, html, script)\n\n\ndef client_side_render(component_name, data):\n \"\"\"\n Output an empty <div> and a script with complete state so that\n the UI can be rendered on the client-side.\n \"\"\"\n return _render(component_name, \"\", data, needs_serialization=True)\n\n\ndef server_side_render(component_name, data):\n \"\"\"\n Pre-render the React UI to HTML and output it in a <div>, and then\n also pass the necessary serialized state in a <script> so that\n React on the client side can sync itself with the pre-rendred HTML.\n\n If any exceptions are thrown during the server-side rendering, we\n fall back to client-side rendering instead.\n \"\"\"\n url = \"{}/{}\".format(settings.SSR_URL, component_name)\n timeout = settings.SSR_TIMEOUT\n # Try server side rendering\n try:\n # POST the document data as JSON to the SSR server and we\n # should get HTML text (encoded as plain text) in the body\n # of the response\n response = requests.post(\n url,\n headers={\"Content-Type\": \"application/json\"},\n data=json.dumps(data).encode(\"utf8\"),\n timeout=timeout,\n )\n\n # Even though we've got fully rendered HTML now, we still need to\n # send the document data along with it so that React can sync its\n # state on the client side with what is in the HTML. When rendering\n # a document page, the data includes long strings of HTML that\n # we can get away without duplicating. So as an optimization when\n # component_name is \"document\", we're going to make a copy of the\n # data (because the original belongs to our caller) and delete those\n # strings from the copy.\n #\n # WARNING: This optimization can save 20kb in data transfer\n # for typical pages, but it requires us to be very careful on\n # the frontend. If any components render conditionally based on\n # the state of bodyHTML, tocHTML or quickLinkHTML, then they will\n # render differently on the client than during SSR, and the hydrate\n # will not just work cleanly, and those components will re-render\n # with empty strings. 
This has already caused Bug 1558308, and\n # I've commented it out because the benefit in file size doesn't\n # seem worth the risk of client-side bugs.\n #\n # As an alternative, it ought to be possible to extract the HTML\n # strings from the SSR'ed document and rebuild the document object\n # on the client right before we call hydrate(). So if you uncomment\n # the lines below, you should also edit kuma/javascript/src/index.jsx\n # to extract the HTML from the document as well.\n #\n # if component_name == 'document':\n # data = data.copy()\n # data['documentData'] = data['documentData'].copy()\n # data['documentData'].update(bodyHTML='',\n # tocHTML='',\n # quickLinksHTML='')\n response.raise_for_status()\n result = response.json()\n return _render(component_name, result[\"html\"], result[\"script\"])\n\n except requests.exceptions.RequestException as exception:\n print(f\"{exception.__class__} error contacting SSR server.\")\n print(\"Falling back to client side rendering.\")\n return client_side_render(component_name, data)\n", "path": "kuma/wiki/templatetags/ssr.py"}], "after_files": [{"content": "import json\nimport os\nfrom functools import lru_cache\n\nimport requests\nimport requests.exceptions\nfrom django.conf import settings\nfrom django_jinja import library\n\n\n@lru_cache()\ndef get_localization_data(locale):\n \"\"\"\n Read the frontend string catalog for the specified locale, parse\n it as JSON, and return the resulting dict. The returned values\n are cached so that we don't have to read files all the time.\n \"\"\"\n path = os.path.join(settings.BASE_DIR, \"static\", \"jsi18n\", locale, \"react.json\")\n with open(path, \"r\") as f:\n return json.load(f)\n\n\[email protected]_function\ndef render_react(component_name, locale, url, document_data, ssr=True):\n \"\"\"\n Render a script tag to define the data and any other HTML tags needed\n to enable the display of a React-based UI. By default, this does\n server side rendering, falling back to client-side rendering if\n the SSR attempt fails. 
Pass False as the second argument to do\n client-side rendering unconditionally.\n\n Note that we are not defining a generic Jinja template tag here.\n The code in this file is specific to Kuma's React-based UI.\n \"\"\"\n localization_data = get_localization_data(locale)\n\n data = {\n \"locale\": locale,\n \"stringCatalog\": localization_data[\"catalog\"],\n \"pluralExpression\": localization_data[\"plural\"],\n \"url\": url,\n \"documentData\": document_data,\n }\n if ssr:\n return server_side_render(component_name, data)\n else:\n return client_side_render(component_name, data)\n\n\ndef _render(component_name, html, script, needs_serialization=False):\n \"\"\"A utility function used by both client side and server side rendering.\n Returns a string that includes the specified HTML and a serialized\n form of the state dict, in the format expected by the client-side code\n in kuma/javascript/src/index.jsx.\n \"\"\"\n if needs_serialization:\n assert isinstance(script, dict), type(script)\n script = json.dumps(script).replace(\"</\", \"<\\\\/\")\n else:\n script = \"JSON.parse({})\".format(script)\n\n return (\n '<div id=\"react-container\" data-component-name=\"{}\">{}</div>\\n'\n \"<script>window._react_data = {};</script>\\n\"\n ).format(component_name, html, script)\n\n\ndef client_side_render(component_name, data):\n \"\"\"\n Output an empty <div> and a script with complete state so that\n the UI can be rendered on the client-side.\n \"\"\"\n return _render(component_name, \"\", data, needs_serialization=True)\n\n\ndef server_side_render(component_name, data):\n \"\"\"\n Pre-render the React UI to HTML and output it in a <div>, and then\n also pass the necessary serialized state in a <script> so that\n React on the client side can sync itself with the pre-rendred HTML.\n\n If any exceptions are thrown during the server-side rendering, we\n fall back to client-side rendering instead.\n \"\"\"\n url = \"{}/{}\".format(settings.SSR_URL, component_name)\n timeout = settings.SSR_TIMEOUT\n # Try server side rendering\n try:\n # POST the document data as JSON to the SSR server and we\n # should get HTML text (encoded as plain text) in the body\n # of the response\n response = requests.post(\n url,\n headers={\"Content-Type\": \"application/json\"},\n data=json.dumps(data).encode(\"utf8\"),\n timeout=timeout,\n )\n\n # Even though we've got fully rendered HTML now, we still need to\n # send the document data along with it so that React can sync its\n # state on the client side with what is in the HTML. When rendering\n # a document page, the data includes long strings of HTML that\n # we can get away without duplicating. So as an optimization when\n # component_name is \"document\", we're going to make a copy of the\n # data (because the original belongs to our caller) and delete those\n # strings from the copy.\n #\n # WARNING: This optimization can save 20kb in data transfer\n # for typical pages, but it requires us to be very careful on\n # the frontend. If any components render conditionally based on\n # the state of bodyHTML, tocHTML or quickLinkHTML, then they will\n # render differently on the client than during SSR, and the hydrate\n # will not just work cleanly, and those components will re-render\n # with empty strings. 
This has already caused Bug 1558308, and\n # I've commented it out because the benefit in file size doesn't\n # seem worth the risk of client-side bugs.\n #\n # As an alternative, it ought to be possible to extract the HTML\n # strings from the SSR'ed document and rebuild the document object\n # on the client right before we call hydrate(). So if you uncomment\n # the lines below, you should also edit kuma/javascript/src/index.jsx\n # to extract the HTML from the document as well.\n #\n # if component_name == 'document':\n # data = data.copy()\n # data['documentData'] = data['documentData'].copy()\n # data['documentData'].update(bodyHTML='',\n # tocHTML='',\n # quickLinksHTML='')\n response.raise_for_status()\n result = response.json()\n return _render(component_name, result[\"html\"], result[\"script\"])\n\n except requests.exceptions.RequestException as exception:\n print(f\"{exception.__class__} error contacting SSR server.\")\n print(\"Falling back to client side rendering.\")\n return client_side_render(component_name, data)\n", "path": "kuma/wiki/templatetags/ssr.py"}]} | 1,979 | 103 |
gh_patches_debug_38441 | rasdani/github-patches | git_diff | vispy__vispy-1824 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
meshio for i/o
Main author of [meshio](https://github.com/nschloe/meshio) here.
meshio can read a large number of formats which are applicable for vispy, I think it would be a good match for meshio.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vispy/io/mesh.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) Vispy Development Team. All Rights Reserved.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 """ Reading and writing of data like images and meshes.
6 """
7
8 from os import path as op
9
10 from .wavefront import WavefrontReader, WavefrontWriter
11 from .stl import load_stl
12
13
14 def read_mesh(fname):
15 """Read mesh data from file.
16
17 Parameters
18 ----------
19 fname : str
20 File name to read. Format will be inferred from the filename.
21 Currently only '.obj' and '.obj.gz' are supported.
22
23 Returns
24 -------
25 vertices : array
26 Vertices.
27 faces : array | None
28 Triangle face definitions.
29 normals : array
30 Normals for the mesh.
31 texcoords : array | None
32 Texture coordinates.
33 """
34 # Check format
35 fmt = op.splitext(fname)[1].lower()
36 if fmt == '.gz':
37 fmt = op.splitext(op.splitext(fname)[0])[1].lower()
38
39 if fmt in ('.obj'):
40 return WavefrontReader.read(fname)
41 elif fmt in ('.stl'):
42 file_obj = open(fname, mode='rb')
43 mesh = load_stl(file_obj)
44 vertices = mesh['vertices']
45 faces = mesh['faces']
46 normals = mesh['face_normals']
47 texcoords = None
48 return vertices, faces, normals, texcoords
49 elif not format:
50 raise ValueError('read_mesh needs could not determine format.')
51 else:
52 raise ValueError('read_mesh does not understand format %s.' % fmt)
53
54
55 def write_mesh(fname, vertices, faces, normals, texcoords, name='',
56 format='obj', overwrite=False, reshape_faces=True):
57 """ Write mesh data to file.
58
59 Parameters
60 ----------
61 fname : str
62 Filename to write. Must end with ".obj" or ".gz".
63 vertices : array
64 Vertices.
65 faces : array | None
66 Triangle face definitions.
67 normals : array
68 Normals for the mesh.
69 texcoords : array | None
70 Texture coordinates.
71 name : str
72 Name of the object.
73 format : str
74 Currently only "obj" is supported.
75 overwrite : bool
76 If the file exists, overwrite it.
77 reshape_faces : bool
78 Reshape the `faces` array to (Nf, 3). Set to `False`
79 if you need to write a mesh with non triangular faces.
80 """
81 # Check file
82 if op.isfile(fname) and not overwrite:
83 raise IOError('file "%s" exists, use overwrite=True' % fname)
84
85 # Check format
86 if format not in ('obj'):
87 raise ValueError('Only "obj" format writing currently supported')
88 WavefrontWriter.write(fname, vertices, faces,
89 normals, texcoords, name, reshape_faces)
90
```
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) Vispy Development Team. All Rights Reserved.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4 """Vispy setup script.
5
6 Steps to do a new release:
7
8 Preparations:
9 * Test on Windows, Linux, Mac
10 * Make release notes
11 * Update API documentation and other docs that need updating.
12
13 Define the version and release:
14 * tag the tip changeset as version x.x.x; `git tag -a 'vX.Y.Z' -m "Version X.Y.Z"`
15 * push tag to github
16 * verify that azure pipelines complete
17 * verify that `.tar.gz` sdist and binary wheels are available on PyPI
18
19 Announcing:
20 * It can be worth waiting a day for eager users to report critical bugs
21 * Announce in scipy-user, vispy mailing list, twitter (@vispyproject)
22
23 """
24
25 import os
26 import sys
27 import platform
28 from os import path as op
29 from distutils import log
30 from setuptools import setup, find_packages, Command, Extension
31 from setuptools.command.sdist import sdist
32 from setuptools.command.build_py import build_py
33 from setuptools.command.egg_info import egg_info
34 from subprocess import check_call
35
36 import numpy as np
37 from Cython.Build import cythonize
38
39 log.set_verbosity(log.DEBUG)
40 log.info('setup.py entered')
41 log.info('$PATH=%s' % os.environ['PATH'])
42
43 name = 'vispy'
44 description = 'Interactive visualization in Python'
45
46 # Special commands for building jupyter notebook extension
47 here = os.path.dirname(os.path.abspath(__file__))
48 node_root = os.path.join(here, 'js')
49 is_repo = os.path.exists(os.path.join(here, '.git'))
50
51 npm_path = os.pathsep.join([
52 os.path.join(node_root, 'node_modules', '.bin'),
53 os.environ.get('PATH', os.defpath),
54 ])
55
56
57 def set_builtin(name, value):
58 if isinstance(__builtins__, dict):
59 __builtins__[name] = value
60 else:
61 setattr(__builtins__, name, value)
62
63
64 def js_prerelease(command, strict=False):
65 """decorator for building minified js/css prior to another command"""
66 class DecoratedCommand(command):
67 def run(self):
68 jsdeps = self.distribution.get_command_obj('jsdeps')
69 if not is_repo and all(os.path.exists(t) for t in jsdeps.targets):
70 # sdist, nothing to do
71 command.run(self)
72 return
73
74 try:
75 self.distribution.run_command('jsdeps')
76 except Exception as e:
77 missing = [t for t in jsdeps.targets if not os.path.exists(t)]
78 if strict or missing:
79 log.warn('rebuilding js and css failed')
80 if missing:
81 log.error('missing files: %s' % missing)
82 # HACK: Allow users who can't build the JS to still install vispy
83 if not is_repo:
84 raise e
85 log.warn('WARNING: continuing installation WITHOUT nbextension javascript')
86 # remove JS files from data_files so setuptools doesn't try to copy
87 # non-existent files
88 self.distribution.data_files = [x for x in self.distribution.data_files
89 if 'jupyter' not in x[0]]
90 else:
91 log.warn('rebuilding js and css failed (not a problem)')
92 log.warn(str(e))
93 command.run(self)
94 update_package_data(self.distribution)
95 return DecoratedCommand
96
97
98 def update_package_data(distribution):
99 """update package_data to catch changes during setup"""
100 build_py = distribution.get_command_obj('build_py')
101 # distribution.package_data = find_package_data()
102 # re-init build_py options which load package_data
103 build_py.finalize_options()
104
105
106 class NPM(Command):
107 description = 'install package.json dependencies using npm'
108
109 user_options = []
110
111 node_modules = os.path.join(node_root, 'node_modules')
112
113 targets = [
114 os.path.join(here, 'vispy', 'static', 'extension.js'),
115 os.path.join(here, 'vispy', 'static', 'index.js')
116 ]
117
118 def initialize_options(self):
119 pass
120
121 def finalize_options(self):
122 pass
123
124 def get_npm_name(self):
125 npmName = 'npm';
126 if platform.system() == 'Windows':
127 npmName = 'npm.cmd';
128
129 return npmName;
130
131 def has_npm(self):
132 npmName = self.get_npm_name();
133 try:
134 check_call([npmName, '--version'])
135 return True
136 except:
137 return False
138
139 def should_run_npm_install(self):
140 package_json = os.path.join(node_root, 'package.json')
141 node_modules_exists = os.path.exists(self.node_modules)
142 return self.has_npm()
143
144 def run(self):
145 has_npm = self.has_npm()
146 if not has_npm:
147 log.error("`npm` unavailable. If you're running this command "
148 "using sudo, make sure `npm` is available to sudo")
149
150 env = os.environ.copy()
151 env['PATH'] = npm_path
152
153 if self.should_run_npm_install():
154 log.info("Installing build dependencies with npm. This may take "
155 "a while...")
156 npmName = self.get_npm_name();
157 check_call([npmName, 'install', '--verbose'], cwd=node_root,
158 stdout=sys.stdout, stderr=sys.stderr)
159 os.utime(self.node_modules, None)
160
161 for t in self.targets:
162 if not os.path.exists(t):
163 msg = 'Missing file: %s' % t
164 if not has_npm:
165 msg += '\nnpm is required to build a development ' \
166 'version of a widget extension'
167 raise ValueError(msg)
168
169 # update package data in case this created new files
170 update_package_data(self.distribution)
171
172
173 extensions = [Extension('vispy.visuals.text._sdf_cpu',
174 [op.join('vispy', 'visuals', 'text', '_sdf_cpu.pyx')],
175 include_dirs=[np.get_include()]),
176 ]
177
178 readme = open('README.rst', 'r').read()
179 setup(
180 name=name,
181 use_scm_version={'write_to': 'vispy/version.py'},
182 author='Vispy contributors',
183 author_email='[email protected]',
184 license='(new) BSD',
185 url='http://vispy.org',
186 download_url='https://pypi.python.org/pypi/vispy',
187 keywords=[
188 'visualization',
189 'OpenGl',
190 'ES',
191 'medical',
192 'imaging',
193 '3D',
194 'plotting',
195 'numpy',
196 'bigdata',
197 'ipython',
198 'jupyter',
199 'widgets',
200 ],
201 description=description,
202 long_description=readme,
203 platforms='any',
204 provides=['vispy'],
205 cmdclass={
206 'build_py': js_prerelease(build_py),
207 'egg_info': js_prerelease(egg_info),
208 'sdist': js_prerelease(sdist, strict=True),
209 'jsdeps': NPM,
210 },
211 python_requires='>=3.6',
212 install_requires=['numpy', 'freetype-py'],
213 setup_requires=['numpy', 'cython', 'setuptools_scm', 'setuptools_scm_git_archive'],
214 extras_require={
215 'ipython-static': ['ipython'],
216 'ipython-vnc': ['ipython>=7'],
217 'ipython-webgl': ['ipywidgets>=7.0', 'ipython>=7', 'tornado'],
218 'pyglet': ['pyglet>=1.2'],
219 'pyqt5': ['pyqt5'],
220 'pyside': ['PySide'],
221 'pyside2': ['PySide2'],
222 'sdl2': ['PySDL2'],
223 'wx': ['wxPython'],
224 'doc': ['sphinx_bootstrap_theme', 'numpydoc'],
225 },
226 packages=find_packages(exclude=['make']),
227 ext_modules=cythonize(extensions),
228 package_dir={'vispy': 'vispy'},
229 data_files=[
230 ('share/jupyter/nbextensions/vispy', [
231 'vispy/static/extension.js',
232 'vispy/static/index.js',
233 'vispy/static/index.js.map',
234 ]),
235 ('etc/jupyter/nbconfig/notebook.d', ['vispy.json']),
236 ],
237 include_package_data=True,
238 package_data={
239 'vispy': [op.join('io', '_data', '*'),
240 op.join('html', 'static', 'js', '*'),
241 op.join('app', 'tests', 'qt-designer.ui'),
242 op.join('util', 'fonts', 'data', '*.ttf'),
243 ],
244
245 'vispy.glsl': ['*.vert','*.frag', "*.glsl"],
246 'vispy.glsl.antialias': ['*.vert','*.frag', "*.glsl"],
247 'vispy.glsl.arrowheads': ['*.vert','*.frag', "*.glsl"],
248 'vispy.glsl.arrows': ['*.vert','*.frag', "*.glsl"],
249 'vispy.glsl.collections': ['*.vert','*.frag', "*.glsl"],
250 'vispy.glsl.colormaps': ['*.vert','*.frag', "*.glsl"],
251 'vispy.glsl.lines': ['*.vert','*.frag', "*.glsl"],
252 'vispy.glsl.markers': ['*.vert','*.frag', "*.glsl"],
253 'vispy.glsl.math': ['*.vert','*.frag', "*.glsl"],
254 'vispy.glsl.misc': ['*.vert','*.frag', "*.glsl"],
255 'vispy.glsl.transforms': ['*.vert','*.frag', "*.glsl"],
256
257 },
258 zip_safe=False,
259 classifiers=[
260 'Development Status :: 3 - Alpha',
261 'Intended Audience :: Science/Research',
262 'Intended Audience :: Education',
263 'Intended Audience :: Developers',
264 'Topic :: Scientific/Engineering :: Visualization',
265 'License :: OSI Approved :: BSD License',
266 'Operating System :: MacOS :: MacOS X',
267 'Operating System :: Microsoft :: Windows',
268 'Operating System :: POSIX',
269 'Programming Language :: Python',
270 'Programming Language :: Python :: 3.6',
271 'Programming Language :: Python :: 3.7',
272 'Programming Language :: Python :: 3.8',
273 'Framework :: IPython'
274 ],
275 )
276
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -222,6 +222,7 @@
'sdl2': ['PySDL2'],
'wx': ['wxPython'],
'doc': ['sphinx_bootstrap_theme', 'numpydoc'],
+ 'io': ['meshio'],
},
packages=find_packages(exclude=['make']),
ext_modules=cythonize(extensions),
diff --git a/vispy/io/mesh.py b/vispy/io/mesh.py
--- a/vispy/io/mesh.py
+++ b/vispy/io/mesh.py
@@ -5,6 +5,7 @@
""" Reading and writing of data like images and meshes.
"""
+import os
from os import path as op
from .wavefront import WavefrontReader, WavefrontWriter
@@ -46,14 +47,26 @@
normals = mesh['face_normals']
texcoords = None
return vertices, faces, normals, texcoords
- elif not format:
- raise ValueError('read_mesh needs could not determine format.')
else:
- raise ValueError('read_mesh does not understand format %s.' % fmt)
+ try:
+ import meshio
+ except ImportError:
+ raise ValueError('read_mesh does not understand format %s.' % fmt)
+
+ try:
+ mesh = meshio.read(fname)
+ except meshio.ReadError:
+ raise ValueError('read_mesh does not understand format %s.' % fmt)
+
+ triangles = mesh.get_cells_type("triangle")
+ if len(triangles) == 0:
+ raise ValueError('mesh file does not contain triangles.')
+
+ return mesh.points, triangles, None, None
def write_mesh(fname, vertices, faces, normals, texcoords, name='',
- format='obj', overwrite=False, reshape_faces=True):
+ format=None, overwrite=False, reshape_faces=True):
""" Write mesh data to file.
Parameters
@@ -82,8 +95,29 @@
if op.isfile(fname) and not overwrite:
raise IOError('file "%s" exists, use overwrite=True' % fname)
+ if format is None:
+ format = os.path.splitext(fname)[1][1:]
+
# Check format
- if format not in ('obj'):
- raise ValueError('Only "obj" format writing currently supported')
- WavefrontWriter.write(fname, vertices, faces,
- normals, texcoords, name, reshape_faces)
+ if format == 'obj':
+ WavefrontWriter.write(fname, vertices, faces,
+ normals, texcoords, name, reshape_faces)
+ return
+
+ try:
+ import meshio
+ except ImportError:
+ raise ValueError('write_mesh does not understand format %s.' % format)
+
+ cell_data = {}
+ if normals is not None:
+ cell_data["normals"] = [normals]
+ if texcoords is not None:
+ cell_data["texcoords"] = [texcoords]
+
+ mesh = meshio.Mesh(vertices, [("triangle", faces)], cell_data=cell_data)
+
+ try:
+ mesh.write(fname, file_format=format)
+ except meshio.WriteError:
+ raise ValueError('write_mesh does not understand format %s.' % format)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -222,6 +222,7 @@\n 'sdl2': ['PySDL2'],\n 'wx': ['wxPython'],\n 'doc': ['sphinx_bootstrap_theme', 'numpydoc'],\n+ 'io': ['meshio'],\n },\n packages=find_packages(exclude=['make']),\n ext_modules=cythonize(extensions),\ndiff --git a/vispy/io/mesh.py b/vispy/io/mesh.py\n--- a/vispy/io/mesh.py\n+++ b/vispy/io/mesh.py\n@@ -5,6 +5,7 @@\n \"\"\" Reading and writing of data like images and meshes.\n \"\"\"\n \n+import os\n from os import path as op\n \n from .wavefront import WavefrontReader, WavefrontWriter\n@@ -46,14 +47,26 @@\n normals = mesh['face_normals']\n texcoords = None\n return vertices, faces, normals, texcoords\n- elif not format:\n- raise ValueError('read_mesh needs could not determine format.')\n else:\n- raise ValueError('read_mesh does not understand format %s.' % fmt)\n+ try:\n+ import meshio\n+ except ImportError:\n+ raise ValueError('read_mesh does not understand format %s.' % fmt)\n+\n+ try:\n+ mesh = meshio.read(fname)\n+ except meshio.ReadError:\n+ raise ValueError('read_mesh does not understand format %s.' % fmt)\n+\n+ triangles = mesh.get_cells_type(\"triangle\")\n+ if len(triangles) == 0:\n+ raise ValueError('mesh file does not contain triangles.')\n+\n+ return mesh.points, triangles, None, None\n \n \n def write_mesh(fname, vertices, faces, normals, texcoords, name='',\n- format='obj', overwrite=False, reshape_faces=True):\n+ format=None, overwrite=False, reshape_faces=True):\n \"\"\" Write mesh data to file.\n \n Parameters\n@@ -82,8 +95,29 @@\n if op.isfile(fname) and not overwrite:\n raise IOError('file \"%s\" exists, use overwrite=True' % fname)\n \n+ if format is None:\n+ format = os.path.splitext(fname)[1][1:]\n+\n # Check format\n- if format not in ('obj'):\n- raise ValueError('Only \"obj\" format writing currently supported')\n- WavefrontWriter.write(fname, vertices, faces,\n- normals, texcoords, name, reshape_faces)\n+ if format == 'obj':\n+ WavefrontWriter.write(fname, vertices, faces,\n+ normals, texcoords, name, reshape_faces)\n+ return\n+\n+ try:\n+ import meshio\n+ except ImportError:\n+ raise ValueError('write_mesh does not understand format %s.' % format)\n+\n+ cell_data = {}\n+ if normals is not None:\n+ cell_data[\"normals\"] = [normals]\n+ if texcoords is not None:\n+ cell_data[\"texcoords\"] = [texcoords]\n+\n+ mesh = meshio.Mesh(vertices, [(\"triangle\", faces)], cell_data=cell_data)\n+\n+ try:\n+ mesh.write(fname, file_format=format)\n+ except meshio.WriteError:\n+ raise ValueError('write_mesh does not understand format %s.' % format)\n", "issue": "meshio for i/o\nMain author of [meshio](https://github.com/nschloe/meshio) here.\r\n\r\nmeshio can read a large number of formats which are applicable for vispy, I think it would be a good match for meshio.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n\"\"\" Reading and writing of data like images and meshes.\n\"\"\"\n\nfrom os import path as op\n\nfrom .wavefront import WavefrontReader, WavefrontWriter\nfrom .stl import load_stl\n\n\ndef read_mesh(fname):\n \"\"\"Read mesh data from file.\n\n Parameters\n ----------\n fname : str\n File name to read. 
Format will be inferred from the filename.\n Currently only '.obj' and '.obj.gz' are supported.\n\n Returns\n -------\n vertices : array\n Vertices.\n faces : array | None\n Triangle face definitions.\n normals : array\n Normals for the mesh.\n texcoords : array | None\n Texture coordinates.\n \"\"\"\n # Check format\n fmt = op.splitext(fname)[1].lower()\n if fmt == '.gz':\n fmt = op.splitext(op.splitext(fname)[0])[1].lower()\n\n if fmt in ('.obj'):\n return WavefrontReader.read(fname)\n elif fmt in ('.stl'):\n file_obj = open(fname, mode='rb')\n mesh = load_stl(file_obj)\n vertices = mesh['vertices']\n faces = mesh['faces']\n normals = mesh['face_normals']\n texcoords = None\n return vertices, faces, normals, texcoords\n elif not format:\n raise ValueError('read_mesh needs could not determine format.')\n else:\n raise ValueError('read_mesh does not understand format %s.' % fmt)\n\n\ndef write_mesh(fname, vertices, faces, normals, texcoords, name='',\n format='obj', overwrite=False, reshape_faces=True):\n \"\"\" Write mesh data to file.\n\n Parameters\n ----------\n fname : str\n Filename to write. Must end with \".obj\" or \".gz\".\n vertices : array\n Vertices.\n faces : array | None\n Triangle face definitions.\n normals : array\n Normals for the mesh.\n texcoords : array | None\n Texture coordinates.\n name : str\n Name of the object.\n format : str\n Currently only \"obj\" is supported.\n overwrite : bool\n If the file exists, overwrite it.\n reshape_faces : bool\n Reshape the `faces` array to (Nf, 3). Set to `False`\n if you need to write a mesh with non triangular faces.\n \"\"\"\n # Check file\n if op.isfile(fname) and not overwrite:\n raise IOError('file \"%s\" exists, use overwrite=True' % fname)\n\n # Check format\n if format not in ('obj'):\n raise ValueError('Only \"obj\" format writing currently supported')\n WavefrontWriter.write(fname, vertices, faces,\n normals, texcoords, name, reshape_faces)\n", "path": "vispy/io/mesh.py"}, {"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. 
See LICENSE.txt for more info.\n\"\"\"Vispy setup script.\n\nSteps to do a new release:\n\nPreparations:\n * Test on Windows, Linux, Mac\n * Make release notes\n * Update API documentation and other docs that need updating.\n\nDefine the version and release:\n * tag the tip changeset as version x.x.x; `git tag -a 'vX.Y.Z' -m \"Version X.Y.Z\"`\n * push tag to github\n * verify that azure pipelines complete\n * verify that `.tar.gz` sdist and binary wheels are available on PyPI\n\nAnnouncing:\n * It can be worth waiting a day for eager users to report critical bugs\n * Announce in scipy-user, vispy mailing list, twitter (@vispyproject)\n\n\"\"\"\n\nimport os\nimport sys\nimport platform\nfrom os import path as op\nfrom distutils import log\nfrom setuptools import setup, find_packages, Command, Extension\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.egg_info import egg_info\nfrom subprocess import check_call\n\nimport numpy as np\nfrom Cython.Build import cythonize\n\nlog.set_verbosity(log.DEBUG)\nlog.info('setup.py entered')\nlog.info('$PATH=%s' % os.environ['PATH'])\n\nname = 'vispy'\ndescription = 'Interactive visualization in Python'\n\n# Special commands for building jupyter notebook extension\nhere = os.path.dirname(os.path.abspath(__file__))\nnode_root = os.path.join(here, 'js')\nis_repo = os.path.exists(os.path.join(here, '.git'))\n\nnpm_path = os.pathsep.join([\n os.path.join(node_root, 'node_modules', '.bin'),\n os.environ.get('PATH', os.defpath),\n])\n\n\ndef set_builtin(name, value):\n if isinstance(__builtins__, dict):\n __builtins__[name] = value\n else:\n setattr(__builtins__, name, value)\n\n\ndef js_prerelease(command, strict=False):\n \"\"\"decorator for building minified js/css prior to another command\"\"\"\n class DecoratedCommand(command):\n def run(self):\n jsdeps = self.distribution.get_command_obj('jsdeps')\n if not is_repo and all(os.path.exists(t) for t in jsdeps.targets):\n # sdist, nothing to do\n command.run(self)\n return\n\n try:\n self.distribution.run_command('jsdeps')\n except Exception as e:\n missing = [t for t in jsdeps.targets if not os.path.exists(t)]\n if strict or missing:\n log.warn('rebuilding js and css failed')\n if missing:\n log.error('missing files: %s' % missing)\n # HACK: Allow users who can't build the JS to still install vispy\n if not is_repo:\n raise e\n log.warn('WARNING: continuing installation WITHOUT nbextension javascript')\n # remove JS files from data_files so setuptools doesn't try to copy\n # non-existent files\n self.distribution.data_files = [x for x in self.distribution.data_files\n if 'jupyter' not in x[0]]\n else:\n log.warn('rebuilding js and css failed (not a problem)')\n log.warn(str(e))\n command.run(self)\n update_package_data(self.distribution)\n return DecoratedCommand\n\n\ndef update_package_data(distribution):\n \"\"\"update package_data to catch changes during setup\"\"\"\n build_py = distribution.get_command_obj('build_py')\n # distribution.package_data = find_package_data()\n # re-init build_py options which load package_data\n build_py.finalize_options()\n\n\nclass NPM(Command):\n description = 'install package.json dependencies using npm'\n\n user_options = []\n\n node_modules = os.path.join(node_root, 'node_modules')\n\n targets = [\n os.path.join(here, 'vispy', 'static', 'extension.js'),\n os.path.join(here, 'vispy', 'static', 'index.js')\n ]\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def 
get_npm_name(self):\n npmName = 'npm';\n if platform.system() == 'Windows':\n npmName = 'npm.cmd';\n\n return npmName;\n\n def has_npm(self):\n npmName = self.get_npm_name();\n try:\n check_call([npmName, '--version'])\n return True\n except:\n return False\n\n def should_run_npm_install(self):\n package_json = os.path.join(node_root, 'package.json')\n node_modules_exists = os.path.exists(self.node_modules)\n return self.has_npm()\n\n def run(self):\n has_npm = self.has_npm()\n if not has_npm:\n log.error(\"`npm` unavailable. If you're running this command \"\n \"using sudo, make sure `npm` is available to sudo\")\n\n env = os.environ.copy()\n env['PATH'] = npm_path\n\n if self.should_run_npm_install():\n log.info(\"Installing build dependencies with npm. This may take \"\n \"a while...\")\n npmName = self.get_npm_name();\n check_call([npmName, 'install', '--verbose'], cwd=node_root,\n stdout=sys.stdout, stderr=sys.stderr)\n os.utime(self.node_modules, None)\n\n for t in self.targets:\n if not os.path.exists(t):\n msg = 'Missing file: %s' % t\n if not has_npm:\n msg += '\\nnpm is required to build a development ' \\\n 'version of a widget extension'\n raise ValueError(msg)\n\n # update package data in case this created new files\n update_package_data(self.distribution)\n\n\nextensions = [Extension('vispy.visuals.text._sdf_cpu',\n [op.join('vispy', 'visuals', 'text', '_sdf_cpu.pyx')],\n include_dirs=[np.get_include()]),\n ]\n\nreadme = open('README.rst', 'r').read()\nsetup(\n name=name,\n use_scm_version={'write_to': 'vispy/version.py'},\n author='Vispy contributors',\n author_email='[email protected]',\n license='(new) BSD',\n url='http://vispy.org',\n download_url='https://pypi.python.org/pypi/vispy',\n keywords=[\n 'visualization',\n 'OpenGl',\n 'ES',\n 'medical',\n 'imaging',\n '3D',\n 'plotting',\n 'numpy',\n 'bigdata',\n 'ipython',\n 'jupyter',\n 'widgets',\n ],\n description=description,\n long_description=readme,\n platforms='any',\n provides=['vispy'],\n cmdclass={\n 'build_py': js_prerelease(build_py),\n 'egg_info': js_prerelease(egg_info),\n 'sdist': js_prerelease(sdist, strict=True),\n 'jsdeps': NPM,\n },\n python_requires='>=3.6',\n install_requires=['numpy', 'freetype-py'],\n setup_requires=['numpy', 'cython', 'setuptools_scm', 'setuptools_scm_git_archive'],\n extras_require={\n 'ipython-static': ['ipython'],\n 'ipython-vnc': ['ipython>=7'],\n 'ipython-webgl': ['ipywidgets>=7.0', 'ipython>=7', 'tornado'],\n 'pyglet': ['pyglet>=1.2'],\n 'pyqt5': ['pyqt5'],\n 'pyside': ['PySide'],\n 'pyside2': ['PySide2'],\n 'sdl2': ['PySDL2'],\n 'wx': ['wxPython'],\n 'doc': ['sphinx_bootstrap_theme', 'numpydoc'],\n },\n packages=find_packages(exclude=['make']),\n ext_modules=cythonize(extensions),\n package_dir={'vispy': 'vispy'},\n data_files=[\n ('share/jupyter/nbextensions/vispy', [\n 'vispy/static/extension.js',\n 'vispy/static/index.js',\n 'vispy/static/index.js.map',\n ]),\n ('etc/jupyter/nbconfig/notebook.d', ['vispy.json']),\n ],\n include_package_data=True,\n package_data={\n 'vispy': [op.join('io', '_data', '*'),\n op.join('html', 'static', 'js', '*'),\n op.join('app', 'tests', 'qt-designer.ui'),\n op.join('util', 'fonts', 'data', '*.ttf'),\n ],\n\n 'vispy.glsl': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.antialias': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.arrowheads': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.arrows': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.collections': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.colormaps': ['*.vert','*.frag', 
\"*.glsl\"],\n 'vispy.glsl.lines': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.markers': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.math': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.misc': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.transforms': ['*.vert','*.frag', \"*.glsl\"],\n\n },\n zip_safe=False,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Education',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Framework :: IPython'\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n\"\"\" Reading and writing of data like images and meshes.\n\"\"\"\n\nimport os\nfrom os import path as op\n\nfrom .wavefront import WavefrontReader, WavefrontWriter\nfrom .stl import load_stl\n\n\ndef read_mesh(fname):\n \"\"\"Read mesh data from file.\n\n Parameters\n ----------\n fname : str\n File name to read. Format will be inferred from the filename.\n Currently only '.obj' and '.obj.gz' are supported.\n\n Returns\n -------\n vertices : array\n Vertices.\n faces : array | None\n Triangle face definitions.\n normals : array\n Normals for the mesh.\n texcoords : array | None\n Texture coordinates.\n \"\"\"\n # Check format\n fmt = op.splitext(fname)[1].lower()\n if fmt == '.gz':\n fmt = op.splitext(op.splitext(fname)[0])[1].lower()\n\n if fmt in ('.obj'):\n return WavefrontReader.read(fname)\n elif fmt in ('.stl'):\n file_obj = open(fname, mode='rb')\n mesh = load_stl(file_obj)\n vertices = mesh['vertices']\n faces = mesh['faces']\n normals = mesh['face_normals']\n texcoords = None\n return vertices, faces, normals, texcoords\n else:\n try:\n import meshio\n except ImportError:\n raise ValueError('read_mesh does not understand format %s.' % fmt)\n\n try:\n mesh = meshio.read(fname)\n except meshio.ReadError:\n raise ValueError('read_mesh does not understand format %s.' % fmt)\n\n triangles = mesh.get_cells_type(\"triangle\")\n if len(triangles) == 0:\n raise ValueError('mesh file does not contain triangles.')\n\n return mesh.points, triangles, None, None\n\n\ndef write_mesh(fname, vertices, faces, normals, texcoords, name='',\n format=None, overwrite=False, reshape_faces=True):\n \"\"\" Write mesh data to file.\n\n Parameters\n ----------\n fname : str\n Filename to write. Must end with \".obj\" or \".gz\".\n vertices : array\n Vertices.\n faces : array | None\n Triangle face definitions.\n normals : array\n Normals for the mesh.\n texcoords : array | None\n Texture coordinates.\n name : str\n Name of the object.\n format : str\n Currently only \"obj\" is supported.\n overwrite : bool\n If the file exists, overwrite it.\n reshape_faces : bool\n Reshape the `faces` array to (Nf, 3). 
Set to `False`\n if you need to write a mesh with non triangular faces.\n \"\"\"\n # Check file\n if op.isfile(fname) and not overwrite:\n raise IOError('file \"%s\" exists, use overwrite=True' % fname)\n\n if format is None:\n format = os.path.splitext(fname)[1][1:]\n\n # Check format\n if format == 'obj':\n WavefrontWriter.write(fname, vertices, faces,\n normals, texcoords, name, reshape_faces)\n return\n\n try:\n import meshio\n except ImportError:\n raise ValueError('write_mesh does not understand format %s.' % format)\n\n cell_data = {}\n if normals is not None:\n cell_data[\"normals\"] = [normals]\n if texcoords is not None:\n cell_data[\"texcoords\"] = [texcoords]\n\n mesh = meshio.Mesh(vertices, [(\"triangle\", faces)], cell_data=cell_data)\n\n try:\n mesh.write(fname, file_format=format)\n except meshio.WriteError:\n raise ValueError('write_mesh does not understand format %s.' % format)\n", "path": "vispy/io/mesh.py"}, {"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\"\"\"Vispy setup script.\n\nSteps to do a new release:\n\nPreparations:\n * Test on Windows, Linux, Mac\n * Make release notes\n * Update API documentation and other docs that need updating.\n\nDefine the version and release:\n * tag the tip changeset as version x.x.x; `git tag -a 'vX.Y.Z' -m \"Version X.Y.Z\"`\n * push tag to github\n * verify that azure pipelines complete\n * verify that `.tar.gz` sdist and binary wheels are available on PyPI\n\nAnnouncing:\n * It can be worth waiting a day for eager users to report critical bugs\n * Announce in scipy-user, vispy mailing list, twitter (@vispyproject)\n\n\"\"\"\n\nimport os\nimport sys\nimport platform\nfrom os import path as op\nfrom distutils import log\nfrom setuptools import setup, find_packages, Command, Extension\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.egg_info import egg_info\nfrom subprocess import check_call\n\nimport numpy as np\nfrom Cython.Build import cythonize\n\nlog.set_verbosity(log.DEBUG)\nlog.info('setup.py entered')\nlog.info('$PATH=%s' % os.environ['PATH'])\n\nname = 'vispy'\ndescription = 'Interactive visualization in Python'\n\n# Special commands for building jupyter notebook extension\nhere = os.path.dirname(os.path.abspath(__file__))\nnode_root = os.path.join(here, 'js')\nis_repo = os.path.exists(os.path.join(here, '.git'))\n\nnpm_path = os.pathsep.join([\n os.path.join(node_root, 'node_modules', '.bin'),\n os.environ.get('PATH', os.defpath),\n])\n\n\ndef set_builtin(name, value):\n if isinstance(__builtins__, dict):\n __builtins__[name] = value\n else:\n setattr(__builtins__, name, value)\n\n\ndef js_prerelease(command, strict=False):\n \"\"\"decorator for building minified js/css prior to another command\"\"\"\n class DecoratedCommand(command):\n def run(self):\n jsdeps = self.distribution.get_command_obj('jsdeps')\n if not is_repo and all(os.path.exists(t) for t in jsdeps.targets):\n # sdist, nothing to do\n command.run(self)\n return\n\n try:\n self.distribution.run_command('jsdeps')\n except Exception as e:\n missing = [t for t in jsdeps.targets if not os.path.exists(t)]\n if strict or missing:\n log.warn('rebuilding js and css failed')\n if missing:\n log.error('missing files: %s' % missing)\n # HACK: Allow users who can't build the JS to still install vispy\n if not is_repo:\n raise e\n log.warn('WARNING: continuing installation 
WITHOUT nbextension javascript')\n # remove JS files from data_files so setuptools doesn't try to copy\n # non-existent files\n self.distribution.data_files = [x for x in self.distribution.data_files\n if 'jupyter' not in x[0]]\n else:\n log.warn('rebuilding js and css failed (not a problem)')\n log.warn(str(e))\n command.run(self)\n update_package_data(self.distribution)\n return DecoratedCommand\n\n\ndef update_package_data(distribution):\n \"\"\"update package_data to catch changes during setup\"\"\"\n build_py = distribution.get_command_obj('build_py')\n # distribution.package_data = find_package_data()\n # re-init build_py options which load package_data\n build_py.finalize_options()\n\n\nclass NPM(Command):\n description = 'install package.json dependencies using npm'\n\n user_options = []\n\n node_modules = os.path.join(node_root, 'node_modules')\n\n targets = [\n os.path.join(here, 'vispy', 'static', 'extension.js'),\n os.path.join(here, 'vispy', 'static', 'index.js')\n ]\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def get_npm_name(self):\n npmName = 'npm';\n if platform.system() == 'Windows':\n npmName = 'npm.cmd';\n\n return npmName;\n\n def has_npm(self):\n npmName = self.get_npm_name();\n try:\n check_call([npmName, '--version'])\n return True\n except:\n return False\n\n def should_run_npm_install(self):\n package_json = os.path.join(node_root, 'package.json')\n node_modules_exists = os.path.exists(self.node_modules)\n return self.has_npm()\n\n def run(self):\n has_npm = self.has_npm()\n if not has_npm:\n log.error(\"`npm` unavailable. If you're running this command \"\n \"using sudo, make sure `npm` is available to sudo\")\n\n env = os.environ.copy()\n env['PATH'] = npm_path\n\n if self.should_run_npm_install():\n log.info(\"Installing build dependencies with npm. 
This may take \"\n \"a while...\")\n npmName = self.get_npm_name();\n check_call([npmName, 'install', '--verbose'], cwd=node_root,\n stdout=sys.stdout, stderr=sys.stderr)\n os.utime(self.node_modules, None)\n\n for t in self.targets:\n if not os.path.exists(t):\n msg = 'Missing file: %s' % t\n if not has_npm:\n msg += '\\nnpm is required to build a development ' \\\n 'version of a widget extension'\n raise ValueError(msg)\n\n # update package data in case this created new files\n update_package_data(self.distribution)\n\n\nextensions = [Extension('vispy.visuals.text._sdf_cpu',\n [op.join('vispy', 'visuals', 'text', '_sdf_cpu.pyx')],\n include_dirs=[np.get_include()]),\n ]\n\nreadme = open('README.rst', 'r').read()\nsetup(\n name=name,\n use_scm_version={'write_to': 'vispy/version.py'},\n author='Vispy contributors',\n author_email='[email protected]',\n license='(new) BSD',\n url='http://vispy.org',\n download_url='https://pypi.python.org/pypi/vispy',\n keywords=[\n 'visualization',\n 'OpenGl',\n 'ES',\n 'medical',\n 'imaging',\n '3D',\n 'plotting',\n 'numpy',\n 'bigdata',\n 'ipython',\n 'jupyter',\n 'widgets',\n ],\n description=description,\n long_description=readme,\n platforms='any',\n provides=['vispy'],\n cmdclass={\n 'build_py': js_prerelease(build_py),\n 'egg_info': js_prerelease(egg_info),\n 'sdist': js_prerelease(sdist, strict=True),\n 'jsdeps': NPM,\n },\n python_requires='>=3.6',\n install_requires=['numpy', 'freetype-py'],\n setup_requires=['numpy', 'cython', 'setuptools_scm', 'setuptools_scm_git_archive'],\n extras_require={\n 'ipython-static': ['ipython'],\n 'ipython-vnc': ['ipython>=7'],\n 'ipython-webgl': ['ipywidgets>=7.0', 'ipython>=7', 'tornado'],\n 'pyglet': ['pyglet>=1.2'],\n 'pyqt5': ['pyqt5'],\n 'pyside': ['PySide'],\n 'pyside2': ['PySide2'],\n 'sdl2': ['PySDL2'],\n 'wx': ['wxPython'],\n 'doc': ['sphinx_bootstrap_theme', 'numpydoc'],\n 'io': ['meshio'],\n },\n packages=find_packages(exclude=['make']),\n ext_modules=cythonize(extensions),\n package_dir={'vispy': 'vispy'},\n data_files=[\n ('share/jupyter/nbextensions/vispy', [\n 'vispy/static/extension.js',\n 'vispy/static/index.js',\n 'vispy/static/index.js.map',\n ]),\n ('etc/jupyter/nbconfig/notebook.d', ['vispy.json']),\n ],\n include_package_data=True,\n package_data={\n 'vispy': [op.join('io', '_data', '*'),\n op.join('html', 'static', 'js', '*'),\n op.join('app', 'tests', 'qt-designer.ui'),\n op.join('util', 'fonts', 'data', '*.ttf'),\n ],\n\n 'vispy.glsl': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.antialias': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.arrowheads': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.arrows': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.collections': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.colormaps': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.lines': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.markers': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.math': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.misc': ['*.vert','*.frag', \"*.glsl\"],\n 'vispy.glsl.transforms': ['*.vert','*.frag', \"*.glsl\"],\n\n },\n zip_safe=False,\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Education',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Programming Language :: Python',\n 'Programming 
Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Framework :: IPython'\n ],\n)\n", "path": "setup.py"}]} | 4,089 | 730 |
gh_patches_debug_17315 | rasdani/github-patches | git_diff | astronomer__astro-sdk-1160 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Row count not working for snowflake
**Describe the bug**
Currently row count fails for snowflake
**Expected behavior**
Row count should work for snowflake
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python-sdk/src/astro/table.py`
Content:
```
1 from __future__ import annotations
2
3 import random
4 import string
5 from typing import Any
6
7 from attr import define, field, fields_dict
8 from sqlalchemy import Column, MetaData, func, select
9
10 from astro.airflow.datasets import Dataset
11 from astro.databases import create_database
12
13 MAX_TABLE_NAME_LENGTH = 62
14 TEMP_PREFIX = "_tmp_"
15
16
17 @define
18 class Metadata:
19 """
20 Contains additional information to access a SQL Table, which is very likely optional and, in some cases, may
21 be database-specific.
22
23 :param schema: A schema name
24 :param database: A database name
25 """
26
27 # This property is used by several databases, including: Postgres, Snowflake and BigQuery ("namespace")
28 schema: str | None = None
29 database: str | None = None
30
31 def is_empty(self) -> bool:
32 """Check if all the fields are None."""
33 return all(getattr(self, field_name) is None for field_name in fields_dict(self.__class__))
34
35
36 @define(slots=False)
37 class BaseTable:
38 """
39 Base class that has information necessary to access a SQL Table. It is agnostic to the database type.
40 If no name is given, it auto-generates a name for the Table and considers it temporary.
41
42 Temporary tables are prefixed with the prefix TEMP_PREFIX.
43
44 :param name: The name of the database table. If name not provided then it would create a temporary name
45 :param conn_id: The Airflow connection id. This will be used to identify the right database type at the runtime
46 :param metadata: A metadata object which will have database or schema name
47 :param columns: columns which define the database table schema.
48 :sphinx-autoapi-skip:
49 """
50
51 template_fields = ("name",)
52
53 # TODO: discuss alternative names to this class, since it contains metadata as opposed to be the
54 # SQL table itself
55 # Some ideas: TableRef, TableMetadata, TableData, TableDataset
56 _name: str = field(default="")
57 conn_id: str = field(default="")
58 # Setting converter allows passing a dictionary to metadata arg
59 metadata: Metadata = field(
60 factory=Metadata,
61 converter=lambda val: Metadata(**val) if isinstance(val, dict) else val,
62 )
63 columns: list[Column] = field(factory=list)
64 temp: bool = field(default=False)
65
66 # We need this method to pickle Table object, without this we cannot push/pull this object from xcom.
67 def __getstate__(self):
68 return self.__dict__
69
70 def __attrs_post_init__(self) -> None:
71 if not self._name or self._name.startswith("_tmp"):
72 self.temp = True
73
74 def _create_unique_table_name(self, prefix: str = "") -> str:
75 """
76 If a table is instantiated without a name, create a unique table for it.
77 This new name should be compatible with all supported databases.
78 """
79 schema_length = len((self.metadata and self.metadata.schema) or "") + 1
80 prefix_length = len(prefix)
81
82 unique_id = random.choice(string.ascii_lowercase) + "".join(
83 random.choice(string.ascii_lowercase + string.digits)
84 for _ in range(MAX_TABLE_NAME_LENGTH - schema_length - prefix_length)
85 )
86 if prefix:
87 unique_id = f"{prefix}{unique_id}"
88
89 return unique_id
90
91 def create_similar_table(self) -> Table:
92 """
93 Create a new table with a unique name but with the same metadata.
94 """
95 return Table( # type: ignore
96 name=self._create_unique_table_name(),
97 conn_id=self.conn_id,
98 metadata=self.metadata,
99 )
100
101 @property
102 def sqlalchemy_metadata(self) -> MetaData:
103 """Return the Sqlalchemy metadata for the given table."""
104 if self.metadata and self.metadata.schema:
105 alchemy_metadata = MetaData(schema=self.metadata.schema)
106 else:
107 alchemy_metadata = MetaData()
108 return alchemy_metadata
109
110 @property
111 def name(self) -> str:
112 """
113 Return either the user-defined name or auto-generate one.
114 :sphinx-autoapi-skip:
115 """
116 if self.temp and not self._name:
117 self._name = self._create_unique_table_name(TEMP_PREFIX)
118 return self._name
119
120 @name.setter
121 def name(self, value: str) -> None:
122 """
123 Set the table name. Once this happens, the table is no longer considered temporary.
124 """
125 if not isinstance(value, property) and value != self._name:
126 self._name = value
127 self.temp = False
128
129 @property
130 def row_count(self) -> Any:
131 """
132 Return the row count of table.
133 """
134 db = create_database(self.conn_id)
135 tb = db.get_sqla_table(table=self)
136 query = select(func.count("*")).select_from(tb)
137 return db.run_sql(query).scalar()
138
139 def to_json(self):
140 return {
141 "class": "Table",
142 "name": self.name,
143 "metadata": {
144 "schema": self.metadata.schema,
145 "database": self.metadata.database,
146 },
147 "temp": self.temp,
148 "conn_id": self.conn_id,
149 }
150
151 @classmethod
152 def from_json(cls, obj: dict):
153 return Table(
154 name=obj["name"],
155 metadata=Metadata(**obj["metadata"]),
156 temp=obj["temp"],
157 conn_id=obj["conn_id"],
158 )
159
160 def openlineage_dataset_name(self) -> str:
161 """
162 Returns the open lineage dataset name as per
163 https://github.com/OpenLineage/OpenLineage/blob/main/spec/Naming.md
164 """
165 database = create_database(self.conn_id)
166 return database.openlineage_dataset_name(table=self)
167
168 def openlineage_dataset_namespace(self) -> str:
169 """
170 Returns the open lineage dataset namespace as per
171 https://github.com/OpenLineage/OpenLineage/blob/main/spec/Naming.md
172 """
173 database = create_database(self.conn_id)
174 return database.openlineage_dataset_namespace()
175
176
177 @define(slots=False)
178 class TempTable(BaseTable):
179 """
180 Internal class to represent a Temporary table
181
182 :sphinx-autoapi-skip:
183 """
184
185 temp: bool = field(default=True)
186
187
188 @define(slots=False)
189 class Table(BaseTable, Dataset):
190 """
191 User-facing class that has information necessary to access a SQL Table. It is agnostic to the database type.
192 If no name is given, it auto-generates a name for the Table and considers it temporary.
193
194 Temporary tables are prefixed with the prefix TEMP_PREFIX.
195
196 :param name: The name of the database table. If name not provided then it would create a temporary name
197 :param conn_id: The Airflow connection id. This will be used to identify the right database type at the runtime
198 :param metadata: A metadata object which will have database or schema name
199 :param columns: columns which define the database table schema.
200 """
201
202 uri: str = field(init=False)
203 extra: dict | None = field(init=False, factory=dict)
204
205 def __new__(cls, *args, **kwargs):
206 name = kwargs.get("name") or args and args[0] or ""
207 temp = kwargs.get("temp", False)
208 if temp or (not name or name.startswith("_tmp")):
209 return TempTable(*args, **kwargs)
210 return super().__new__(cls)
211
212 @uri.default
213 def _path_to_dataset_uri(self) -> str:
214 """Build a URI to be passed to Dataset obj introduced in Airflow 2.4"""
215 from urllib.parse import urlencode, urlparse
216
217 path = f"astro://{self.conn_id}@"
218 db_extra = {"table": self.name}
219 if self.metadata.schema:
220 db_extra["schema"] = self.metadata.schema
221 if self.metadata.database:
222 db_extra["database"] = self.metadata.database
223 parsed_url = urlparse(url=path)
224 new_parsed_url = parsed_url._replace(query=urlencode(db_extra))
225 return new_parsed_url.geturl()
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python-sdk/src/astro/table.py b/python-sdk/src/astro/table.py
--- a/python-sdk/src/astro/table.py
+++ b/python-sdk/src/astro/table.py
@@ -5,7 +5,7 @@
from typing import Any
from attr import define, field, fields_dict
-from sqlalchemy import Column, MetaData, func, select
+from sqlalchemy import Column, MetaData
from astro.airflow.datasets import Dataset
from astro.databases import create_database
@@ -132,9 +132,10 @@
Return the row count of table.
"""
db = create_database(self.conn_id)
- tb = db.get_sqla_table(table=self)
- query = select(func.count("*")).select_from(tb)
- return db.run_sql(query).scalar()
+ result = db.run_sql(
+ f"select count(*) from {db.get_table_qualified_name(self)}" # skipcq: BAN-B608
+ ).scalar()
+ return result
def to_json(self):
return {
| {"golden_diff": "diff --git a/python-sdk/src/astro/table.py b/python-sdk/src/astro/table.py\n--- a/python-sdk/src/astro/table.py\n+++ b/python-sdk/src/astro/table.py\n@@ -5,7 +5,7 @@\n from typing import Any\n \n from attr import define, field, fields_dict\n-from sqlalchemy import Column, MetaData, func, select\n+from sqlalchemy import Column, MetaData\n \n from astro.airflow.datasets import Dataset\n from astro.databases import create_database\n@@ -132,9 +132,10 @@\n Return the row count of table.\n \"\"\"\n db = create_database(self.conn_id)\n- tb = db.get_sqla_table(table=self)\n- query = select(func.count(\"*\")).select_from(tb)\n- return db.run_sql(query).scalar()\n+ result = db.run_sql(\n+ f\"select count(*) from {db.get_table_qualified_name(self)}\" # skipcq: BAN-B608\n+ ).scalar()\n+ return result\n \n def to_json(self):\n return {\n", "issue": "Row count not working for snowflake\n**Describe the bug**\r\nCurrently row count fails for snowflake \r\n\r\n**Expected behavior**\r\nRow count should work snowflake\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport random\nimport string\nfrom typing import Any\n\nfrom attr import define, field, fields_dict\nfrom sqlalchemy import Column, MetaData, func, select\n\nfrom astro.airflow.datasets import Dataset\nfrom astro.databases import create_database\n\nMAX_TABLE_NAME_LENGTH = 62\nTEMP_PREFIX = \"_tmp_\"\n\n\n@define\nclass Metadata:\n \"\"\"\n Contains additional information to access a SQL Table, which is very likely optional and, in some cases, may\n be database-specific.\n\n :param schema: A schema name\n :param database: A database name\n \"\"\"\n\n # This property is used by several databases, including: Postgres, Snowflake and BigQuery (\"namespace\")\n schema: str | None = None\n database: str | None = None\n\n def is_empty(self) -> bool:\n \"\"\"Check if all the fields are None.\"\"\"\n return all(getattr(self, field_name) is None for field_name in fields_dict(self.__class__))\n\n\n@define(slots=False)\nclass BaseTable:\n \"\"\"\n Base class that has information necessary to access a SQL Table. It is agnostic to the database type.\n If no name is given, it auto-generates a name for the Table and considers it temporary.\n\n Temporary tables are prefixed with the prefix TEMP_PREFIX.\n\n :param name: The name of the database table. If name not provided then it would create a temporary name\n :param conn_id: The Airflow connection id. 
This will be used to identify the right database type at the runtime\n :param metadata: A metadata object which will have database or schema name\n :param columns: columns which define the database table schema.\n :sphinx-autoapi-skip:\n \"\"\"\n\n template_fields = (\"name\",)\n\n # TODO: discuss alternative names to this class, since it contains metadata as opposed to be the\n # SQL table itself\n # Some ideas: TableRef, TableMetadata, TableData, TableDataset\n _name: str = field(default=\"\")\n conn_id: str = field(default=\"\")\n # Setting converter allows passing a dictionary to metadata arg\n metadata: Metadata = field(\n factory=Metadata,\n converter=lambda val: Metadata(**val) if isinstance(val, dict) else val,\n )\n columns: list[Column] = field(factory=list)\n temp: bool = field(default=False)\n\n # We need this method to pickle Table object, without this we cannot push/pull this object from xcom.\n def __getstate__(self):\n return self.__dict__\n\n def __attrs_post_init__(self) -> None:\n if not self._name or self._name.startswith(\"_tmp\"):\n self.temp = True\n\n def _create_unique_table_name(self, prefix: str = \"\") -> str:\n \"\"\"\n If a table is instantiated without a name, create a unique table for it.\n This new name should be compatible with all supported databases.\n \"\"\"\n schema_length = len((self.metadata and self.metadata.schema) or \"\") + 1\n prefix_length = len(prefix)\n\n unique_id = random.choice(string.ascii_lowercase) + \"\".join(\n random.choice(string.ascii_lowercase + string.digits)\n for _ in range(MAX_TABLE_NAME_LENGTH - schema_length - prefix_length)\n )\n if prefix:\n unique_id = f\"{prefix}{unique_id}\"\n\n return unique_id\n\n def create_similar_table(self) -> Table:\n \"\"\"\n Create a new table with a unique name but with the same metadata.\n \"\"\"\n return Table( # type: ignore\n name=self._create_unique_table_name(),\n conn_id=self.conn_id,\n metadata=self.metadata,\n )\n\n @property\n def sqlalchemy_metadata(self) -> MetaData:\n \"\"\"Return the Sqlalchemy metadata for the given table.\"\"\"\n if self.metadata and self.metadata.schema:\n alchemy_metadata = MetaData(schema=self.metadata.schema)\n else:\n alchemy_metadata = MetaData()\n return alchemy_metadata\n\n @property\n def name(self) -> str:\n \"\"\"\n Return either the user-defined name or auto-generate one.\n :sphinx-autoapi-skip:\n \"\"\"\n if self.temp and not self._name:\n self._name = self._create_unique_table_name(TEMP_PREFIX)\n return self._name\n\n @name.setter\n def name(self, value: str) -> None:\n \"\"\"\n Set the table name. 
Once this happens, the table is no longer considered temporary.\n \"\"\"\n if not isinstance(value, property) and value != self._name:\n self._name = value\n self.temp = False\n\n @property\n def row_count(self) -> Any:\n \"\"\"\n Return the row count of table.\n \"\"\"\n db = create_database(self.conn_id)\n tb = db.get_sqla_table(table=self)\n query = select(func.count(\"*\")).select_from(tb)\n return db.run_sql(query).scalar()\n\n def to_json(self):\n return {\n \"class\": \"Table\",\n \"name\": self.name,\n \"metadata\": {\n \"schema\": self.metadata.schema,\n \"database\": self.metadata.database,\n },\n \"temp\": self.temp,\n \"conn_id\": self.conn_id,\n }\n\n @classmethod\n def from_json(cls, obj: dict):\n return Table(\n name=obj[\"name\"],\n metadata=Metadata(**obj[\"metadata\"]),\n temp=obj[\"temp\"],\n conn_id=obj[\"conn_id\"],\n )\n\n def openlineage_dataset_name(self) -> str:\n \"\"\"\n Returns the open lineage dataset name as per\n https://github.com/OpenLineage/OpenLineage/blob/main/spec/Naming.md\n \"\"\"\n database = create_database(self.conn_id)\n return database.openlineage_dataset_name(table=self)\n\n def openlineage_dataset_namespace(self) -> str:\n \"\"\"\n Returns the open lineage dataset namespace as per\n https://github.com/OpenLineage/OpenLineage/blob/main/spec/Naming.md\n \"\"\"\n database = create_database(self.conn_id)\n return database.openlineage_dataset_namespace()\n\n\n@define(slots=False)\nclass TempTable(BaseTable):\n \"\"\"\n Internal class to represent a Temporary table\n\n :sphinx-autoapi-skip:\n \"\"\"\n\n temp: bool = field(default=True)\n\n\n@define(slots=False)\nclass Table(BaseTable, Dataset):\n \"\"\"\n User-facing class that has information necessary to access a SQL Table. It is agnostic to the database type.\n If no name is given, it auto-generates a name for the Table and considers it temporary.\n\n Temporary tables are prefixed with the prefix TEMP_PREFIX.\n\n :param name: The name of the database table. If name not provided then it would create a temporary name\n :param conn_id: The Airflow connection id. 
This will be used to identify the right database type at the runtime\n :param metadata: A metadata object which will have database or schema name\n :param columns: columns which define the database table schema.\n \"\"\"\n\n uri: str = field(init=False)\n extra: dict | None = field(init=False, factory=dict)\n\n def __new__(cls, *args, **kwargs):\n name = kwargs.get(\"name\") or args and args[0] or \"\"\n temp = kwargs.get(\"temp\", False)\n if temp or (not name or name.startswith(\"_tmp\")):\n return TempTable(*args, **kwargs)\n return super().__new__(cls)\n\n @uri.default\n def _path_to_dataset_uri(self) -> str:\n \"\"\"Build a URI to be passed to Dataset obj introduced in Airflow 2.4\"\"\"\n from urllib.parse import urlencode, urlparse\n\n path = f\"astro://{self.conn_id}@\"\n db_extra = {\"table\": self.name}\n if self.metadata.schema:\n db_extra[\"schema\"] = self.metadata.schema\n if self.metadata.database:\n db_extra[\"database\"] = self.metadata.database\n parsed_url = urlparse(url=path)\n new_parsed_url = parsed_url._replace(query=urlencode(db_extra))\n return new_parsed_url.geturl()\n", "path": "python-sdk/src/astro/table.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport random\nimport string\nfrom typing import Any\n\nfrom attr import define, field, fields_dict\nfrom sqlalchemy import Column, MetaData\n\nfrom astro.airflow.datasets import Dataset\nfrom astro.databases import create_database\n\nMAX_TABLE_NAME_LENGTH = 62\nTEMP_PREFIX = \"_tmp_\"\n\n\n@define\nclass Metadata:\n \"\"\"\n Contains additional information to access a SQL Table, which is very likely optional and, in some cases, may\n be database-specific.\n\n :param schema: A schema name\n :param database: A database name\n \"\"\"\n\n # This property is used by several databases, including: Postgres, Snowflake and BigQuery (\"namespace\")\n schema: str | None = None\n database: str | None = None\n\n def is_empty(self) -> bool:\n \"\"\"Check if all the fields are None.\"\"\"\n return all(getattr(self, field_name) is None for field_name in fields_dict(self.__class__))\n\n\n@define(slots=False)\nclass BaseTable:\n \"\"\"\n Base class that has information necessary to access a SQL Table. It is agnostic to the database type.\n If no name is given, it auto-generates a name for the Table and considers it temporary.\n\n Temporary tables are prefixed with the prefix TEMP_PREFIX.\n\n :param name: The name of the database table. If name not provided then it would create a temporary name\n :param conn_id: The Airflow connection id. 
This will be used to identify the right database type at the runtime\n :param metadata: A metadata object which will have database or schema name\n :param columns: columns which define the database table schema.\n :sphinx-autoapi-skip:\n \"\"\"\n\n template_fields = (\"name\",)\n\n # TODO: discuss alternative names to this class, since it contains metadata as opposed to be the\n # SQL table itself\n # Some ideas: TableRef, TableMetadata, TableData, TableDataset\n _name: str = field(default=\"\")\n conn_id: str = field(default=\"\")\n # Setting converter allows passing a dictionary to metadata arg\n metadata: Metadata = field(\n factory=Metadata,\n converter=lambda val: Metadata(**val) if isinstance(val, dict) else val,\n )\n columns: list[Column] = field(factory=list)\n temp: bool = field(default=False)\n\n # We need this method to pickle Table object, without this we cannot push/pull this object from xcom.\n def __getstate__(self):\n return self.__dict__\n\n def __attrs_post_init__(self) -> None:\n if not self._name or self._name.startswith(\"_tmp\"):\n self.temp = True\n\n def _create_unique_table_name(self, prefix: str = \"\") -> str:\n \"\"\"\n If a table is instantiated without a name, create a unique table for it.\n This new name should be compatible with all supported databases.\n \"\"\"\n schema_length = len((self.metadata and self.metadata.schema) or \"\") + 1\n prefix_length = len(prefix)\n\n unique_id = random.choice(string.ascii_lowercase) + \"\".join(\n random.choice(string.ascii_lowercase + string.digits)\n for _ in range(MAX_TABLE_NAME_LENGTH - schema_length - prefix_length)\n )\n if prefix:\n unique_id = f\"{prefix}{unique_id}\"\n\n return unique_id\n\n def create_similar_table(self) -> Table:\n \"\"\"\n Create a new table with a unique name but with the same metadata.\n \"\"\"\n return Table( # type: ignore\n name=self._create_unique_table_name(),\n conn_id=self.conn_id,\n metadata=self.metadata,\n )\n\n @property\n def sqlalchemy_metadata(self) -> MetaData:\n \"\"\"Return the Sqlalchemy metadata for the given table.\"\"\"\n if self.metadata and self.metadata.schema:\n alchemy_metadata = MetaData(schema=self.metadata.schema)\n else:\n alchemy_metadata = MetaData()\n return alchemy_metadata\n\n @property\n def name(self) -> str:\n \"\"\"\n Return either the user-defined name or auto-generate one.\n :sphinx-autoapi-skip:\n \"\"\"\n if self.temp and not self._name:\n self._name = self._create_unique_table_name(TEMP_PREFIX)\n return self._name\n\n @name.setter\n def name(self, value: str) -> None:\n \"\"\"\n Set the table name. 
Once this happens, the table is no longer considered temporary.\n \"\"\"\n if not isinstance(value, property) and value != self._name:\n self._name = value\n self.temp = False\n\n @property\n def row_count(self) -> Any:\n \"\"\"\n Return the row count of table.\n \"\"\"\n db = create_database(self.conn_id)\n result = db.run_sql(\n f\"select count(*) from {db.get_table_qualified_name(self)}\" # skipcq: BAN-B608\n ).scalar()\n return result\n\n def to_json(self):\n return {\n \"class\": \"Table\",\n \"name\": self.name,\n \"metadata\": {\n \"schema\": self.metadata.schema,\n \"database\": self.metadata.database,\n },\n \"temp\": self.temp,\n \"conn_id\": self.conn_id,\n }\n\n @classmethod\n def from_json(cls, obj: dict):\n return Table(\n name=obj[\"name\"],\n metadata=Metadata(**obj[\"metadata\"]),\n temp=obj[\"temp\"],\n conn_id=obj[\"conn_id\"],\n )\n\n def openlineage_dataset_name(self) -> str:\n \"\"\"\n Returns the open lineage dataset name as per\n https://github.com/OpenLineage/OpenLineage/blob/main/spec/Naming.md\n \"\"\"\n database = create_database(self.conn_id)\n return database.openlineage_dataset_name(table=self)\n\n def openlineage_dataset_namespace(self) -> str:\n \"\"\"\n Returns the open lineage dataset namespace as per\n https://github.com/OpenLineage/OpenLineage/blob/main/spec/Naming.md\n \"\"\"\n database = create_database(self.conn_id)\n return database.openlineage_dataset_namespace()\n\n\n@define(slots=False)\nclass TempTable(BaseTable):\n \"\"\"\n Internal class to represent a Temporary table\n\n :sphinx-autoapi-skip:\n \"\"\"\n\n temp: bool = field(default=True)\n\n\n@define(slots=False)\nclass Table(BaseTable, Dataset):\n \"\"\"\n User-facing class that has information necessary to access a SQL Table. It is agnostic to the database type.\n If no name is given, it auto-generates a name for the Table and considers it temporary.\n\n Temporary tables are prefixed with the prefix TEMP_PREFIX.\n\n :param name: The name of the database table. If name not provided then it would create a temporary name\n :param conn_id: The Airflow connection id. This will be used to identify the right database type at the runtime\n :param metadata: A metadata object which will have database or schema name\n :param columns: columns which define the database table schema.\n \"\"\"\n\n uri: str = field(init=False)\n extra: dict | None = field(init=False, factory=dict)\n\n def __new__(cls, *args, **kwargs):\n name = kwargs.get(\"name\") or args and args[0] or \"\"\n temp = kwargs.get(\"temp\", False)\n if temp or (not name or name.startswith(\"_tmp\")):\n return TempTable(*args, **kwargs)\n return super().__new__(cls)\n\n @uri.default\n def _path_to_dataset_uri(self) -> str:\n \"\"\"Build a URI to be passed to Dataset obj introduced in Airflow 2.4\"\"\"\n from urllib.parse import urlencode, urlparse\n\n path = f\"astro://{self.conn_id}@\"\n db_extra = {\"table\": self.name}\n if self.metadata.schema:\n db_extra[\"schema\"] = self.metadata.schema\n if self.metadata.database:\n db_extra[\"database\"] = self.metadata.database\n parsed_url = urlparse(url=path)\n new_parsed_url = parsed_url._replace(query=urlencode(db_extra))\n return new_parsed_url.geturl()\n", "path": "python-sdk/src/astro/table.py"}]} | 2,608 | 228 |
gh_patches_debug_3801 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-167 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Host docs, build on release
Decide where to host generated docs and configure CI to generate (and possibly upload) new docs on each release.
See #6 for CI bootstrapping.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # http://www.sphinx-doc.org/en/master/config
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12
13 import os
14 import sys
15
16 sys.path.insert(0, os.path.abspath("../opentelemetry-api/src/"))
17
18
19 # -- Project information -----------------------------------------------------
20
21 project = "OpenTelemetry"
22 copyright = "2019, OpenTelemetry Authors"
23 author = "OpenTelemetry Authors"
24
25
26 # -- General configuration ---------------------------------------------------
27
28 # Easy automatic cross-references for `code in backticks`
29 default_role = "any"
30
31 # Add any Sphinx extension module names here, as strings. They can be
32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
33 # ones.
34 extensions = [
35 # API doc generation
36 "sphinx.ext.autodoc",
37 # Support for google-style docstrings
38 "sphinx.ext.napoleon",
39 # Infer types from hints instead of docstrings
40 "sphinx_autodoc_typehints",
41 # Add links to source from generated docs
42 "sphinx.ext.viewcode",
43 # Link to other sphinx docs
44 "sphinx.ext.intersphinx",
45 ]
46
47 intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)}
48
49 # http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky
50 # Sphinx will warn about all references where the target cannot be found.
51 nitpicky = True
52 nitpick_ignore = []
53
54 # Add any paths that contain templates here, relative to this directory.
55 templates_path = ["_templates"]
56
57 # List of patterns, relative to source directory, that match files and
58 # directories to ignore when looking for source files.
59 # This pattern also affects html_static_path and html_extra_path.
60 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
61
62 autodoc_default_options = {
63 "members": True,
64 "undoc-members": True,
65 "show-inheritance": True,
66 "member-order": "bysource",
67 }
68
69 # -- Options for HTML output -------------------------------------------------
70
71 # The theme to use for HTML and HTML Help pages. See the documentation for
72 # a list of builtin themes.
73 #
74 html_theme = "sphinx_rtd_theme"
75
76 # Add any paths that contain custom static files (such as style sheets) here,
77 # relative to this directory. They are copied after the builtin static files,
78 # so a file named "default.css" will overwrite the builtin "default.css".
79 html_static_path = []
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -42,6 +42,9 @@
"sphinx.ext.viewcode",
# Link to other sphinx docs
"sphinx.ext.intersphinx",
+ # Add a .nojekyll file to the generated HTML docs
+ # https://help.github.com/en/articles/files-that-start-with-an-underscore-are-missing
+ "sphinx.ext.githubpages",
]
intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)}
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -42,6 +42,9 @@\n \"sphinx.ext.viewcode\",\n # Link to other sphinx docs\n \"sphinx.ext.intersphinx\",\n+ # Add a .nojekyll file to the generated HTML docs\n+ # https://help.github.com/en/articles/files-that-start-with-an-underscore-are-missing\n+ \"sphinx.ext.githubpages\",\n ]\n \n intersphinx_mapping = {\"python\": (\"https://docs.python.org/3/\", None)}\n", "issue": "Host docs, build on release\nDecide where to host generated docs, configure CI to generate (and possibly upload) new docs on each release.\r\n\r\nSee #6 for CI bootstrapping.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../opentelemetry-api/src/\"))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"OpenTelemetry\"\ncopyright = \"2019, OpenTelemetry Authors\"\nauthor = \"OpenTelemetry Authors\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Easy automatic cross-references for `code in backticks`\ndefault_role = \"any\"\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n # API doc generation\n \"sphinx.ext.autodoc\",\n # Support for google-style docstrings\n \"sphinx.ext.napoleon\",\n # Infer types from hints instead of docstrings\n \"sphinx_autodoc_typehints\",\n # Add links to source from generated docs\n \"sphinx.ext.viewcode\",\n # Link to other sphinx docs\n \"sphinx.ext.intersphinx\",\n]\n\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3/\", None)}\n\n# http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky\n# Sphinx will warn about all references where the target cannot be found.\nnitpicky = True\nnitpick_ignore = []\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\nautodoc_default_options = {\n \"members\": True,\n \"undoc-members\": True,\n \"show-inheritance\": True,\n \"member-order\": \"bysource\",\n}\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../opentelemetry-api/src/\"))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"OpenTelemetry\"\ncopyright = \"2019, OpenTelemetry Authors\"\nauthor = \"OpenTelemetry Authors\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Easy automatic cross-references for `code in backticks`\ndefault_role = \"any\"\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n # API doc generation\n \"sphinx.ext.autodoc\",\n # Support for google-style docstrings\n \"sphinx.ext.napoleon\",\n # Infer types from hints instead of docstrings\n \"sphinx_autodoc_typehints\",\n # Add links to source from generated docs\n \"sphinx.ext.viewcode\",\n # Link to other sphinx docs\n \"sphinx.ext.intersphinx\",\n # Add a .nojekyll file to the generated HTML docs\n # https://help.github.com/en/articles/files-that-start-with-an-underscore-are-missing\n \"sphinx.ext.githubpages\",\n]\n\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3/\", None)}\n\n# http://www.sphinx-doc.org/en/master/config.html#confval-nitpicky\n# Sphinx will warn about all references where the target cannot be found.\nnitpicky = True\nnitpick_ignore = []\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\nautodoc_default_options = {\n \"members\": True,\n \"undoc-members\": True,\n \"show-inheritance\": True,\n \"member-order\": \"bysource\",\n}\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n", "path": "docs/conf.py"}]} | 1,057 | 129 |
gh_patches_debug_35773 | rasdani/github-patches | git_diff | tiangolo__fastapi-756 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
asyncpg uuid implementation not recognized by fastapi because of too strict typechecking
### Describe the bug
An alternative implementation of the `UUID` type is not serialized by fastapi; it throws an error instead. This is because it is of type `asyncpg.pgproto.pgproto.UUID` instead of type `uuid.UUID`. However, it is implemented in such a way that it does the exact same things as the regular UUID, just faster. Also,
```python
isinstance(asyncpg.pgproto.pgproto.UUID(), uuid.UUID) == True
```
this should make fastapi able to recognize it as a uuid.
### To Reproduce
1. Create a file called uuid_error.py with:
```Python
import uuid
from fastapi import FastAPI
from asyncpg.pgproto import pgproto
app = FastAPI()
@app.get("/normal_uuid")
def return_normal_uuid():
regular_uuid = uuid.UUID("a10ff360-3b1e-4984-a26f-d3ab460bdb51")
assert isinstance(regular_uuid, uuid.UUID)
return {"normal_uuid": regular_uuid}
@app.get("/fast_uuid")
def return_fast_uuid():
asyncpg_uuid = pgproto.UUID("a10ff360-3b1e-4984-a26f-d3ab460bdb51")
assert isinstance(asyncpg_uuid, uuid.UUID)
return {"fast_uuid": asyncpg_uuid}
```
2. Run it with uvicorn uuid_error:app
3. Open the browser and call the endpoint `/fast_uuid`.
4. It returns 'internal server error'.
5. But I expected it to return `{"fast_uuid":"a10ff360-3b1e-4984-a26f-d3ab460bdb51"}`.
### Expected behavior
I expected fastapi to jsonify the uuid and send it, but instead it returns 'internal server error'. The precise error thrown by the server is:
```
ValueError: [KeyError(<class 'asyncpg.pgproto.pgproto.UUID'>), TypeError("'asyncpg.pgproto.pgproto.UUID' object is not iterable"), TypeError('vars() argument must have __dict__ attribute')]
```
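Each entry in that list corresponds to one of the encoder's fallback attempts. The snippet below is a hand trace of those calls (an illustration, not code from the report):

```python
import uuid
from asyncpg.pgproto import pgproto
from pydantic.json import ENCODERS_BY_TYPE

obj = pgproto.UUID("a10ff360-3b1e-4984-a26f-d3ab460bdb51")
assert isinstance(obj, uuid.UUID)

attempts = (
    lambda: ENCODERS_BY_TYPE[type(obj)],  # KeyError: exact-type lookup misses the subclass
    lambda: dict(obj),                    # TypeError: the UUID object is not iterable
    lambda: vars(obj),                    # TypeError: no __dict__ on the C-level object
)
for attempt in attempts:
    try:
        attempt()
    except Exception as exc:
        print(type(exc).__name__, exc)
```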
### Environment
- OS: Linux Mint 19 Tara
- FastAPI Version: 0.44.0
- Python version: 3.8.0
### Additional context
I initially made an issue in the asyncpg repo asking them if they could fix it (https://github.com/MagicStack/asyncpg/issues/512); I think it's useful as additional context for this issue.
The problem can be solved here in the fastapi code by improving it a little. I'm working on a PR; this line seems to be the issue:
https://github.com/tiangolo/fastapi/blob/c200bc2240d48a4a27e96fba536351038eafc44f/fastapi/encoders.py#L113
I think it shouldn't use `type()` but should instead use `isinstance()`; this will make for slightly looser type checking, but I think that might be a good thing in this case.
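A rough sketch of what an `isinstance()`-based fallback could look like (`find_encoder` is a hypothetical helper, not FastAPI's actual API):

```python
from pydantic.json import ENCODERS_BY_TYPE

def find_encoder(obj):
    # Fast path: exact type match, as today.
    encoder = ENCODERS_BY_TYPE.get(type(obj))
    if encoder is not None:
        return encoder
    # Fallback: accept subclasses such as asyncpg.pgproto.pgproto.UUID.
    for registered_type, registered_encoder in ENCODERS_BY_TYPE.items():
        if isinstance(obj, registered_type):
            return registered_encoder
    return None
```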
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fastapi/encoders.py`
Content:
```
1 from enum import Enum
2 from types import GeneratorType
3 from typing import Any, Dict, List, Set, Union
4
5 from fastapi.logger import logger
6 from fastapi.utils import PYDANTIC_1
7 from pydantic import BaseModel
8 from pydantic.json import ENCODERS_BY_TYPE
9
10 SetIntStr = Set[Union[int, str]]
11 DictIntStrAny = Dict[Union[int, str], Any]
12
13
14 def jsonable_encoder(
15 obj: Any,
16 include: Union[SetIntStr, DictIntStrAny] = None,
17 exclude: Union[SetIntStr, DictIntStrAny] = set(),
18 by_alias: bool = True,
19 skip_defaults: bool = None,
20 exclude_unset: bool = False,
21 include_none: bool = True,
22 custom_encoder: dict = {},
23 sqlalchemy_safe: bool = True,
24 ) -> Any:
25 if skip_defaults is not None:
26 logger.warning( # pragma: nocover
27 "skip_defaults in jsonable_encoder has been deprecated in favor of "
28 "exclude_unset to keep in line with Pydantic v1, support for it will be "
29 "removed soon."
30 )
31 if include is not None and not isinstance(include, set):
32 include = set(include)
33 if exclude is not None and not isinstance(exclude, set):
34 exclude = set(exclude)
35 if isinstance(obj, BaseModel):
36 encoder = getattr(obj.Config, "json_encoders", {})
37 if custom_encoder:
38 encoder.update(custom_encoder)
39 if PYDANTIC_1:
40 obj_dict = obj.dict(
41 include=include,
42 exclude=exclude,
43 by_alias=by_alias,
44 exclude_unset=bool(exclude_unset or skip_defaults),
45 )
46 else: # pragma: nocover
47 obj_dict = obj.dict(
48 include=include,
49 exclude=exclude,
50 by_alias=by_alias,
51 skip_defaults=bool(exclude_unset or skip_defaults),
52 )
53 return jsonable_encoder(
54 obj_dict,
55 include_none=include_none,
56 custom_encoder=encoder,
57 sqlalchemy_safe=sqlalchemy_safe,
58 )
59 if isinstance(obj, Enum):
60 return obj.value
61 if isinstance(obj, (str, int, float, type(None))):
62 return obj
63 if isinstance(obj, dict):
64 encoded_dict = {}
65 for key, value in obj.items():
66 if (
67 (
68 not sqlalchemy_safe
69 or (not isinstance(key, str))
70 or (not key.startswith("_sa"))
71 )
72 and (value is not None or include_none)
73 and ((include and key in include) or key not in exclude)
74 ):
75 encoded_key = jsonable_encoder(
76 key,
77 by_alias=by_alias,
78 exclude_unset=exclude_unset,
79 include_none=include_none,
80 custom_encoder=custom_encoder,
81 sqlalchemy_safe=sqlalchemy_safe,
82 )
83 encoded_value = jsonable_encoder(
84 value,
85 by_alias=by_alias,
86 exclude_unset=exclude_unset,
87 include_none=include_none,
88 custom_encoder=custom_encoder,
89 sqlalchemy_safe=sqlalchemy_safe,
90 )
91 encoded_dict[encoded_key] = encoded_value
92 return encoded_dict
93 if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):
94 encoded_list = []
95 for item in obj:
96 encoded_list.append(
97 jsonable_encoder(
98 item,
99 include=include,
100 exclude=exclude,
101 by_alias=by_alias,
102 exclude_unset=exclude_unset,
103 include_none=include_none,
104 custom_encoder=custom_encoder,
105 sqlalchemy_safe=sqlalchemy_safe,
106 )
107 )
108 return encoded_list
109 errors: List[Exception] = []
110 try:
111 if custom_encoder and type(obj) in custom_encoder:
112 encoder = custom_encoder[type(obj)]
113 else:
114 encoder = ENCODERS_BY_TYPE[type(obj)]
115 return encoder(obj)
116 except KeyError as e:
117 errors.append(e)
118 try:
119 data = dict(obj)
120 except Exception as e:
121 errors.append(e)
122 try:
123 data = vars(obj)
124 except Exception as e:
125 errors.append(e)
126 raise ValueError(errors)
127 return jsonable_encoder(
128 data,
129 by_alias=by_alias,
130 exclude_unset=exclude_unset,
131 include_none=include_none,
132 custom_encoder=custom_encoder,
133 sqlalchemy_safe=sqlalchemy_safe,
134 )
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fastapi/encoders.py b/fastapi/encoders.py
--- a/fastapi/encoders.py
+++ b/fastapi/encoders.py
@@ -1,6 +1,6 @@
from enum import Enum
from types import GeneratorType
-from typing import Any, Dict, List, Set, Union
+from typing import Any, Callable, Dict, List, Set, Tuple, Union
from fastapi.utils import PYDANTIC_1, logger
from pydantic import BaseModel
@@ -10,6 +10,21 @@
DictIntStrAny = Dict[Union[int, str], Any]
+def generate_encoders_by_class_tuples(
+ type_encoder_map: Dict[Any, Callable]
+) -> Dict[Callable, Tuple]:
+ encoders_by_classes: Dict[Callable, List] = {}
+ for type_, encoder in type_encoder_map.items():
+ encoders_by_classes.setdefault(encoder, []).append(type_)
+ encoders_by_class_tuples: Dict[Callable, Tuple] = {}
+ for encoder, classes in encoders_by_classes.items():
+ encoders_by_class_tuples[encoder] = tuple(classes)
+ return encoders_by_class_tuples
+
+
+encoders_by_class_tuples = generate_encoders_by_class_tuples(ENCODERS_BY_TYPE)
+
+
def jsonable_encoder(
obj: Any,
include: Union[SetIntStr, DictIntStrAny] = None,
@@ -105,24 +120,31 @@
)
)
return encoded_list
+
+ if custom_encoder:
+ if type(obj) in custom_encoder:
+ return custom_encoder[type(obj)](obj)
+ else:
+ for encoder_type, encoder in custom_encoder.items():
+ if isinstance(obj, encoder_type):
+ return encoder(obj)
+
+ if type(obj) in ENCODERS_BY_TYPE:
+ return ENCODERS_BY_TYPE[type(obj)](obj)
+ for encoder, classes_tuple in encoders_by_class_tuples.items():
+ if isinstance(obj, classes_tuple):
+ return encoder(obj)
+
errors: List[Exception] = []
try:
- if custom_encoder and type(obj) in custom_encoder:
- encoder = custom_encoder[type(obj)]
- else:
- encoder = ENCODERS_BY_TYPE[type(obj)]
- return encoder(obj)
- except KeyError as e:
+ data = dict(obj)
+ except Exception as e:
errors.append(e)
try:
- data = dict(obj)
+ data = vars(obj)
except Exception as e:
errors.append(e)
- try:
- data = vars(obj)
- except Exception as e:
- errors.append(e)
- raise ValueError(errors)
+ raise ValueError(errors)
return jsonable_encoder(
data,
by_alias=by_alias,
| {"golden_diff": "diff --git a/fastapi/encoders.py b/fastapi/encoders.py\n--- a/fastapi/encoders.py\n+++ b/fastapi/encoders.py\n@@ -1,6 +1,6 @@\n from enum import Enum\n from types import GeneratorType\n-from typing import Any, Dict, List, Set, Union\n+from typing import Any, Callable, Dict, List, Set, Tuple, Union\n \n from fastapi.utils import PYDANTIC_1, logger\n from pydantic import BaseModel\n@@ -10,6 +10,21 @@\n DictIntStrAny = Dict[Union[int, str], Any]\n \n \n+def generate_encoders_by_class_tuples(\n+ type_encoder_map: Dict[Any, Callable]\n+) -> Dict[Callable, Tuple]:\n+ encoders_by_classes: Dict[Callable, List] = {}\n+ for type_, encoder in type_encoder_map.items():\n+ encoders_by_classes.setdefault(encoder, []).append(type_)\n+ encoders_by_class_tuples: Dict[Callable, Tuple] = {}\n+ for encoder, classes in encoders_by_classes.items():\n+ encoders_by_class_tuples[encoder] = tuple(classes)\n+ return encoders_by_class_tuples\n+\n+\n+encoders_by_class_tuples = generate_encoders_by_class_tuples(ENCODERS_BY_TYPE)\n+\n+\n def jsonable_encoder(\n obj: Any,\n include: Union[SetIntStr, DictIntStrAny] = None,\n@@ -105,24 +120,31 @@\n )\n )\n return encoded_list\n+\n+ if custom_encoder:\n+ if type(obj) in custom_encoder:\n+ return custom_encoder[type(obj)](obj)\n+ else:\n+ for encoder_type, encoder in custom_encoder.items():\n+ if isinstance(obj, encoder_type):\n+ return encoder(obj)\n+\n+ if type(obj) in ENCODERS_BY_TYPE:\n+ return ENCODERS_BY_TYPE[type(obj)](obj)\n+ for encoder, classes_tuple in encoders_by_class_tuples.items():\n+ if isinstance(obj, classes_tuple):\n+ return encoder(obj)\n+\n errors: List[Exception] = []\n try:\n- if custom_encoder and type(obj) in custom_encoder:\n- encoder = custom_encoder[type(obj)]\n- else:\n- encoder = ENCODERS_BY_TYPE[type(obj)]\n- return encoder(obj)\n- except KeyError as e:\n+ data = dict(obj)\n+ except Exception as e:\n errors.append(e)\n try:\n- data = dict(obj)\n+ data = vars(obj)\n except Exception as e:\n errors.append(e)\n- try:\n- data = vars(obj)\n- except Exception as e:\n- errors.append(e)\n- raise ValueError(errors)\n+ raise ValueError(errors)\n return jsonable_encoder(\n data,\n by_alias=by_alias,\n", "issue": "asyncpg uuid implementation not recognized by fastapi because of too strict typechecking\n### Describe the bug\r\n\r\nAn alternative implementation of the `UUID` type is not serialized by fastapi but throws an error instead. This is because it is of type `asyncpg.pgproto.pgproto.UUID` instead of type `uuid.UUID`. However it is implemented in such a way that it does the exact same things as the regular UUID but just faster. Also,\r\n```python\r\nisinstance(asyncpg.pgproto.pgproto.UUID(), uuid.UUID) == True\r\n```\r\nthis should make fastapi able to recognize it as a uuid.\r\n\r\n### To Reproduce\r\n1. Create a file called uuid_error.py with:\r\n\r\n```Python\r\nimport uuid\r\n\r\nfrom fastapi import FastAPI\r\nfrom asyncpg.pgproto import pgproto\r\n\r\napp = FastAPI()\r\n\r\n\r\[email protected](\"/normal_uuid\")\r\ndef return_normal_uuid():\r\n regular_uuid = uuid.UUID(\"a10ff360-3b1e-4984-a26f-d3ab460bdb51\")\r\n assert isinstance(regular_uuid, uuid.UUID)\r\n return {\"normal_uuid\": regular_uuid}\r\n\r\n\r\[email protected](\"/fast_uuid\")\r\ndef return_fast_uuid():\r\n asyncpg_uuid = pgproto.UUID(\"a10ff360-3b1e-4984-a26f-d3ab460bdb51\")\r\n assert isinstance(asyncpg_uuid, uuid.UUID)\r\n return {\"fast_uuid\": asyncpg_uuid}\r\n```\r\n2. Run it with uvicorn uuid_error:app\r\n3. 
Open the browser and call the endpoint `/fast_uuid`.\r\n4. It returns 'internal server error' .\r\n5. But I expected it to return `{\"fast_uuid\":\"a10ff360-3b1e-4984-a26f-d3ab460bdb51\"}`.\r\n\r\n### Expected behavior\r\n\r\nI expected fastapi to jsonify the uuid and send, but instead it returns 'internal server error'. The precise error being thrown in the server is:\r\n```\r\nValueError: [KeyError(<class 'asyncpg.pgproto.pgproto.UUID'>), TypeError(\"'asyncpg.pgproto.pgproto.UUID' object is not iterable\"), TypeError('vars() argument must have __dict__ attribute')]\r\n```\r\n\r\n### Environment\r\n\r\n- OS: Linux Mint 19 Tara\r\n- FastAPI Version: 0.44.0\r\n- Python version: 3.8.0\r\n\r\n### Additional context\r\n\r\nI initially made an issue in the asyncpg repo asking them if they could fix it: https://github.com/MagicStack/asyncpg/issues/512 I think it's usefull as additional context for this issue.\r\nThe problem can be solved here in the fastapi code by improving the code a little. I'm working on a PR, this line seems to be the issue:\r\nhttps://github.com/tiangolo/fastapi/blob/c200bc2240d48a4a27e96fba536351038eafc44f/fastapi/encoders.py#L113\r\nI think it shouldn't use `type()` but it should instead use `isinstance()` this will make for slightly looser typechecking but I think that might be a good thing in this case?\r\n\n", "before_files": [{"content": "from enum import Enum\nfrom types import GeneratorType\nfrom typing import Any, Dict, List, Set, Union\n\nfrom fastapi.logger import logger\nfrom fastapi.utils import PYDANTIC_1\nfrom pydantic import BaseModel\nfrom pydantic.json import ENCODERS_BY_TYPE\n\nSetIntStr = Set[Union[int, str]]\nDictIntStrAny = Dict[Union[int, str], Any]\n\n\ndef jsonable_encoder(\n obj: Any,\n include: Union[SetIntStr, DictIntStrAny] = None,\n exclude: Union[SetIntStr, DictIntStrAny] = set(),\n by_alias: bool = True,\n skip_defaults: bool = None,\n exclude_unset: bool = False,\n include_none: bool = True,\n custom_encoder: dict = {},\n sqlalchemy_safe: bool = True,\n) -> Any:\n if skip_defaults is not None:\n logger.warning( # pragma: nocover\n \"skip_defaults in jsonable_encoder has been deprecated in favor of \"\n \"exclude_unset to keep in line with Pydantic v1, support for it will be \"\n \"removed soon.\"\n )\n if include is not None and not isinstance(include, set):\n include = set(include)\n if exclude is not None and not isinstance(exclude, set):\n exclude = set(exclude)\n if isinstance(obj, BaseModel):\n encoder = getattr(obj.Config, \"json_encoders\", {})\n if custom_encoder:\n encoder.update(custom_encoder)\n if PYDANTIC_1:\n obj_dict = obj.dict(\n include=include,\n exclude=exclude,\n by_alias=by_alias,\n exclude_unset=bool(exclude_unset or skip_defaults),\n )\n else: # pragma: nocover\n obj_dict = obj.dict(\n include=include,\n exclude=exclude,\n by_alias=by_alias,\n skip_defaults=bool(exclude_unset or skip_defaults),\n )\n return jsonable_encoder(\n obj_dict,\n include_none=include_none,\n custom_encoder=encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n if isinstance(obj, Enum):\n return obj.value\n if isinstance(obj, (str, int, float, type(None))):\n return obj\n if isinstance(obj, dict):\n encoded_dict = {}\n for key, value in obj.items():\n if (\n (\n not sqlalchemy_safe\n or (not isinstance(key, str))\n or (not key.startswith(\"_sa\"))\n )\n and (value is not None or include_none)\n and ((include and key in include) or key not in exclude)\n ):\n encoded_key = jsonable_encoder(\n key,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n 
include_none=include_none,\n custom_encoder=custom_encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n encoded_value = jsonable_encoder(\n value,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n include_none=include_none,\n custom_encoder=custom_encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n encoded_dict[encoded_key] = encoded_value\n return encoded_dict\n if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):\n encoded_list = []\n for item in obj:\n encoded_list.append(\n jsonable_encoder(\n item,\n include=include,\n exclude=exclude,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n include_none=include_none,\n custom_encoder=custom_encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n )\n return encoded_list\n errors: List[Exception] = []\n try:\n if custom_encoder and type(obj) in custom_encoder:\n encoder = custom_encoder[type(obj)]\n else:\n encoder = ENCODERS_BY_TYPE[type(obj)]\n return encoder(obj)\n except KeyError as e:\n errors.append(e)\n try:\n data = dict(obj)\n except Exception as e:\n errors.append(e)\n try:\n data = vars(obj)\n except Exception as e:\n errors.append(e)\n raise ValueError(errors)\n return jsonable_encoder(\n data,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n include_none=include_none,\n custom_encoder=custom_encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n", "path": "fastapi/encoders.py"}], "after_files": [{"content": "from enum import Enum\nfrom types import GeneratorType\nfrom typing import Any, Callable, Dict, List, Set, Tuple, Union\n\nfrom fastapi.utils import PYDANTIC_1, logger\nfrom pydantic import BaseModel\nfrom pydantic.json import ENCODERS_BY_TYPE\n\nSetIntStr = Set[Union[int, str]]\nDictIntStrAny = Dict[Union[int, str], Any]\n\n\ndef generate_encoders_by_class_tuples(\n type_encoder_map: Dict[Any, Callable]\n) -> Dict[Callable, Tuple]:\n encoders_by_classes: Dict[Callable, List] = {}\n for type_, encoder in type_encoder_map.items():\n encoders_by_classes.setdefault(encoder, []).append(type_)\n encoders_by_class_tuples: Dict[Callable, Tuple] = {}\n for encoder, classes in encoders_by_classes.items():\n encoders_by_class_tuples[encoder] = tuple(classes)\n return encoders_by_class_tuples\n\n\nencoders_by_class_tuples = generate_encoders_by_class_tuples(ENCODERS_BY_TYPE)\n\n\ndef jsonable_encoder(\n obj: Any,\n include: Union[SetIntStr, DictIntStrAny] = None,\n exclude: Union[SetIntStr, DictIntStrAny] = set(),\n by_alias: bool = True,\n skip_defaults: bool = None,\n exclude_unset: bool = False,\n include_none: bool = True,\n custom_encoder: dict = {},\n sqlalchemy_safe: bool = True,\n) -> Any:\n if skip_defaults is not None:\n logger.warning( # pragma: nocover\n \"skip_defaults in jsonable_encoder has been deprecated in \\\n favor of exclude_unset to keep in line with Pydantic v1, support for it \\\n will be removed soon.\"\n )\n if include is not None and not isinstance(include, set):\n include = set(include)\n if exclude is not None and not isinstance(exclude, set):\n exclude = set(exclude)\n if isinstance(obj, BaseModel):\n encoder = getattr(obj.Config, \"json_encoders\", {})\n if custom_encoder:\n encoder.update(custom_encoder)\n if PYDANTIC_1:\n obj_dict = obj.dict(\n include=include,\n exclude=exclude,\n by_alias=by_alias,\n exclude_unset=bool(exclude_unset or skip_defaults),\n )\n else: # pragma: nocover\n obj_dict = obj.dict(\n include=include,\n exclude=exclude,\n by_alias=by_alias,\n skip_defaults=bool(exclude_unset or skip_defaults),\n )\n return jsonable_encoder(\n obj_dict,\n include_none=include_none,\n 
custom_encoder=encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n if isinstance(obj, Enum):\n return obj.value\n if isinstance(obj, (str, int, float, type(None))):\n return obj\n if isinstance(obj, dict):\n encoded_dict = {}\n for key, value in obj.items():\n if (\n (\n not sqlalchemy_safe\n or (not isinstance(key, str))\n or (not key.startswith(\"_sa\"))\n )\n and (value is not None or include_none)\n and ((include and key in include) or key not in exclude)\n ):\n encoded_key = jsonable_encoder(\n key,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n include_none=include_none,\n custom_encoder=custom_encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n encoded_value = jsonable_encoder(\n value,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n include_none=include_none,\n custom_encoder=custom_encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n encoded_dict[encoded_key] = encoded_value\n return encoded_dict\n if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):\n encoded_list = []\n for item in obj:\n encoded_list.append(\n jsonable_encoder(\n item,\n include=include,\n exclude=exclude,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n include_none=include_none,\n custom_encoder=custom_encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n )\n return encoded_list\n\n if custom_encoder:\n if type(obj) in custom_encoder:\n return custom_encoder[type(obj)](obj)\n else:\n for encoder_type, encoder in custom_encoder.items():\n if isinstance(obj, encoder_type):\n return encoder(obj)\n\n if type(obj) in ENCODERS_BY_TYPE:\n return ENCODERS_BY_TYPE[type(obj)](obj)\n for encoder, classes_tuple in encoders_by_class_tuples.items():\n if isinstance(obj, classes_tuple):\n return encoder(obj)\n\n errors: List[Exception] = []\n try:\n data = dict(obj)\n except Exception as e:\n errors.append(e)\n try:\n data = vars(obj)\n except Exception as e:\n errors.append(e)\n raise ValueError(errors)\n return jsonable_encoder(\n data,\n by_alias=by_alias,\n exclude_unset=exclude_unset,\n include_none=include_none,\n custom_encoder=custom_encoder,\n sqlalchemy_safe=sqlalchemy_safe,\n )\n", "path": "fastapi/encoders.py"}]} | 2,172 | 613 |
gh_patches_debug_16499 | rasdani/github-patches | git_diff | bentoml__BentoML-979 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failed to package local pip installed dependency in 0.8.4
An issue first reported in the #bentoml-users Slack channel by @fernandocamargoti.
In some Python environments, we see the following error when doing a `BentoService#save`:
```
TypeError: expected str, bytes or os.PathLike object, not NoneType
```
The reason is that some packages ended up with a `module_path == None` when accessed from `pkg_resources.working_set`:
```python
import pkg_resources
[dist for dist in pkg_resources.working_set if not dist.module_path]
Out[4]: [terminado 0.8.3 (/home/fernandocamargo/anaconda3/envs/logit/lib/python3.6/site-packages)]
```
Here is a conda environment yaml file that can reproduce this issue:
```
name: logit
channels:
#- anaconda
- conda-forge
- anaconda
- pytorch
- https://public.dhe.ibm.com/ibmdl/export/pub/software/server/ibm-ai/conda
- powerai
dependencies:
- python=3.6
- pandas=1.0
- pyarrow=0.15
- matplotlib=3.1
- plotly=4.6
- scipy=1.3
- numpy=1.16
- seaborn=0.10
- scikit-learn=0.22
- pytorch=1.2
- statsmodels=0.11
- holidays=0.9
- convertdate=2.2
- python-dateutil=2.8
- fbprophet=0.6
- luigi=2.8
- tqdm=4.44
- jupyterlab=1.2
- ipywidgets=7.5
- pyspark=2.4.5
- psutil=5.5
- pydantic=1.6
- sqlalchemy=1.3
- psycopg2=2.8
# GPU:
- cudatoolkit=10.1
- pip
- pip:
# CPU:
# - mxnet==1.4.1
# GPU:
- mxnet-cu101mkl==1.4.1
# Power-9:
# - /root/packages/mxnet/mxnet-1.4.1-cp36-cp36m-linux_ppc64le.whl
- gluonts==0.5
- pmdarima==1.4
- torchbearer==0.5
- imbalanced-learn==0.6
- tensorboardx==2.0
- diskcache==3.0
- bentoml==0.8.3
```
Interestingly, installing terminado alone does not cause this problem. I'm not sure if this is caused by other packages in this list, but we will submit a fix to ensure BentoML handles this scenario properly.
We will release a new version including this fix very soon.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bentoml/saved_bundle/pip_pkg.py`
Content:
```
1 # Copyright 2019 Atalaya Tech, Inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import sys
17 import logging
18 import pkgutil
19 import ast
20 import zipimport
21
22
23 EPP_NO_ERROR = 0
24 EPP_PKG_NOT_EXIST = 1
25 EPP_PKG_VERSION_MISMATCH = 2
26
27 __mm = None
28
29
30 logger = logging.getLogger(__name__)
31
32
33 def parse_requirement_string(rs):
34 name, _, version = rs.partition("==")
35 return name, version
36
37
38 def verify_pkg(pkg_name, pkg_version):
39 global __mm # pylint: disable=global-statement
40 if __mm is None:
41 __mm = ModuleManager()
42 return __mm.verify_pkg(pkg_name, pkg_version)
43
44
45 def seek_pip_dependencies(target_py_file_path):
46 global __mm # pylint: disable=global-statement
47 if __mm is None:
48 __mm = ModuleManager()
49 return __mm.seek_pip_dependencies(target_py_file_path)
50
51
52 def get_all_pip_installed_modules():
53 global __mm # pylint: disable=global-statement
54 if __mm is None:
55 __mm = ModuleManager()
56
57 installed_modules = list(
58 # local modules are the ones imported from current directory, either from a
59 # module.py file or a module directory that contains a `__init__.py` file
60 filter(lambda m: not m.is_local, __mm.searched_modules.values())
61 )
62 return list(map(lambda m: m.name, installed_modules))
63
64
65 class ModuleInfo(object):
66 def __init__(self, name, path, is_local, is_pkg):
67 super(ModuleInfo, self).__init__()
68 self.name = name
69 self.path = path
70 self.is_local = is_local
71 self.is_pkg = is_pkg
72
73
74 class ModuleManager(object):
75 def __init__(self):
76 super(ModuleManager, self).__init__()
77 self.pip_pkg_map = {}
78 self.pip_module_map = {}
79 self.setuptools_module_set = set()
80 self.nonlocal_package_path = set()
81
82 import pkg_resources
83
84 for dist in pkg_resources.working_set: # pylint: disable=not-an-iterable
85 if os.path.realpath(dist.module_path) != os.getcwd():
86 # add to nonlocal_package path only if it's not current directory
87 self.nonlocal_package_path.add(dist.module_path)
88
89 self.pip_pkg_map[dist._key] = dist._version
90 for mn in dist._get_metadata("top_level.txt"):
91 if dist._key != "setuptools":
92 self.pip_module_map.setdefault(mn, []).append(
93 (dist._key, dist._version)
94 )
95 else:
96 self.setuptools_module_set.add(mn)
97
98 self.searched_modules = {}
99 for m in pkgutil.iter_modules():
100 if isinstance(m.module_finder, zipimport.zipimporter):
101 logger.warning(f"Skipped unsupported zipimporter {m.module_finder}")
102 continue
103 if m.name not in self.searched_modules:
104 path = m.module_finder.path
105 is_local = self.is_local_path(path)
106 self.searched_modules[m.name] = ModuleInfo(
107 m.name, path, is_local, m.ispkg
108 )
109
110 def verify_pkg(self, pkg_name, pkg_version):
111 if pkg_name not in self.pip_pkg_map:
112 # package does not exist in the current python session
113 return EPP_PKG_NOT_EXIST
114 if pkg_version and pkg_version != self.pip_pkg_map[pkg_name]:
115 # package version is different from the version being used
116 # in the current python session
117 return EPP_PKG_VERSION_MISMATCH
118 return EPP_NO_ERROR
119
120 def seek_pip_dependencies(self, target_py_file_path):
121 work = DepSeekWork(self, target_py_file_path)
122 work.do()
123 requirements = {}
124 for _, pkg_info_list in work.dependencies.items():
125 for pkg_name, pkg_version in pkg_info_list:
126 requirements[pkg_name] = pkg_version
127
128 return requirements, work.unknown_module_set
129
130 def is_local_path(self, path):
131 if path in self.nonlocal_package_path:
132 return False
133
134 dir_name = os.path.split(path)[1]
135
136 if (
137 "site-packages" in path
138 or "anaconda" in path
139 or path.endswith("packages")
140 or dir_name == "bin"
141 or dir_name.startswith("lib")
142 or dir_name.startswith("python")
143 or dir_name.startswith("plat")
144 ):
145 self.nonlocal_package_path.add(path)
146 return False
147
148 return True
149
150
151 class DepSeekWork(object):
152 def __init__(self, module_manager, target_py_file_path):
153 super(DepSeekWork, self).__init__()
154 self.module_manager = module_manager
155 self.target_py_file_path = target_py_file_path
156
157 self.dependencies = {}
158 self.unknown_module_set = set()
159 self.parsed_module_set = set()
160
161 def do(self):
162 self.seek_in_file(self.target_py_file_path)
163
164 def seek_in_file(self, file_path):
165 # Extract all dependency modules by searching through the trees of the Python
166 # abstract syntax grammar with Python's built-in ast module
167 try:
168 with open(file_path) as f:
169 content = f.read()
170 except UnicodeDecodeError:
171 with open(file_path, encoding='utf-8') as f:
172 content = f.read()
173
174 tree = ast.parse(content)
175 import_set = set()
176 for node in ast.walk(tree):
177 if isinstance(node, ast.Import):
178 for name in node.names:
179 import_set.add(name.name.partition(".")[0])
180 elif isinstance(node, ast.ImportFrom):
181 if node.module is not None and node.level == 0:
182 import_set.add(node.module.partition(".")[0])
183 for module_name in import_set:
184 # Avoid parsing BentoML when BentoML is imported from local source code repo
185 if module_name == 'bentoml':
186 continue
187 if module_name in self.parsed_module_set:
188 continue
189 self.parsed_module_set.add(module_name)
190
191 if module_name in self.module_manager.searched_modules:
192 m = self.module_manager.searched_modules[module_name]
193 if m.is_local:
194 # Recursively search dependencies in sub-modules
195 if m.is_pkg:
196 self.seek_in_dir(os.path.join(m.path, m.name))
197 else:
198 self.seek_in_file(os.path.join(m.path, "{}.py".format(m.name)))
199 else:
200 # check if the package has already been added to the list
201 if (
202 module_name in self.module_manager.pip_module_map
203 and module_name not in self.dependencies
204 and module_name not in self.module_manager.setuptools_module_set
205 ):
206 self.dependencies[
207 module_name
208 ] = self.module_manager.pip_module_map[module_name]
209 else:
210 if module_name in self.module_manager.pip_module_map:
211 if module_name not in self.dependencies:
212 # In some special cases, the pip-installed module can not
213 # be located in the searched_modules
214 self.dependencies[
215 module_name
216 ] = self.module_manager.pip_module_map[module_name]
217 else:
218 if module_name not in sys.builtin_module_names:
219 self.unknown_module_set.add(module_name)
220
221 def seek_in_dir(self, dir_path):
222 for path, dir_list, file_list in os.walk(dir_path):
223 for file_name in file_list:
224 if not file_name.endswith(".py"):
225 continue
226 self.seek_in_file(os.path.join(path, file_name))
227 for dir_name in dir_list:
228 if dir_name == '__pycache__':
229 continue
230 self.seek_in_dir(os.path.join(path, dir_name))
231
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bentoml/saved_bundle/pip_pkg.py b/bentoml/saved_bundle/pip_pkg.py
--- a/bentoml/saved_bundle/pip_pkg.py
+++ b/bentoml/saved_bundle/pip_pkg.py
@@ -82,9 +82,14 @@
import pkg_resources
for dist in pkg_resources.working_set: # pylint: disable=not-an-iterable
- if os.path.realpath(dist.module_path) != os.getcwd():
+ module_path = dist.module_path or dist.location
+ if not module_path:
+ # Skip if no module path was found for pkg distribution
+ continue
+
+ if os.path.realpath(module_path) != os.getcwd():
# add to nonlocal_package path only if it's not current directory
- self.nonlocal_package_path.add(dist.module_path)
+ self.nonlocal_package_path.add(module_path)
self.pip_pkg_map[dist._key] = dist._version
for mn in dist._get_metadata("top_level.txt"):
| {"golden_diff": "diff --git a/bentoml/saved_bundle/pip_pkg.py b/bentoml/saved_bundle/pip_pkg.py\n--- a/bentoml/saved_bundle/pip_pkg.py\n+++ b/bentoml/saved_bundle/pip_pkg.py\n@@ -82,9 +82,14 @@\n import pkg_resources\n \n for dist in pkg_resources.working_set: # pylint: disable=not-an-iterable\n- if os.path.realpath(dist.module_path) != os.getcwd():\n+ module_path = dist.module_path or dist.location\n+ if not module_path:\n+ # Skip if no module path was found for pkg distribution\n+ continue\n+\n+ if os.path.realpath(module_path) != os.getcwd():\n # add to nonlocal_package path only if it's not current directory\n- self.nonlocal_package_path.add(dist.module_path)\n+ self.nonlocal_package_path.add(module_path)\n \n self.pip_pkg_map[dist._key] = dist._version\n for mn in dist._get_metadata(\"top_level.txt\"):\n", "issue": "Failed to package local pip installed dependency in 0.8.4\nAn issue first reported in #bentoml-users slack channel by @fernandocamargoti\r\n\r\nIn some python environment, we see the following error when doing a `BenoService#save`:\r\n```\r\nTypeError: expected str, bytes or os.PathLike object, not NoneType\r\n```\r\n\r\nThe reason is some packages ended up with a `module_path == None` when accessing form `pkg_resources.working_set`:\r\n```python\r\nimport pkg_resources\r\n[dist for dist in pkg_resources.working_set if not dist.module_path]\r\nOut[4]: [terminado 0.8.3 (/home/fernandocamargo/anaconda3/envs/logit/lib/python3.6/site-packages)]\r\n```\r\n\r\nHere is a conda environment yaml file that can reproduce this issue:\r\n```\r\nname: logit\r\nchannels:\r\n#- anaconda\r\n- conda-forge\r\n- anaconda\r\n- pytorch\r\n- https://public.dhe.ibm.com/ibmdl/export/pub/software/server/ibm-ai/conda\r\n- powerai\r\ndependencies:\r\n- python=3.6\r\n- pandas=1.0\r\n- pyarrow=0.15\r\n- matplotlib=3.1\r\n- plotly=4.6\r\n- scipy=1.3\r\n- numpy=1.16\r\n- seaborn=0.10\r\n- scikit-learn=0.22\r\n- pytorch=1.2\r\n- statsmodels=0.11\r\n- holidays=0.9\r\n- convertdate=2.2\r\n- python-dateutil=2.8\r\n- fbprophet=0.6\r\n- luigi=2.8\r\n- tqdm=4.44\r\n- jupyterlab=1.2\r\n- ipywidgets=7.5\r\n- pyspark=2.4.5\r\n- psutil=5.5\r\n- pydantic=1.6\r\n- sqlalchemy=1.3\r\n- psycopg2=2.8\r\n# GPU:\r\n- cudatoolkit=10.1\r\n- pip\r\n- pip:\r\n# CPU:\r\n# - mxnet==1.4.1\r\n# GPU:\r\n - mxnet-cu101mkl==1.4.1\r\n# Power-9:\r\n# - /root/packages/mxnet/mxnet-1.4.1-cp36-cp36m-linux_ppc64le.whl\r\n - gluonts==0.5\r\n - pmdarima==1.4\r\n - torchbearer==0.5\r\n - imbalanced-learn==0.6\r\n - tensorboardx==2.0\r\n - diskcache==3.0\r\n - bentoml==0.8.3\r\n```\r\n\r\nInterestingly, installing terminado alone does not cause this problem. I'm not sure if this is caused by other packages in this list. 
But will submit a fix to ensure BentoML handles this scenario properly.\r\n\r\nWe will release a new version including this fix very soon.\r\n\n", "before_files": [{"content": "# Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport logging\nimport pkgutil\nimport ast\nimport zipimport\n\n\nEPP_NO_ERROR = 0\nEPP_PKG_NOT_EXIST = 1\nEPP_PKG_VERSION_MISMATCH = 2\n\n__mm = None\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef parse_requirement_string(rs):\n name, _, version = rs.partition(\"==\")\n return name, version\n\n\ndef verify_pkg(pkg_name, pkg_version):\n global __mm # pylint: disable=global-statement\n if __mm is None:\n __mm = ModuleManager()\n return __mm.verify_pkg(pkg_name, pkg_version)\n\n\ndef seek_pip_dependencies(target_py_file_path):\n global __mm # pylint: disable=global-statement\n if __mm is None:\n __mm = ModuleManager()\n return __mm.seek_pip_dependencies(target_py_file_path)\n\n\ndef get_all_pip_installed_modules():\n global __mm # pylint: disable=global-statement\n if __mm is None:\n __mm = ModuleManager()\n\n installed_modules = list(\n # local modules are the ones imported from current directory, either from a\n # module.py file or a module directory that contains a `__init__.py` file\n filter(lambda m: not m.is_local, __mm.searched_modules.values())\n )\n return list(map(lambda m: m.name, installed_modules))\n\n\nclass ModuleInfo(object):\n def __init__(self, name, path, is_local, is_pkg):\n super(ModuleInfo, self).__init__()\n self.name = name\n self.path = path\n self.is_local = is_local\n self.is_pkg = is_pkg\n\n\nclass ModuleManager(object):\n def __init__(self):\n super(ModuleManager, self).__init__()\n self.pip_pkg_map = {}\n self.pip_module_map = {}\n self.setuptools_module_set = set()\n self.nonlocal_package_path = set()\n\n import pkg_resources\n\n for dist in pkg_resources.working_set: # pylint: disable=not-an-iterable\n if os.path.realpath(dist.module_path) != os.getcwd():\n # add to nonlocal_package path only if it's not current directory\n self.nonlocal_package_path.add(dist.module_path)\n\n self.pip_pkg_map[dist._key] = dist._version\n for mn in dist._get_metadata(\"top_level.txt\"):\n if dist._key != \"setuptools\":\n self.pip_module_map.setdefault(mn, []).append(\n (dist._key, dist._version)\n )\n else:\n self.setuptools_module_set.add(mn)\n\n self.searched_modules = {}\n for m in pkgutil.iter_modules():\n if isinstance(m.module_finder, zipimport.zipimporter):\n logger.warning(f\"Skipped unsupported zipimporter {m.module_finder}\")\n continue\n if m.name not in self.searched_modules:\n path = m.module_finder.path\n is_local = self.is_local_path(path)\n self.searched_modules[m.name] = ModuleInfo(\n m.name, path, is_local, m.ispkg\n )\n\n def verify_pkg(self, pkg_name, pkg_version):\n if pkg_name not in self.pip_pkg_map:\n # package does not exist in the current python session\n return EPP_PKG_NOT_EXIST\n if pkg_version and pkg_version != self.pip_pkg_map[pkg_name]:\n # package version is 
different from the version being used\n # in the current python session\n return EPP_PKG_VERSION_MISMATCH\n return EPP_NO_ERROR\n\n def seek_pip_dependencies(self, target_py_file_path):\n work = DepSeekWork(self, target_py_file_path)\n work.do()\n requirements = {}\n for _, pkg_info_list in work.dependencies.items():\n for pkg_name, pkg_version in pkg_info_list:\n requirements[pkg_name] = pkg_version\n\n return requirements, work.unknown_module_set\n\n def is_local_path(self, path):\n if path in self.nonlocal_package_path:\n return False\n\n dir_name = os.path.split(path)[1]\n\n if (\n \"site-packages\" in path\n or \"anaconda\" in path\n or path.endswith(\"packages\")\n or dir_name == \"bin\"\n or dir_name.startswith(\"lib\")\n or dir_name.startswith(\"python\")\n or dir_name.startswith(\"plat\")\n ):\n self.nonlocal_package_path.add(path)\n return False\n\n return True\n\n\nclass DepSeekWork(object):\n def __init__(self, module_manager, target_py_file_path):\n super(DepSeekWork, self).__init__()\n self.module_manager = module_manager\n self.target_py_file_path = target_py_file_path\n\n self.dependencies = {}\n self.unknown_module_set = set()\n self.parsed_module_set = set()\n\n def do(self):\n self.seek_in_file(self.target_py_file_path)\n\n def seek_in_file(self, file_path):\n # Extract all dependency modules by searching through the trees of the Python\n # abstract syntax grammar with Python's built-in ast module\n try:\n with open(file_path) as f:\n content = f.read()\n except UnicodeDecodeError:\n with open(file_path, encoding='utf-8') as f:\n content = f.read()\n\n tree = ast.parse(content)\n import_set = set()\n for node in ast.walk(tree):\n if isinstance(node, ast.Import):\n for name in node.names:\n import_set.add(name.name.partition(\".\")[0])\n elif isinstance(node, ast.ImportFrom):\n if node.module is not None and node.level == 0:\n import_set.add(node.module.partition(\".\")[0])\n for module_name in import_set:\n # Avoid parsing BentoML when BentoML is imported from local source code repo\n if module_name == 'bentoml':\n continue\n if module_name in self.parsed_module_set:\n continue\n self.parsed_module_set.add(module_name)\n\n if module_name in self.module_manager.searched_modules:\n m = self.module_manager.searched_modules[module_name]\n if m.is_local:\n # Recursively search dependencies in sub-modules\n if m.is_pkg:\n self.seek_in_dir(os.path.join(m.path, m.name))\n else:\n self.seek_in_file(os.path.join(m.path, \"{}.py\".format(m.name)))\n else:\n # check if the package has already been added to the list\n if (\n module_name in self.module_manager.pip_module_map\n and module_name not in self.dependencies\n and module_name not in self.module_manager.setuptools_module_set\n ):\n self.dependencies[\n module_name\n ] = self.module_manager.pip_module_map[module_name]\n else:\n if module_name in self.module_manager.pip_module_map:\n if module_name not in self.dependencies:\n # In some special cases, the pip-installed module can not\n # be located in the searched_modules\n self.dependencies[\n module_name\n ] = self.module_manager.pip_module_map[module_name]\n else:\n if module_name not in sys.builtin_module_names:\n self.unknown_module_set.add(module_name)\n\n def seek_in_dir(self, dir_path):\n for path, dir_list, file_list in os.walk(dir_path):\n for file_name in file_list:\n if not file_name.endswith(\".py\"):\n continue\n self.seek_in_file(os.path.join(path, file_name))\n for dir_name in dir_list:\n if dir_name == '__pycache__':\n continue\n 
self.seek_in_dir(os.path.join(path, dir_name))\n", "path": "bentoml/saved_bundle/pip_pkg.py"}], "after_files": [{"content": "# Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport logging\nimport pkgutil\nimport ast\nimport zipimport\n\n\nEPP_NO_ERROR = 0\nEPP_PKG_NOT_EXIST = 1\nEPP_PKG_VERSION_MISMATCH = 2\n\n__mm = None\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef parse_requirement_string(rs):\n name, _, version = rs.partition(\"==\")\n return name, version\n\n\ndef verify_pkg(pkg_name, pkg_version):\n global __mm # pylint: disable=global-statement\n if __mm is None:\n __mm = ModuleManager()\n return __mm.verify_pkg(pkg_name, pkg_version)\n\n\ndef seek_pip_dependencies(target_py_file_path):\n global __mm # pylint: disable=global-statement\n if __mm is None:\n __mm = ModuleManager()\n return __mm.seek_pip_dependencies(target_py_file_path)\n\n\ndef get_all_pip_installed_modules():\n global __mm # pylint: disable=global-statement\n if __mm is None:\n __mm = ModuleManager()\n\n installed_modules = list(\n # local modules are the ones imported from current directory, either from a\n # module.py file or a module directory that contains a `__init__.py` file\n filter(lambda m: not m.is_local, __mm.searched_modules.values())\n )\n return list(map(lambda m: m.name, installed_modules))\n\n\nclass ModuleInfo(object):\n def __init__(self, name, path, is_local, is_pkg):\n super(ModuleInfo, self).__init__()\n self.name = name\n self.path = path\n self.is_local = is_local\n self.is_pkg = is_pkg\n\n\nclass ModuleManager(object):\n def __init__(self):\n super(ModuleManager, self).__init__()\n self.pip_pkg_map = {}\n self.pip_module_map = {}\n self.setuptools_module_set = set()\n self.nonlocal_package_path = set()\n\n import pkg_resources\n\n for dist in pkg_resources.working_set: # pylint: disable=not-an-iterable\n module_path = dist.module_path or dist.location\n if not module_path:\n # Skip if no module path was found for pkg distribution\n continue\n\n if os.path.realpath(module_path) != os.getcwd():\n # add to nonlocal_package path only if it's not current directory\n self.nonlocal_package_path.add(module_path)\n\n self.pip_pkg_map[dist._key] = dist._version\n for mn in dist._get_metadata(\"top_level.txt\"):\n if dist._key != \"setuptools\":\n self.pip_module_map.setdefault(mn, []).append(\n (dist._key, dist._version)\n )\n else:\n self.setuptools_module_set.add(mn)\n\n self.searched_modules = {}\n for m in pkgutil.iter_modules():\n if isinstance(m.module_finder, zipimport.zipimporter):\n logger.warning(f\"Skipped unsupported zipimporter {m.module_finder}\")\n continue\n if m.name not in self.searched_modules:\n path = m.module_finder.path\n is_local = self.is_local_path(path)\n self.searched_modules[m.name] = ModuleInfo(\n m.name, path, is_local, m.ispkg\n )\n\n def verify_pkg(self, pkg_name, pkg_version):\n if pkg_name not in self.pip_pkg_map:\n # package does not exist in the current python session\n return EPP_PKG_NOT_EXIST\n if 
pkg_version and pkg_version != self.pip_pkg_map[pkg_name]:\n # package version is different from the version being used\n # in the current python session\n return EPP_PKG_VERSION_MISMATCH\n return EPP_NO_ERROR\n\n def seek_pip_dependencies(self, target_py_file_path):\n work = DepSeekWork(self, target_py_file_path)\n work.do()\n requirements = {}\n for _, pkg_info_list in work.dependencies.items():\n for pkg_name, pkg_version in pkg_info_list:\n requirements[pkg_name] = pkg_version\n\n return requirements, work.unknown_module_set\n\n def is_local_path(self, path):\n if path in self.nonlocal_package_path:\n return False\n\n dir_name = os.path.split(path)[1]\n\n if (\n \"site-packages\" in path\n or \"anaconda\" in path\n or path.endswith(\"packages\")\n or dir_name == \"bin\"\n or dir_name.startswith(\"lib\")\n or dir_name.startswith(\"python\")\n or dir_name.startswith(\"plat\")\n ):\n self.nonlocal_package_path.add(path)\n return False\n\n return True\n\n\nclass DepSeekWork(object):\n def __init__(self, module_manager, target_py_file_path):\n super(DepSeekWork, self).__init__()\n self.module_manager = module_manager\n self.target_py_file_path = target_py_file_path\n\n self.dependencies = {}\n self.unknown_module_set = set()\n self.parsed_module_set = set()\n\n def do(self):\n self.seek_in_file(self.target_py_file_path)\n\n def seek_in_file(self, file_path):\n # Extract all dependency modules by searching through the trees of the Python\n # abstract syntax grammar with Python's built-in ast module\n try:\n with open(file_path) as f:\n content = f.read()\n except UnicodeDecodeError:\n with open(file_path, encoding='utf-8') as f:\n content = f.read()\n\n tree = ast.parse(content)\n import_set = set()\n for node in ast.walk(tree):\n if isinstance(node, ast.Import):\n for name in node.names:\n import_set.add(name.name.partition(\".\")[0])\n elif isinstance(node, ast.ImportFrom):\n if node.module is not None and node.level == 0:\n import_set.add(node.module.partition(\".\")[0])\n for module_name in import_set:\n # Avoid parsing BentoML when BentoML is imported from local source code repo\n if module_name == 'bentoml':\n continue\n if module_name in self.parsed_module_set:\n continue\n self.parsed_module_set.add(module_name)\n\n if module_name in self.module_manager.searched_modules:\n m = self.module_manager.searched_modules[module_name]\n if m.is_local:\n # Recursively search dependencies in sub-modules\n if m.is_pkg:\n self.seek_in_dir(os.path.join(m.path, m.name))\n else:\n self.seek_in_file(os.path.join(m.path, \"{}.py\".format(m.name)))\n else:\n # check if the package has already been added to the list\n if (\n module_name in self.module_manager.pip_module_map\n and module_name not in self.dependencies\n and module_name not in self.module_manager.setuptools_module_set\n ):\n self.dependencies[\n module_name\n ] = self.module_manager.pip_module_map[module_name]\n else:\n if module_name in self.module_manager.pip_module_map:\n if module_name not in self.dependencies:\n # In some special cases, the pip-installed module can not\n # be located in the searched_modules\n self.dependencies[\n module_name\n ] = self.module_manager.pip_module_map[module_name]\n else:\n if module_name not in sys.builtin_module_names:\n self.unknown_module_set.add(module_name)\n\n def seek_in_dir(self, dir_path):\n for path, dir_list, file_list in os.walk(dir_path):\n for file_name in file_list:\n if not file_name.endswith(\".py\"):\n continue\n self.seek_in_file(os.path.join(path, file_name))\n for dir_name in 
dir_list:\n if dir_name == '__pycache__':\n continue\n self.seek_in_dir(os.path.join(path, dir_name))\n", "path": "bentoml/saved_bundle/pip_pkg.py"}]} | 3,293 | 230 |
gh_patches_debug_23109 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-1054 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test for unintended permissions via API
Have we left any security holes via the API?
- A non-sysadmin should not be able to add/edit/delete a group (country)
- A non-sysadmin should not be able to add/edit/delete an org
- A non-sysadmin should not be able to see user emails (other than their own) (note: Luis is pulling a list of user emails via the API, presumably with his sysadmin account)
- A NEAO should not be able to add a dataset for the org
- A NEAO should not be able to become a member of an org
- No one should be able to become a member of a group (country)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext-hdx_theme/ckanext/hdx_theme/auth.py`
Content:
```
1 import ckan.new_authz as new_authz
2
3 from ckan.lib.base import _
4
5 def hdx_basic_user_info(context, data_dict):
6 logged_in = new_authz.auth_is_loggedin_user()
7 if logged_in:
8 return {'success': True}
9 else:
10 return {'success': False,
11 'msg': _("You must be logged in to access basic organization member info.")}
```
Path: `ckanext-hdx_theme/ckanext/hdx_theme/plugin.py`
Content:
```
1 import ckanext.hdx_theme.licenses as hdx_licenses
2
3 import ckan.plugins as plugins
4 import ckan.plugins.toolkit as toolkit
5 import ckan.model.package as package
6 import ckan.model.license as license
7 import pylons.config as config
8 import version
9
10 import ckanext.hdx_theme.caching as caching
11 import ckanext.hdx_theme.auth as auth
12
13
14 def run_on_startup():
15 cache_on_startup = config.get('hdx.cache.onstartup', 'true')
16 if 'true' == cache_on_startup:
17 _generate_license_list()
18 caching.cached_get_group_package_stuff()
19
20
21 def _generate_license_list():
22 package.Package._license_register = license.LicenseRegister()
23 package.Package._license_register.licenses = [
24 license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),
25 license.License(license.LicenseCreativeCommonsAttribution()),
26 license.License(license.LicenseCreativeCommonsAttributionShareAlike()),
27 license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),
28 license.License(hdx_licenses.LicenseHdxMultiple()),
29 license.License(hdx_licenses.LicenseHdxOther())
30 ]
31
32 class HDXThemePlugin(plugins.SingletonPlugin):
33 plugins.implements(plugins.IConfigurer)
34 plugins.implements(plugins.IRoutes, inherit=True)
35 plugins.implements(plugins.ITemplateHelpers)
36 plugins.implements(plugins.IActions)
37 plugins.implements(plugins.IAuthFunctions)
38 plugins.implements(plugins.IGroupController, inherit=True)
39 plugins.implements(plugins.IMiddleware, inherit=True)
40
41 def update_config(self, config):
42 toolkit.add_template_directory(config, 'templates')
43 toolkit.add_public_directory(config, 'public')
44 toolkit.add_resource('fanstatic', 'hdx_theme')
45
46
47 def before_map(self, map):
48 map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')
49 map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')
50 map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')
51 map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')
52 map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')
53 map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')
54
55 map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')
56
57 map.connect('request_membership', '/organization/{org_id}/request_membership', controller='ckanext.hdx_theme.org_controller:HDXReqsOrgController', action='request_membership')
58 map.connect('request_editing_rights', '/organization/{org_id}/request_editing_rights', controller='ckanext.hdx_theme.org_controller:HDXReqsOrgController', action='request_editor_for_org')
59 map.connect('/organization/request_new', controller='ckanext.hdx_theme.org_controller:HDXReqsOrgController', action='request_new_organization')
60 map.connect('/organization/members/{id}', controller='ckanext.hdx_theme.member_controller:HDXOrgMemberController', action='members')
61 map.connect('dataset_preselect','/dataset/preselect', controller='ckanext.hdx_theme.preselect_dsform_controller:HDXPreselectOrgController', action='preselect')
62 map.connect('/organization/member_new/{id}', controller='ckanext.hdx_theme.member_controller:HDXOrgMemberController', action='member_new')
63
64 map.connect('/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about')
65
66 map.connect('resource_edit', '/dataset/{id}/resource_edit/{resource_id}', controller='ckanext.hdx_theme.package_controller:HDXPackageController', action='resource_edit', ckan_icon='edit')
67
68 return map
69
70 def create(self, entity):
71 caching.invalidate_group_caches()
72
73 def edit(self, entity):
74 caching.invalidate_group_caches()
75
76 def get_helpers(self):
77 from ckanext.hdx_theme import helpers as hdx_helpers
78 return {
79 'is_downloadable': hdx_helpers.is_downloadable,
80 'get_facet_items_dict':hdx_helpers.get_facet_items_dict,
81 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,
82 'get_filtered_params_list':hdx_helpers.get_filtered_params_list,
83 'get_last_revision_package':hdx_helpers.get_last_revision_package,
84 'get_last_modifier_user':hdx_helpers.get_last_modifier_user,
85 'get_last_revision_group':hdx_helpers.get_last_revision_group,
86 'get_group_followers':hdx_helpers.get_group_followers,
87 'get_group_members':hdx_helpers.get_group_members,
88 'markdown_extract_strip':hdx_helpers.markdown_extract_strip,
89 'render_date_from_concat_str':hdx_helpers.render_date_from_concat_str,
90 'hdx_version':hdx_helpers.hdx_version,
91 'hdx_build_nav_icon_with_message':hdx_helpers.hdx_build_nav_icon_with_message,
92 'hdx_num_of_new_related_items':hdx_helpers.hdx_num_of_new_related_items,
93 'hdx_get_extras_element':hdx_helpers.hdx_get_extras_element,
94 'hdx_get_user_info':hdx_helpers.hdx_get_user_info,
95 'hdx_linked_user':hdx_helpers.hdx_linked_user,
96 'hdx_show_singular_plural':hdx_helpers.hdx_show_singular_plural,
97 'hdx_member_roles_list':hdx_helpers.hdx_member_roles_list,
98 'hdx_organizations_available_with_roles':hdx_helpers.hdx_organizations_available_with_roles
99
100 }
101
102 def get_actions(self):
103 from ckanext.hdx_theme import actions as hdx_actions
104 return {
105 'organization_list_for_user':hdx_actions.organization_list_for_user,
106 'cached_group_list': hdx_actions.cached_group_list,
107 'hdx_basic_user_info': hdx_actions.hdx_basic_user_info,
108 'member_list': hdx_actions.member_list,
109 'hdx_get_sys_admins': hdx_actions.hdx_get_sys_admins
110
111 }
112 def get_auth_functions(self):
113 return {
114 'hdx_basic_user_info': auth.hdx_basic_user_info
115 }
116
117 def make_middleware(self, app, config):
118 run_on_startup()
119 return app
120
121
122
123
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/auth.py b/ckanext-hdx_theme/ckanext/hdx_theme/auth.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/auth.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/auth.py
@@ -1,11 +1,18 @@
import ckan.new_authz as new_authz
+import ckan.logic as logic
from ckan.lib.base import _
+
def hdx_basic_user_info(context, data_dict):
logged_in = new_authz.auth_is_loggedin_user()
if logged_in:
return {'success': True}
else:
return {'success': False,
- 'msg': _("You must be logged in to access basic organization member info.")}
\ No newline at end of file
+ 'msg': _("You must be logged in to access basic organization member info.")}
+
+
[email protected]_sysadmins_check
+def group_member_create(context, data_dict):
+ return {'success': False, 'msg': _('Nobody can add a member to a country in HDX')}
\ No newline at end of file
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
@@ -111,7 +111,8 @@
}
def get_auth_functions(self):
return {
- 'hdx_basic_user_info': auth.hdx_basic_user_info
+ 'hdx_basic_user_info': auth.hdx_basic_user_info,
+ 'group_member_create': auth.group_member_create
}
def make_middleware(self, app, config):
| {"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/auth.py b/ckanext-hdx_theme/ckanext/hdx_theme/auth.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/auth.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/auth.py\n@@ -1,11 +1,18 @@\n import ckan.new_authz as new_authz\n+import ckan.logic as logic\n \n from ckan.lib.base import _\n \n+\n def hdx_basic_user_info(context, data_dict):\n logged_in = new_authz.auth_is_loggedin_user()\n if logged_in:\n return {'success': True}\n else:\n return {'success': False,\n- 'msg': _(\"You must be logged in to access basic organization member info.\")}\n\\ No newline at end of file\n+ 'msg': _(\"You must be logged in to access basic organization member info.\")}\n+\n+\[email protected]_sysadmins_check\n+def group_member_create(context, data_dict):\n+ return {'success': False, 'msg': _('Nobody can add a member to a country in HDX')}\n\\ No newline at end of file\ndiff --git a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n@@ -111,7 +111,8 @@\n }\n def get_auth_functions(self):\n return {\n- 'hdx_basic_user_info': auth.hdx_basic_user_info\n+ 'hdx_basic_user_info': auth.hdx_basic_user_info,\n+ 'group_member_create': auth.group_member_create\n }\n \n def make_middleware(self, app, config):\n", "issue": "Test for unintended permissions via API\nHave we left any security holes via the API?\n- A non-sysadmin should not be able to add/edit/delete a group (country)\n- A non-sysadmin should not be able to add/edit/delete an org\n- A non-sysadmin should not be able to see user emails (other than their own) (note: Luis is pulling a list of user emails via the API, presumably with his sysadmin account)\n- A NEAO should not be able to add a dataset for the org\n- A NEAO should not be able to become a member of an org\n- No one should be able to become a member of a group (country)\n\n", "before_files": [{"content": "import ckan.new_authz as new_authz\n\nfrom ckan.lib.base import _\n\ndef hdx_basic_user_info(context, data_dict):\n logged_in = new_authz.auth_is_loggedin_user()\n if logged_in:\n return {'success': True}\n else:\n return {'success': False,\n 'msg': _(\"You must be logged in to access basic organization member info.\")}", "path": "ckanext-hdx_theme/ckanext/hdx_theme/auth.py"}, {"content": "import ckanext.hdx_theme.licenses as hdx_licenses\n\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as toolkit\nimport ckan.model.package as package\nimport ckan.model.license as license\nimport pylons.config as config\nimport version\n\nimport ckanext.hdx_theme.caching as caching\nimport ckanext.hdx_theme.auth as auth\n\n\ndef run_on_startup():\n cache_on_startup = config.get('hdx.cache.onstartup', 'true')\n if 'true' == cache_on_startup:\n _generate_license_list()\n caching.cached_get_group_package_stuff()\n\n\ndef _generate_license_list():\n package.Package._license_register = license.LicenseRegister() \n package.Package._license_register.licenses = [\n license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),\n license.License(license.LicenseCreativeCommonsAttribution()),\n license.License(license.LicenseCreativeCommonsAttributionShareAlike()),\n license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),\n license.License(hdx_licenses.LicenseHdxMultiple()),\n license.License(hdx_licenses.LicenseHdxOther())\n ]\n\nclass HDXThemePlugin(plugins.SingletonPlugin):\n 
plugins.implements(plugins.IConfigurer)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.ITemplateHelpers)\n plugins.implements(plugins.IActions)\n plugins.implements(plugins.IAuthFunctions)\n plugins.implements(plugins.IGroupController, inherit=True)\n plugins.implements(plugins.IMiddleware, inherit=True)\n \n def update_config(self, config):\n toolkit.add_template_directory(config, 'templates')\n toolkit.add_public_directory(config, 'public')\n toolkit.add_resource('fanstatic', 'hdx_theme')\n \n\n def before_map(self, map):\n map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')\n map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')\n map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')\n map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')\n map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')\n map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')\n \n map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')\n \n map.connect('request_membership', '/organization/{org_id}/request_membership', controller='ckanext.hdx_theme.org_controller:HDXReqsOrgController', action='request_membership')\n map.connect('request_editing_rights', '/organization/{org_id}/request_editing_rights', controller='ckanext.hdx_theme.org_controller:HDXReqsOrgController', action='request_editor_for_org')\n map.connect('/organization/request_new', controller='ckanext.hdx_theme.org_controller:HDXReqsOrgController', action='request_new_organization')\n map.connect('/organization/members/{id}', controller='ckanext.hdx_theme.member_controller:HDXOrgMemberController', action='members')\n map.connect('dataset_preselect','/dataset/preselect', controller='ckanext.hdx_theme.preselect_dsform_controller:HDXPreselectOrgController', action='preselect')\n map.connect('/organization/member_new/{id}', controller='ckanext.hdx_theme.member_controller:HDXOrgMemberController', action='member_new')\n\n map.connect('/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about')\n\n map.connect('resource_edit', '/dataset/{id}/resource_edit/{resource_id}', controller='ckanext.hdx_theme.package_controller:HDXPackageController', action='resource_edit', ckan_icon='edit')\n\n return map\n \n def create(self, entity):\n caching.invalidate_group_caches()\n\n def edit(self, entity):\n caching.invalidate_group_caches()\n\n def get_helpers(self):\n from ckanext.hdx_theme import helpers as hdx_helpers\n return {\n 'is_downloadable': hdx_helpers.is_downloadable,\n 'get_facet_items_dict':hdx_helpers.get_facet_items_dict,\n 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,\n 'get_filtered_params_list':hdx_helpers.get_filtered_params_list,\n 'get_last_revision_package':hdx_helpers.get_last_revision_package,\n 'get_last_modifier_user':hdx_helpers.get_last_modifier_user,\n 'get_last_revision_group':hdx_helpers.get_last_revision_group,\n 'get_group_followers':hdx_helpers.get_group_followers,\n 'get_group_members':hdx_helpers.get_group_members,\n 'markdown_extract_strip':hdx_helpers.markdown_extract_strip,\n 'render_date_from_concat_str':hdx_helpers.render_date_from_concat_str,\n 'hdx_version':hdx_helpers.hdx_version,\n 
'hdx_build_nav_icon_with_message':hdx_helpers.hdx_build_nav_icon_with_message,\n 'hdx_num_of_new_related_items':hdx_helpers.hdx_num_of_new_related_items,\n 'hdx_get_extras_element':hdx_helpers.hdx_get_extras_element,\n 'hdx_get_user_info':hdx_helpers.hdx_get_user_info,\n 'hdx_linked_user':hdx_helpers.hdx_linked_user,\n 'hdx_show_singular_plural':hdx_helpers.hdx_show_singular_plural,\n 'hdx_member_roles_list':hdx_helpers.hdx_member_roles_list,\n 'hdx_organizations_available_with_roles':hdx_helpers.hdx_organizations_available_with_roles\n \n }\n \n def get_actions(self):\n from ckanext.hdx_theme import actions as hdx_actions\n return {\n 'organization_list_for_user':hdx_actions.organization_list_for_user, \n 'cached_group_list': hdx_actions.cached_group_list,\n 'hdx_basic_user_info': hdx_actions.hdx_basic_user_info,\n 'member_list': hdx_actions.member_list,\n 'hdx_get_sys_admins': hdx_actions.hdx_get_sys_admins\n \n }\n def get_auth_functions(self):\n return {\n 'hdx_basic_user_info': auth.hdx_basic_user_info\n }\n \n def make_middleware(self, app, config):\n run_on_startup()\n return app\n\n \n \n\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/plugin.py"}], "after_files": [{"content": "import ckan.new_authz as new_authz\nimport ckan.logic as logic\n\nfrom ckan.lib.base import _\n\n\ndef hdx_basic_user_info(context, data_dict):\n logged_in = new_authz.auth_is_loggedin_user()\n if logged_in:\n return {'success': True}\n else:\n return {'success': False,\n 'msg': _(\"You must be logged in to access basic organization member info.\")}\n\n\[email protected]_sysadmins_check\ndef group_member_create(context, data_dict):\n return {'success': False, 'msg': _('Nobody can add a member to a country in HDX')}", "path": "ckanext-hdx_theme/ckanext/hdx_theme/auth.py"}, {"content": "import ckanext.hdx_theme.licenses as hdx_licenses\n\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as toolkit\nimport ckan.model.package as package\nimport ckan.model.license as license\nimport pylons.config as config\nimport version\n\nimport ckanext.hdx_theme.caching as caching\nimport ckanext.hdx_theme.auth as auth\n\n\ndef run_on_startup():\n cache_on_startup = config.get('hdx.cache.onstartup', 'true')\n if 'true' == cache_on_startup:\n _generate_license_list()\n caching.cached_get_group_package_stuff()\n\n\ndef _generate_license_list():\n package.Package._license_register = license.LicenseRegister() \n package.Package._license_register.licenses = [\n license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),\n license.License(license.LicenseCreativeCommonsAttribution()),\n license.License(license.LicenseCreativeCommonsAttributionShareAlike()),\n license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),\n license.License(hdx_licenses.LicenseHdxMultiple()),\n license.License(hdx_licenses.LicenseHdxOther())\n ]\n\nclass HDXThemePlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.ITemplateHelpers)\n plugins.implements(plugins.IActions)\n plugins.implements(plugins.IAuthFunctions)\n plugins.implements(plugins.IGroupController, inherit=True)\n plugins.implements(plugins.IMiddleware, inherit=True)\n \n def update_config(self, config):\n toolkit.add_template_directory(config, 'templates')\n toolkit.add_public_directory(config, 'public')\n toolkit.add_resource('fanstatic', 'hdx_theme')\n \n\n def before_map(self, map):\n map.connect('home', '/', 
controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')\n map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')\n map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')\n map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')\n map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')\n map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')\n \n map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')\n \n map.connect('request_membership', '/organization/{org_id}/request_membership', controller='ckanext.hdx_theme.org_controller:HDXReqsOrgController', action='request_membership')\n map.connect('request_editing_rights', '/organization/{org_id}/request_editing_rights', controller='ckanext.hdx_theme.org_controller:HDXReqsOrgController', action='request_editor_for_org')\n map.connect('/organization/request_new', controller='ckanext.hdx_theme.org_controller:HDXReqsOrgController', action='request_new_organization')\n map.connect('/organization/members/{id}', controller='ckanext.hdx_theme.member_controller:HDXOrgMemberController', action='members')\n map.connect('dataset_preselect','/dataset/preselect', controller='ckanext.hdx_theme.preselect_dsform_controller:HDXPreselectOrgController', action='preselect')\n map.connect('/organization/member_new/{id}', controller='ckanext.hdx_theme.member_controller:HDXOrgMemberController', action='member_new')\n\n map.connect('/about/{page}', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='about')\n\n map.connect('resource_edit', '/dataset/{id}/resource_edit/{resource_id}', controller='ckanext.hdx_theme.package_controller:HDXPackageController', action='resource_edit', ckan_icon='edit')\n\n return map\n \n def create(self, entity):\n caching.invalidate_group_caches()\n\n def edit(self, entity):\n caching.invalidate_group_caches()\n\n def get_helpers(self):\n from ckanext.hdx_theme import helpers as hdx_helpers\n return {\n 'is_downloadable': hdx_helpers.is_downloadable,\n 'get_facet_items_dict':hdx_helpers.get_facet_items_dict,\n 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,\n 'get_filtered_params_list':hdx_helpers.get_filtered_params_list,\n 'get_last_revision_package':hdx_helpers.get_last_revision_package,\n 'get_last_modifier_user':hdx_helpers.get_last_modifier_user,\n 'get_last_revision_group':hdx_helpers.get_last_revision_group,\n 'get_group_followers':hdx_helpers.get_group_followers,\n 'get_group_members':hdx_helpers.get_group_members,\n 'markdown_extract_strip':hdx_helpers.markdown_extract_strip,\n 'render_date_from_concat_str':hdx_helpers.render_date_from_concat_str,\n 'hdx_version':hdx_helpers.hdx_version,\n 'hdx_build_nav_icon_with_message':hdx_helpers.hdx_build_nav_icon_with_message,\n 'hdx_num_of_new_related_items':hdx_helpers.hdx_num_of_new_related_items,\n 'hdx_get_extras_element':hdx_helpers.hdx_get_extras_element,\n 'hdx_get_user_info':hdx_helpers.hdx_get_user_info,\n 'hdx_linked_user':hdx_helpers.hdx_linked_user,\n 'hdx_show_singular_plural':hdx_helpers.hdx_show_singular_plural,\n 'hdx_member_roles_list':hdx_helpers.hdx_member_roles_list,\n 'hdx_organizations_available_with_roles':hdx_helpers.hdx_organizations_available_with_roles\n \n }\n \n def get_actions(self):\n from ckanext.hdx_theme import actions as 
hdx_actions\n return {\n 'organization_list_for_user':hdx_actions.organization_list_for_user, \n 'cached_group_list': hdx_actions.cached_group_list,\n 'hdx_basic_user_info': hdx_actions.hdx_basic_user_info,\n 'member_list': hdx_actions.member_list,\n 'hdx_get_sys_admins': hdx_actions.hdx_get_sys_admins\n \n }\n def get_auth_functions(self):\n return {\n 'hdx_basic_user_info': auth.hdx_basic_user_info,\n 'group_member_create': auth.group_member_create\n }\n \n def make_middleware(self, app, config):\n run_on_startup()\n return app\n\n \n \n\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/plugin.py"}]} | 2,208 | 420 |
gh_patches_debug_641 | rasdani/github-patches | git_diff | pex-tool__pex-2219 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.143
On the docket:
+ [x] pex fails to build pycryptodome due to filename too long #2087
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.142"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.142"
+__version__ = "2.1.143"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.142\"\n+__version__ = \"2.1.143\"\n", "issue": "Release 2.1.143\nOn the docket:\r\n+ [x] pex fails to build pycryptodome due to filename too long #2087 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.142\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.143\"\n", "path": "pex/version.py"}]} | 348 | 98 |
gh_patches_debug_21265 | rasdani/github-patches | git_diff | scikit-hep__pyhf-436 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test Deploy feature is broken
# Description
See https://github.com/FaradayRF/faradayio/issues/35 for a related issue - I think. Failing job on master here: https://travis-ci.org/diana-hep/pyhf/builds/517678508?utm_source=github_status&utm_medium=notification
# Expected Behavior
Expect it to pass.
# Actual Behavior
Observe it failing.
# Checklist
- [x] Run `git fetch` to get the most up to date version of `master`
- [x] Searched through existing Issues to confirm this is not a duplicate issue
- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from setuptools import setup, find_packages
4 from os import path
5 import sys
6
7 this_directory = path.abspath(path.dirname(__file__))
8 if sys.version_info.major < 3:
9 from io import open
10 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:
11 long_description = readme_md.read()
12
13 extras_require = {
14 'tensorflow': [
15 'tensorflow~=1.13',
16 'tensorflow-probability~=0.5',
17 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass
18 'setuptools<=39.1.0',
19 ],
20 'torch': ['torch~=1.0'],
21 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],
22 # 'dask': [
23 # 'dask[array]'
24 # ],
25 'xmlio': ['uproot'],
26 'minuit': ['iminuit'],
27 'develop': [
28 'pyflakes',
29 'pytest~=3.5',
30 'pytest-cov>=2.5.1',
31 'pytest-mock',
32 'pytest-benchmark[histogram]',
33 'pytest-console-scripts',
34 'python-coveralls',
35 'coverage>=4.0', # coveralls
36 'matplotlib',
37 'jupyter',
38 'nbdime',
39 'uproot~=3.3',
40 'papermill~=0.16',
41 'graphviz',
42 'bumpversion',
43 'sphinx',
44 'sphinxcontrib-bibtex',
45 'sphinxcontrib-napoleon',
46 'sphinx_rtd_theme',
47 'nbsphinx',
48 'sphinx-issues',
49 'm2r',
50 'jsonpatch',
51 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now
52 'pre-commit',
53 'black;python_version>="3.6"', # Black is Python3 only
54 'twine',
55 ],
56 }
57 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
58
59 setup(
60 name='pyhf',
61 version='0.0.16',
62 description='(partial) pure python histfactory implementation',
63 long_description=long_description,
64 long_description_content_type='text/markdown',
65 url='https://github.com/diana-hep/pyhf',
66 author='Lukas Heinrich',
67 author_email='[email protected]',
68 license='Apache',
69 keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',
70 classifiers=[
71 "Programming Language :: Python :: 2",
72 "Programming Language :: Python :: 2.7",
73 "Programming Language :: Python :: 3",
74 "Programming Language :: Python :: 3.6",
75 "Programming Language :: Python :: 3.7",
76 ],
77 packages=find_packages(),
78 include_package_data=True,
79 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*",
80 install_requires=[
81 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet
82 'click>=6.0', # for console scripts,
83 'tqdm', # for readxml
84 'six', # for modifiers
85 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
86 'jsonpatch',
87 ],
88 extras_require=extras_require,
89 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},
90 dependency_links=[],
91 )
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -56,6 +56,29 @@
}
extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
+
+def _is_test_pypi():
+ """
+ Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and
+ set to true (c.f. .travis.yml)
+
+ The use_scm_version kwarg accepts a callable for the local_scheme
+ configuration parameter with argument "version". This can be replaced
+ with a lambda as the desired version structure is {next_version}.dev{distance}
+ c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy
+
+ As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version
+ controlled through bumpversion is used.
+ """
+ from os import getenv
+
+ return (
+ {'local_scheme': lambda version: ''}
+ if getenv('TESTPYPI_UPLOAD') == 'true'
+ else False
+ )
+
+
setup(
name='pyhf',
version='0.0.16',
@@ -88,4 +111,5 @@
extras_require=extras_require,
entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},
dependency_links=[],
+ use_scm_version=_is_test_pypi(),
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -56,6 +56,29 @@\n }\n extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n \n+\n+def _is_test_pypi():\n+ \"\"\"\n+ Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and\n+ set to true (c.f. .travis.yml)\n+\n+ The use_scm_version kwarg accepts a callable for the local_scheme\n+ configuration parameter with argument \"version\". This can be replaced\n+ with a lambda as the desired version structure is {next_version}.dev{distance}\n+ c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy\n+\n+ As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version\n+ controlled through bumpversion is used.\n+ \"\"\"\n+ from os import getenv\n+\n+ return (\n+ {'local_scheme': lambda version: ''}\n+ if getenv('TESTPYPI_UPLOAD') == 'true'\n+ else False\n+ )\n+\n+\n setup(\n name='pyhf',\n version='0.0.16',\n@@ -88,4 +111,5 @@\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n+ use_scm_version=_is_test_pypi(),\n )\n", "issue": "Test Deploy feature is broken\n# Description\r\n\r\nSee https://github.com/FaradayRF/faradayio/issues/35 for a related issue - I think. Failing job on master here: https://travis-ci.org/diana-hep/pyhf/builds/517678508?utm_source=github_status&utm_medium=notification\r\n\r\n# Expected Behavior\r\n\r\nExpect it to pass.\r\n\r\n# Actual Behavior\r\n\r\nObserve it failing.\r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=1.13',\n 'tensorflow-probability~=0.5',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch~=1.0'],\n 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n 'papermill~=0.16',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.16',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n 
long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=1.13',\n 'tensorflow-probability~=0.5',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch~=1.0'],\n 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n 'papermill~=0.16',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\ndef _is_test_pypi():\n \"\"\"\n Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and\n set to true (c.f. .travis.yml)\n\n The use_scm_version kwarg accepts a callable for the local_scheme\n configuration parameter with argument \"version\". This can be replaced\n with a lambda as the desired version structure is {next_version}.dev{distance}\n c.f. 
https://github.com/pypa/setuptools_scm/#importing-in-setuppy\n\n As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version\n controlled through bumpversion is used.\n \"\"\"\n from os import getenv\n\n return (\n {'local_scheme': lambda version: ''}\n if getenv('TESTPYPI_UPLOAD') == 'true'\n else False\n )\n\n\nsetup(\n name='pyhf',\n version='0.0.16',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n use_scm_version=_is_test_pypi(),\n)\n", "path": "setup.py"}]} | 1,452 | 326 |
gh_patches_debug_29031 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-1495 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Forbid setting user and chat data attributes
Inevitably users will attempt to do `context.user_data = {'my': 'dict'}` or `context.chat_data = whatever`. This will lead to confusing behaviour.
I propose making those fields properties without a setter, which will raise a more helpful error. Depending on how far we want to go, we can also suggest that the user call `update(other_dict)` if they attempt to set the field.
See https://t.me/pythontelegrambotgroup/222138 onwards.
--- END ISSUE ---
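As an illustration of the read-only-property idea proposed in the issue (an editorial sketch, not part of the original report and not necessarily the patch the project shipped; the wording of the error message is an assumption), a property whose setter raises keeps the attribute readable while turning assignment into a loud, helpful failure:

```python
class CallbackContext(object):
    def __init__(self, dispatcher):
        self._dispatcher = dispatcher
        # populated internally from dispatcher.user_data for the current update
        self._user_data = None

    @property
    def user_data(self):
        return self._user_data

    @user_data.setter
    def user_data(self, value):
        # Assignment would silently detach the dict from the dispatcher,
        # so fail loudly and point the user at dict.update() instead.
        raise AttributeError(
            "You can not assign a new value to user_data; "
            "use context.user_data.update(other_dict) instead."
        )
```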
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram/ext/callbackcontext.py`
Content:
```
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2018
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains the CallbackContext class."""
20
21 from telegram import Update
22
23
24 class CallbackContext(object):
25 """
26 This is a context object passed to the callback called by :class:`telegram.ext.Handler`
27 or by the :class:`telegram.ext.Dispatcher` in an error handler added by
28 :attr:`telegram.ext.Dispatcher.add_error_handler` or to the callback of a
29 :class:`telegram.ext.Job`.
30
31 Note:
32 :class:`telegram.ext.Dispatcher` will create a single context for an entire update. This
33 means that if you got 2 handlers in different groups and they both get called, they will
34 get passed the same `CallbackContext` object (of course with proper attributes like
35 `.matches` differing). This allows you to add custom attributes in a lower handler group
36 callback, and then subsequently access those attributes in a higher handler group callback.
37 Note that the attributes on `CallbackContext` might change in the future, so make sure to
38 use a fairly unique name for the attributes.
39
40 Warning:
41 Do not combine custom attributes and @run_async. Due to how @run_async works, it will
42 almost certainly execute the callbacks for an update out of order, and the attributes
43 that you think you added will not be present.
44
45 Attributes:
46 chat_data (:obj:`dict`, optional): A dict that can be used to keep any data in. For each
47 update from the same chat it will be the same ``dict``.
48 user_data (:obj:`dict`, optional): A dict that can be used to keep any data in. For each
49 update from the same user it will be the same ``dict``.
50 matches (List[:obj:`re match object`], optional): If the associated update originated from
51 a regex-supported handler or had a :class:`Filters.regex`, this will contain a list of
52 match objects for every pattern where ``re.search(pattern, string)`` returned a match.
53 Note that filters short circuit, so combined regex filters will not always
54 be evaluated.
55 args (List[:obj:`str`], optional): Arguments passed to a command if the associated update
56 is handled by :class:`telegram.ext.CommandHandler`, :class:`telegram.ext.PrefixHandler`
57 or :class:`telegram.ext.StringCommandHandler`. It contains a list of the words in the
58 text after the command, using any whitespace string as a delimiter.
59 error (:class:`telegram.TelegramError`, optional): The Telegram error that was raised.
60 Only present when passed to a error handler registered with
61 :attr:`telegram.ext.Dispatcher.add_error_handler`.
62 job (:class:`telegram.ext.Job`): The job that that originated this callback.
63 Only present when passed to the callback of :class:`telegram.ext.Job`.
64
65 """
66
67 def __init__(self, dispatcher):
68 """
69 Args:
70 dispatcher (:class:`telegram.ext.Dispatcher`):
71 """
72 if not dispatcher.use_context:
73 raise ValueError('CallbackContext should not be used with a non context aware '
74 'dispatcher!')
75 self._dispatcher = dispatcher
76 self.chat_data = None
77 self.user_data = None
78 self.args = None
79 self.matches = None
80 self.error = None
81 self.job = None
82
83 @classmethod
84 def from_error(cls, update, error, dispatcher):
85 self = cls.from_update(update, dispatcher)
86 self.error = error
87 return self
88
89 @classmethod
90 def from_update(cls, update, dispatcher):
91 self = cls(dispatcher)
92 if update is not None and isinstance(update, Update):
93 chat = update.effective_chat
94 user = update.effective_user
95
96 if chat:
97 self.chat_data = dispatcher.chat_data[chat.id]
98 if user:
99 self.user_data = dispatcher.user_data[user.id]
100 return self
101
102 @classmethod
103 def from_job(cls, job, dispatcher):
104 self = cls(dispatcher)
105 self.job = job
106 return self
107
108 def update(self, data):
109 self.__dict__.update(data)
110
111 @property
112 def bot(self):
113 """:class:`telegram.Bot`: The bot associated with this context."""
114 return self._dispatcher.bot
115
116 @property
117 def job_queue(self):
118 """
119 :class:`telegram.ext.JobQueue`: The ``JobQueue`` used by the
120 :class:`telegram.ext.Dispatcher` and (usually) the :class:`telegram.ext.Updater`
121 associated with this context.
122
123 """
124 return self._dispatcher.job_queue
125
126 @property
127 def update_queue(self):
128 """
129 :class:`queue.Queue`: The ``Queue`` instance used by the
130 :class:`telegram.ext.Dispatcher` and (usually) the :class:`telegram.ext.Updater`
131 associated with this context.
132
133 """
134 return self._dispatcher.update_queue
135
136 @property
137 def match(self):
138 """
139 `Regex match type`: The first match from :attr:`matches`.
140 Useful if you are only filtering using a single regex filter.
141 Returns `None` if :attr:`matches` is empty.
142 """
143 try:
144 return self.matches[0] # pylint: disable=unsubscriptable-object
145 except (IndexError, TypeError):
146 return None
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/telegram/ext/callbackcontext.py b/telegram/ext/callbackcontext.py
--- a/telegram/ext/callbackcontext.py
+++ b/telegram/ext/callbackcontext.py
@@ -73,13 +73,31 @@
raise ValueError('CallbackContext should not be used with a non context aware '
'dispatcher!')
self._dispatcher = dispatcher
- self.chat_data = None
- self.user_data = None
+ self._chat_data = None
+ self._user_data = None
self.args = None
self.matches = None
self.error = None
self.job = None
+ @property
+ def chat_data(self):
+ return self._chat_data
+
+ @chat_data.setter
+ def chat_data(self, value):
+ raise AttributeError("You can not assign a new value to chat_data, see "
+ "https://git.io/fjxKe")
+
+ @property
+ def user_data(self):
+ return self._user_data
+
+ @user_data.setter
+ def user_data(self, value):
+ raise AttributeError("You can not assign a new value to user_data, see "
+ "https://git.io/fjxKe")
+
@classmethod
def from_error(cls, update, error, dispatcher):
self = cls.from_update(update, dispatcher)
@@ -94,9 +112,9 @@
user = update.effective_user
if chat:
- self.chat_data = dispatcher.chat_data[chat.id]
+ self._chat_data = dispatcher.chat_data[chat.id]
if user:
- self.user_data = dispatcher.user_data[user.id]
+ self._user_data = dispatcher.user_data[user.id]
return self
@classmethod
| {"golden_diff": "diff --git a/telegram/ext/callbackcontext.py b/telegram/ext/callbackcontext.py\n--- a/telegram/ext/callbackcontext.py\n+++ b/telegram/ext/callbackcontext.py\n@@ -73,13 +73,31 @@\n raise ValueError('CallbackContext should not be used with a non context aware '\n 'dispatcher!')\n self._dispatcher = dispatcher\n- self.chat_data = None\n- self.user_data = None\n+ self._chat_data = None\n+ self._user_data = None\n self.args = None\n self.matches = None\n self.error = None\n self.job = None\n \n+ @property\n+ def chat_data(self):\n+ return self._chat_data\n+\n+ @chat_data.setter\n+ def chat_data(self, value):\n+ raise AttributeError(\"You can not assign a new value to chat_data, see \"\n+ \"https://git.io/fjxKe\")\n+\n+ @property\n+ def user_data(self):\n+ return self._user_data\n+\n+ @user_data.setter\n+ def user_data(self, value):\n+ raise AttributeError(\"You can not assign a new value to user_data, see \"\n+ \"https://git.io/fjxKe\")\n+\n @classmethod\n def from_error(cls, update, error, dispatcher):\n self = cls.from_update(update, dispatcher)\n@@ -94,9 +112,9 @@\n user = update.effective_user\n \n if chat:\n- self.chat_data = dispatcher.chat_data[chat.id]\n+ self._chat_data = dispatcher.chat_data[chat.id]\n if user:\n- self.user_data = dispatcher.user_data[user.id]\n+ self._user_data = dispatcher.user_data[user.id]\n return self\n \n @classmethod\n", "issue": "Forbid setting user and chat data attributes\nInevitably users will attempt to do `context.user_data = {'my': 'dict'}` or `context.chat_data = whatever`. This will lead to confusing behaviour.\r\n\r\nI propose making those fields properties without a setter, which will more helpfully error. Depending on how far we want to go, we can also suggest the user to use `update(other_dict)` if they attempt to set the field.\r\n\r\nSee https://t.me/pythontelegrambotgroup/222138 onwards.\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the CallbackContext class.\"\"\"\n\nfrom telegram import Update\n\n\nclass CallbackContext(object):\n \"\"\"\n This is a context object passed to the callback called by :class:`telegram.ext.Handler`\n or by the :class:`telegram.ext.Dispatcher` in an error handler added by\n :attr:`telegram.ext.Dispatcher.add_error_handler` or to the callback of a\n :class:`telegram.ext.Job`.\n\n Note:\n :class:`telegram.ext.Dispatcher` will create a single context for an entire update. This\n means that if you got 2 handlers in different groups and they both get called, they will\n get passed the same `CallbackContext` object (of course with proper attributes like\n `.matches` differing). 
This allows you to add custom attributes in a lower handler group\n callback, and then subsequently access those attributes in a higher handler group callback.\n Note that the attributes on `CallbackContext` might change in the future, so make sure to\n use a fairly unique name for the attributes.\n\n Warning:\n Do not combine custom attributes and @run_async. Due to how @run_async works, it will\n almost certainly execute the callbacks for an update out of order, and the attributes\n that you think you added will not be present.\n\n Attributes:\n chat_data (:obj:`dict`, optional): A dict that can be used to keep any data in. For each\n update from the same chat it will be the same ``dict``.\n user_data (:obj:`dict`, optional): A dict that can be used to keep any data in. For each\n update from the same user it will be the same ``dict``.\n matches (List[:obj:`re match object`], optional): If the associated update originated from\n a regex-supported handler or had a :class:`Filters.regex`, this will contain a list of\n match objects for every pattern where ``re.search(pattern, string)`` returned a match.\n Note that filters short circuit, so combined regex filters will not always\n be evaluated.\n args (List[:obj:`str`], optional): Arguments passed to a command if the associated update\n is handled by :class:`telegram.ext.CommandHandler`, :class:`telegram.ext.PrefixHandler`\n or :class:`telegram.ext.StringCommandHandler`. It contains a list of the words in the\n text after the command, using any whitespace string as a delimiter.\n error (:class:`telegram.TelegramError`, optional): The Telegram error that was raised.\n Only present when passed to a error handler registered with\n :attr:`telegram.ext.Dispatcher.add_error_handler`.\n job (:class:`telegram.ext.Job`): The job that that originated this callback.\n Only present when passed to the callback of :class:`telegram.ext.Job`.\n\n \"\"\"\n\n def __init__(self, dispatcher):\n \"\"\"\n Args:\n dispatcher (:class:`telegram.ext.Dispatcher`):\n \"\"\"\n if not dispatcher.use_context:\n raise ValueError('CallbackContext should not be used with a non context aware '\n 'dispatcher!')\n self._dispatcher = dispatcher\n self.chat_data = None\n self.user_data = None\n self.args = None\n self.matches = None\n self.error = None\n self.job = None\n\n @classmethod\n def from_error(cls, update, error, dispatcher):\n self = cls.from_update(update, dispatcher)\n self.error = error\n return self\n\n @classmethod\n def from_update(cls, update, dispatcher):\n self = cls(dispatcher)\n if update is not None and isinstance(update, Update):\n chat = update.effective_chat\n user = update.effective_user\n\n if chat:\n self.chat_data = dispatcher.chat_data[chat.id]\n if user:\n self.user_data = dispatcher.user_data[user.id]\n return self\n\n @classmethod\n def from_job(cls, job, dispatcher):\n self = cls(dispatcher)\n self.job = job\n return self\n\n def update(self, data):\n self.__dict__.update(data)\n\n @property\n def bot(self):\n \"\"\":class:`telegram.Bot`: The bot associated with this context.\"\"\"\n return self._dispatcher.bot\n\n @property\n def job_queue(self):\n \"\"\"\n :class:`telegram.ext.JobQueue`: The ``JobQueue`` used by the\n :class:`telegram.ext.Dispatcher` and (usually) the :class:`telegram.ext.Updater`\n associated with this context.\n\n \"\"\"\n return self._dispatcher.job_queue\n\n @property\n def update_queue(self):\n \"\"\"\n :class:`queue.Queue`: The ``Queue`` instance used by the\n :class:`telegram.ext.Dispatcher` and (usually) the 
:class:`telegram.ext.Updater`\n associated with this context.\n\n \"\"\"\n return self._dispatcher.update_queue\n\n @property\n def match(self):\n \"\"\"\n `Regex match type`: The first match from :attr:`matches`.\n Useful if you are only filtering using a single regex filter.\n Returns `None` if :attr:`matches` is empty.\n \"\"\"\n try:\n return self.matches[0] # pylint: disable=unsubscriptable-object\n except (IndexError, TypeError):\n return None\n", "path": "telegram/ext/callbackcontext.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the CallbackContext class.\"\"\"\n\nfrom telegram import Update\n\n\nclass CallbackContext(object):\n \"\"\"\n This is a context object passed to the callback called by :class:`telegram.ext.Handler`\n or by the :class:`telegram.ext.Dispatcher` in an error handler added by\n :attr:`telegram.ext.Dispatcher.add_error_handler` or to the callback of a\n :class:`telegram.ext.Job`.\n\n Note:\n :class:`telegram.ext.Dispatcher` will create a single context for an entire update. This\n means that if you got 2 handlers in different groups and they both get called, they will\n get passed the same `CallbackContext` object (of course with proper attributes like\n `.matches` differing). This allows you to add custom attributes in a lower handler group\n callback, and then subsequently access those attributes in a higher handler group callback.\n Note that the attributes on `CallbackContext` might change in the future, so make sure to\n use a fairly unique name for the attributes.\n\n Warning:\n Do not combine custom attributes and @run_async. Due to how @run_async works, it will\n almost certainly execute the callbacks for an update out of order, and the attributes\n that you think you added will not be present.\n\n Attributes:\n chat_data (:obj:`dict`, optional): A dict that can be used to keep any data in. For each\n update from the same chat it will be the same ``dict``.\n user_data (:obj:`dict`, optional): A dict that can be used to keep any data in. For each\n update from the same user it will be the same ``dict``.\n matches (List[:obj:`re match object`], optional): If the associated update originated from\n a regex-supported handler or had a :class:`Filters.regex`, this will contain a list of\n match objects for every pattern where ``re.search(pattern, string)`` returned a match.\n Note that filters short circuit, so combined regex filters will not always\n be evaluated.\n args (List[:obj:`str`], optional): Arguments passed to a command if the associated update\n is handled by :class:`telegram.ext.CommandHandler`, :class:`telegram.ext.PrefixHandler`\n or :class:`telegram.ext.StringCommandHandler`. 
It contains a list of the words in the\n text after the command, using any whitespace string as a delimiter.\n error (:class:`telegram.TelegramError`, optional): The Telegram error that was raised.\n Only present when passed to a error handler registered with\n :attr:`telegram.ext.Dispatcher.add_error_handler`.\n job (:class:`telegram.ext.Job`): The job that that originated this callback.\n Only present when passed to the callback of :class:`telegram.ext.Job`.\n\n \"\"\"\n\n def __init__(self, dispatcher):\n \"\"\"\n Args:\n dispatcher (:class:`telegram.ext.Dispatcher`):\n \"\"\"\n if not dispatcher.use_context:\n raise ValueError('CallbackContext should not be used with a non context aware '\n 'dispatcher!')\n self._dispatcher = dispatcher\n self._chat_data = None\n self._user_data = None\n self.args = None\n self.matches = None\n self.error = None\n self.job = None\n\n @property\n def chat_data(self):\n return self._chat_data\n\n @chat_data.setter\n def chat_data(self, value):\n raise AttributeError(\"You can not assign a new value to chat_data, see \"\n \"https://git.io/fjxKe\")\n\n @property\n def user_data(self):\n return self._user_data\n\n @user_data.setter\n def user_data(self, value):\n raise AttributeError(\"You can not assign a new value to user_data, see \"\n \"https://git.io/fjxKe\")\n\n @classmethod\n def from_error(cls, update, error, dispatcher):\n self = cls.from_update(update, dispatcher)\n self.error = error\n return self\n\n @classmethod\n def from_update(cls, update, dispatcher):\n self = cls(dispatcher)\n if update is not None and isinstance(update, Update):\n chat = update.effective_chat\n user = update.effective_user\n\n if chat:\n self._chat_data = dispatcher.chat_data[chat.id]\n if user:\n self._user_data = dispatcher.user_data[user.id]\n return self\n\n @classmethod\n def from_job(cls, job, dispatcher):\n self = cls(dispatcher)\n self.job = job\n return self\n\n def update(self, data):\n self.__dict__.update(data)\n\n @property\n def bot(self):\n \"\"\":class:`telegram.Bot`: The bot associated with this context.\"\"\"\n return self._dispatcher.bot\n\n @property\n def job_queue(self):\n \"\"\"\n :class:`telegram.ext.JobQueue`: The ``JobQueue`` used by the\n :class:`telegram.ext.Dispatcher` and (usually) the :class:`telegram.ext.Updater`\n associated with this context.\n\n \"\"\"\n return self._dispatcher.job_queue\n\n @property\n def update_queue(self):\n \"\"\"\n :class:`queue.Queue`: The ``Queue`` instance used by the\n :class:`telegram.ext.Dispatcher` and (usually) the :class:`telegram.ext.Updater`\n associated with this context.\n\n \"\"\"\n return self._dispatcher.update_queue\n\n @property\n def match(self):\n \"\"\"\n `Regex match type`: The first match from :attr:`matches`.\n Useful if you are only filtering using a single regex filter.\n Returns `None` if :attr:`matches` is empty.\n \"\"\"\n try:\n return self.matches[0] # pylint: disable=unsubscriptable-object\n except (IndexError, TypeError):\n return None\n", "path": "telegram/ext/callbackcontext.py"}]} | 2,020 | 386 |
gh_patches_debug_30825 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-2484 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ordering by a non-orderable column (implicitly or explicitly) causes a backend crash
## Bug
Brent [writes](https://github.com/centerofci/mathesar/pull/2253#pullrequestreview-1251718380):
> At one point, you note in a comment that there's a bug whenever we have internal columns of JSON type, and no primary key in a table. This bug is still present for _any_ non-orderable column type if there is no primary key in the table (e.g., if the user attaches to a preexisting DB). It's easy to figure out whether or not a type is orderable in PostgreSQL. See the [docs page for ORDER BY](https://www.postgresql.org/docs/current/queries-order.html#id-1.5.6.9.5.10) and the [docs page for Operator Classes](https://www.postgresql.org/docs/9.5/indexes-opclass.html).
Brent is referring to [this comment](https://github.com/centerofci/mathesar/blob/8c215b2efdd740da0d530b624763cf260e12ca90/db/records/operations/sort.py#L47).
The error message received upon such a crash is: `could not identify an ordering operator for type json` (`json` is the type of the unorderable column in this case).
## Solution
The solution is, as explained by Brent, to make sure that every column we order by (implicitly or explicitly) is orderable.
## Severity
Because of this, a JSON column (it's non-orderable) can make a query/table view unrenderable, but only when the table/query does not have a primary key, which cannot happen in a Mathesar-created table. It can only happen when Mathesar is used to access a pre-existing database.
--- END ISSUE ---
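A minimal sketch of the orderability check Brent points to (an editorial illustration, not the patch the project adopted; the helper name `is_type_orderable` is hypothetical and an SQLAlchemy engine is assumed to be available): PostgreSQL considers a type safe to `ORDER BY` when it has a default btree operator class, which can be looked up in `pg_opclass`.

```python
from sqlalchemy import text

def is_type_orderable(engine, pg_type_name):
    """Return True if ORDER BY is well-defined for the given PostgreSQL type,
    i.e. the type has a default btree operator class."""
    query = text(
        """
        SELECT EXISTS (
            SELECT 1
            FROM pg_opclass oc
            JOIN pg_am am ON am.oid = oc.opcmethod
            WHERE am.amname = 'btree'
              AND oc.opcdefault
              AND oc.opcintype = CAST(:type_name AS regtype)
        )
        """
    )
    with engine.connect() as conn:
        return bool(conn.execute(query, {"type_name": pg_type_name}).scalar())

# is_type_orderable(engine, "json")  -> False (no default btree opclass)
# is_type_orderable(engine, "text")  -> True
```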
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/records/operations/sort.py`
Content:
```
1 from collections import namedtuple
2 from sqlalchemy import select
3 from db.columns import utils as col_utils
4 from db.records.exceptions import BadSortFormat, SortFieldNotFound
5
6
7 def make_order_by_deterministic(relation, order_by=None):
8 """
9 Makes an order_by deterministic (totally ordering).
10
11 Given a relation, and a `order_by` spec, that defines the ordering to be applied to the
12 relation, returns a new order_by that is the totally ordered (deterministic) version of the
13 input order_by.
14
15 Appending primary key sort guarantees determinism, but if that fails, we revert to ordering by
16 all columns.
17 """
18 if order_by is None:
19 order_by = []
20 order_by = _append_primary_key_sort(relation, order_by)
21 if not order_by:
22 order_by = _build_order_by_all_columns_clause(relation)
23 return order_by
24
25
26 def _append_primary_key_sort(relation, order_by):
27 """
28 Given an order by clause, we can guarantee a deterministic ordering
29 overall by appending a final ordering by primary key if one exists.
30 """
31 pk_cols = col_utils.get_primary_key_column_collection_from_relation(relation)
32 order_by = list(order_by)
33 if pk_cols is not None:
34 order_by += [
35 {'field': col, 'direction': 'asc'}
36 for col
37 in set(pk_cols).intersection(relation.columns)
38 ]
39 return order_by
40
41
42 def _build_order_by_all_columns_clause(relation):
43 """
44 To be used when we have failed to find any other ordering criteria,
45 since ordering by all columns is inherently inefficient.
46
47 Note the filtering out of internal columns. Before applying this fix, psycopg was throwing an error
48 like "could not identify an ordering operator for type json", because we were trying to
49 sort by an internal column like `__mathesar_group_metadata`, which has type `json`, which
50 requires special handling to be sorted. The problem is bypassed by not attempting to sort on
51 internal columns.
52 """
53 return [
54 {'field': col, 'direction': 'asc'}
55 for col
56 in relation.columns
57 if not _is_internal_column(col)
58 ]
59
60
61 def _is_internal_column(col):
62 """
63 Might not be exhaustive, take care.
64 """
65 return col.name == '__mathesar_group_metadata'
66
67
68 def apply_relation_sorting(relation, sort_spec):
69 order_by_list = [
70 _get_sorted_column_obj_from_spec(relation, spec) for spec in sort_spec
71 ]
72 return select(relation).order_by(*order_by_list)
73
74
75 def _get_sorted_column_obj_from_spec(relation, spec):
76 try:
77 sort_spec = _deserialize_sort_spec(spec)
78 except (KeyError, TypeError, AssertionError):
79 raise BadSortFormat
80
81 try:
82 column = col_utils.get_column_obj_from_relation(relation, sort_spec.field)
83 except KeyError as e:
84 raise SortFieldNotFound(e)
85 except AttributeError:
86 raise BadSortFormat
87
88 try:
89 directed_col = _build_directed_column_expr(column, sort_spec)
90 except AttributeError:
91 raise BadSortFormat
92
93 return directed_col
94
95
96 def _deserialize_sort_spec(spec):
97 sort_spec = namedtuple(
98 '_sort_spec',
99 ['field', 'direction', 'nullsfirst', 'nullslast']
100 )(
101 field=spec['field'],
102 direction=spec['direction'],
103 nullsfirst=spec.get('nullsfirst', False),
104 nullslast=spec.get('nullslast', False)
105 )
106 # Since it's not valid to have both nullsfirst and nullslast.
107 assert not sort_spec.nullsfirst or not sort_spec.nullslast
108 return sort_spec
109
110
111 def _build_directed_column_expr(column, sort_spec):
112 directed_col = getattr(column, sort_spec.direction)()
113 if sort_spec.nullsfirst:
114 directed_col = directed_col.nulls_first()
115 elif sort_spec.nullslast:
116 directed_col = directed_col.nulls_last()
117 return directed_col
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/records/operations/sort.py b/db/records/operations/sort.py
--- a/db/records/operations/sort.py
+++ b/db/records/operations/sort.py
@@ -44,27 +44,37 @@
To be used when we have failed to find any other ordering criteria,
since ordering by all columns is inherently inefficient.
- Note the filtering out of internal columns. Before applying this fix, psycopg was throwing an error
- like "could not identify an ordering operator for type json", because we were trying to
- sort by an internal column like `__mathesar_group_metadata`, which has type `json`, which
- requires special handling to be sorted. The problem is bypassed by not attempting to sort on
- internal columns.
+ Note the filtering out some columns, namely internal columns and non-orderable columns. See
+ their docstrings for details.
"""
return [
{'field': col, 'direction': 'asc'}
for col
in relation.columns
- if not _is_internal_column(col)
+ if _is_col_orderable(col) and not _is_internal_column(col)
]
def _is_internal_column(col):
"""
+ Columns that Mathesar adds for its own devices and does not expose to the user. We don't want
+ to sort by these.
+
Might not be exhaustive, take care.
"""
return col.name == '__mathesar_group_metadata'
+def _is_col_orderable(col):
+ """
+ Some columns are not orderable (or at least don't have a non-ambiguous way to define order
+ without additional logic). We only want to order by orderably columns.
+ """
+ data_type = col.type
+ non_orderable_type = ['Binary', 'LargeBinary', 'PickleType', 'ARRAY', 'JSON', 'JSONB']
+ return str(data_type) not in non_orderable_type
+
+
def apply_relation_sorting(relation, sort_spec):
order_by_list = [
_get_sorted_column_obj_from_spec(relation, spec) for spec in sort_spec
| {"golden_diff": "diff --git a/db/records/operations/sort.py b/db/records/operations/sort.py\n--- a/db/records/operations/sort.py\n+++ b/db/records/operations/sort.py\n@@ -44,27 +44,37 @@\n To be used when we have failed to find any other ordering criteria,\n since ordering by all columns is inherently inefficient.\n \n- Note the filtering out of internal columns. Before applying this fix, psycopg was throwing an error\n- like \"could not identify an ordering operator for type json\", because we were trying to\n- sort by an internal column like `__mathesar_group_metadata`, which has type `json`, which\n- requires special handling to be sorted. The problem is bypassed by not attempting to sort on\n- internal columns.\n+ Note the filtering out some columns, namely internal columns and non-orderable columns. See\n+ their docstrings for details.\n \"\"\"\n return [\n {'field': col, 'direction': 'asc'}\n for col\n in relation.columns\n- if not _is_internal_column(col)\n+ if _is_col_orderable(col) and not _is_internal_column(col)\n ]\n \n \n def _is_internal_column(col):\n \"\"\"\n+ Columns that Mathesar adds for its own devices and does not expose to the user. We don't want\n+ to sort by these.\n+\n Might not be exhaustive, take care.\n \"\"\"\n return col.name == '__mathesar_group_metadata'\n \n \n+def _is_col_orderable(col):\n+ \"\"\"\n+ Some columns are not orderable (or at least don't have a non-ambiguous way to define order\n+ without additional logic). We only want to order by orderably columns.\n+ \"\"\"\n+ data_type = col.type\n+ non_orderable_type = ['Binary', 'LargeBinary', 'PickleType', 'ARRAY', 'JSON', 'JSONB']\n+ return str(data_type) not in non_orderable_type\n+\n+\n def apply_relation_sorting(relation, sort_spec):\n order_by_list = [\n _get_sorted_column_obj_from_spec(relation, spec) for spec in sort_spec\n", "issue": "Ordering by a non-orderable column (implicitly or explicitly) causes a backend crash\n## Bug\r\n\r\nBrent [writes](https://github.com/centerofci/mathesar/pull/2253#pullrequestreview-1251718380):\r\n\r\n> At one point, you note in a comment that there's a bug whenever we have internal columns of JSON type, and no primary key in a table. This bug is still present for _any_ non-orderable column type if there is no primary key in the table (e.g., if the user attaches to a preexisting DB). It's easy to figure out whether or not a type is orderable in PostgreSQL. See the [docs page for ORDER BY](https://www.postgresql.org/docs/current/queries-order.html#id-1.5.6.9.5.10) and the [docs page for Operator Classes](https://www.postgresql.org/docs/9.5/indexes-opclass.html).\r\n\r\nBrent is referring to [this comment](https://github.com/centerofci/mathesar/blob/8c215b2efdd740da0d530b624763cf260e12ca90/db/records/operations/sort.py#L47).\r\n\r\nThe error message received upon such a crash is: `could not identify an ordering operator for type json` (`json` is the type of the unorderable column in this case.\r\n\r\n## Solution\r\n\r\nThe solution is, as explained by Brent, to make sure that every column we order by (implicitly or explicitly) is orderable.\r\n\r\n## Severity\r\n\r\nBecause of this, a JSON column (it's non-orderable), can make a query/table view unrenderable, but only when the table/query does not have a primary key, which cannot happen in a Mathesar-created table. 
It can only happen when Mathesar is used to access a pre-existing database.\n", "before_files": [{"content": "from collections import namedtuple\nfrom sqlalchemy import select\nfrom db.columns import utils as col_utils\nfrom db.records.exceptions import BadSortFormat, SortFieldNotFound\n\n\ndef make_order_by_deterministic(relation, order_by=None):\n \"\"\"\n Makes an order_by deterministic (totally ordering).\n\n Given a relation, and a `order_by` spec, that defines the ordering to be applied to the\n relation, returns a new order_by that is the totally ordered (deterministic) version of the\n input order_by.\n\n Appending primary key sort guarantees determinism, but if that fails, we revert to ordering by\n all columns.\n \"\"\"\n if order_by is None:\n order_by = []\n order_by = _append_primary_key_sort(relation, order_by)\n if not order_by:\n order_by = _build_order_by_all_columns_clause(relation)\n return order_by\n\n\ndef _append_primary_key_sort(relation, order_by):\n \"\"\"\n Given an order by clause, we can guarantee a deterministic ordering\n overall by appending a final ordering by primary key if one exists.\n \"\"\"\n pk_cols = col_utils.get_primary_key_column_collection_from_relation(relation)\n order_by = list(order_by)\n if pk_cols is not None:\n order_by += [\n {'field': col, 'direction': 'asc'}\n for col\n in set(pk_cols).intersection(relation.columns)\n ]\n return order_by\n\n\ndef _build_order_by_all_columns_clause(relation):\n \"\"\"\n To be used when we have failed to find any other ordering criteria,\n since ordering by all columns is inherently inefficient.\n\n Note the filtering out of internal columns. Before applying this fix, psycopg was throwing an error\n like \"could not identify an ordering operator for type json\", because we were trying to\n sort by an internal column like `__mathesar_group_metadata`, which has type `json`, which\n requires special handling to be sorted. 
The problem is bypassed by not attempting to sort on\n internal columns.\n \"\"\"\n return [\n {'field': col, 'direction': 'asc'}\n for col\n in relation.columns\n if not _is_internal_column(col)\n ]\n\n\ndef _is_internal_column(col):\n \"\"\"\n Might not be exhaustive, take care.\n \"\"\"\n return col.name == '__mathesar_group_metadata'\n\n\ndef apply_relation_sorting(relation, sort_spec):\n order_by_list = [\n _get_sorted_column_obj_from_spec(relation, spec) for spec in sort_spec\n ]\n return select(relation).order_by(*order_by_list)\n\n\ndef _get_sorted_column_obj_from_spec(relation, spec):\n try:\n sort_spec = _deserialize_sort_spec(spec)\n except (KeyError, TypeError, AssertionError):\n raise BadSortFormat\n\n try:\n column = col_utils.get_column_obj_from_relation(relation, sort_spec.field)\n except KeyError as e:\n raise SortFieldNotFound(e)\n except AttributeError:\n raise BadSortFormat\n\n try:\n directed_col = _build_directed_column_expr(column, sort_spec)\n except AttributeError:\n raise BadSortFormat\n\n return directed_col\n\n\ndef _deserialize_sort_spec(spec):\n sort_spec = namedtuple(\n '_sort_spec',\n ['field', 'direction', 'nullsfirst', 'nullslast']\n )(\n field=spec['field'],\n direction=spec['direction'],\n nullsfirst=spec.get('nullsfirst', False),\n nullslast=spec.get('nullslast', False)\n )\n # Since it's not valid to have both nullsfirst and nullslast.\n assert not sort_spec.nullsfirst or not sort_spec.nullslast\n return sort_spec\n\n\ndef _build_directed_column_expr(column, sort_spec):\n directed_col = getattr(column, sort_spec.direction)()\n if sort_spec.nullsfirst:\n directed_col = directed_col.nulls_first()\n elif sort_spec.nullslast:\n directed_col = directed_col.nulls_last()\n return directed_col\n", "path": "db/records/operations/sort.py"}], "after_files": [{"content": "from collections import namedtuple\nfrom sqlalchemy import select\nfrom db.columns import utils as col_utils\nfrom db.records.exceptions import BadSortFormat, SortFieldNotFound\n\n\ndef make_order_by_deterministic(relation, order_by=None):\n \"\"\"\n Makes an order_by deterministic (totally ordering).\n\n Given a relation, and a `order_by` spec, that defines the ordering to be applied to the\n relation, returns a new order_by that is the totally ordered (deterministic) version of the\n input order_by.\n\n Appending primary key sort guarantees determinism, but if that fails, we revert to ordering by\n all columns.\n \"\"\"\n if order_by is None:\n order_by = []\n order_by = _append_primary_key_sort(relation, order_by)\n if not order_by:\n order_by = _build_order_by_all_columns_clause(relation)\n return order_by\n\n\ndef _append_primary_key_sort(relation, order_by):\n \"\"\"\n Given an order by clause, we can guarantee a deterministic ordering\n overall by appending a final ordering by primary key if one exists.\n \"\"\"\n pk_cols = col_utils.get_primary_key_column_collection_from_relation(relation)\n order_by = list(order_by)\n if pk_cols is not None:\n order_by += [\n {'field': col, 'direction': 'asc'}\n for col\n in set(pk_cols).intersection(relation.columns)\n ]\n return order_by\n\n\ndef _build_order_by_all_columns_clause(relation):\n \"\"\"\n To be used when we have failed to find any other ordering criteria,\n since ordering by all columns is inherently inefficient.\n\n Note the filtering out some columns, namely internal columns and non-orderable columns. 
See\n their docstrings for details.\n \"\"\"\n return [\n {'field': col, 'direction': 'asc'}\n for col\n in relation.columns\n if _is_col_orderable(col) and not _is_internal_column(col)\n ]\n\n\ndef _is_internal_column(col):\n \"\"\"\n Columns that Mathesar adds for its own devices and does not expose to the user. We don't want\n to sort by these.\n\n Might not be exhaustive, take care.\n \"\"\"\n return col.name == '__mathesar_group_metadata'\n\n\ndef _is_col_orderable(col):\n \"\"\"\n Some columns are not orderable (or at least don't have a non-ambiguous way to define order\n without additional logic). We only want to order by orderably columns.\n \"\"\"\n data_type = col.type\n non_orderable_type = ['Binary', 'LargeBinary', 'PickleType', 'ARRAY', 'JSON', 'JSONB']\n return str(data_type) not in non_orderable_type\n\n\ndef apply_relation_sorting(relation, sort_spec):\n order_by_list = [\n _get_sorted_column_obj_from_spec(relation, spec) for spec in sort_spec\n ]\n return select(relation).order_by(*order_by_list)\n\n\ndef _get_sorted_column_obj_from_spec(relation, spec):\n try:\n sort_spec = _deserialize_sort_spec(spec)\n except (KeyError, TypeError, AssertionError):\n raise BadSortFormat\n\n try:\n column = col_utils.get_column_obj_from_relation(relation, sort_spec.field)\n except KeyError as e:\n raise SortFieldNotFound(e)\n except AttributeError:\n raise BadSortFormat\n\n try:\n directed_col = _build_directed_column_expr(column, sort_spec)\n except AttributeError:\n raise BadSortFormat\n\n return directed_col\n\n\ndef _deserialize_sort_spec(spec):\n sort_spec = namedtuple(\n '_sort_spec',\n ['field', 'direction', 'nullsfirst', 'nullslast']\n )(\n field=spec['field'],\n direction=spec['direction'],\n nullsfirst=spec.get('nullsfirst', False),\n nullslast=spec.get('nullslast', False)\n )\n # Since it's not valid to have both nullsfirst and nullslast.\n assert not sort_spec.nullsfirst or not sort_spec.nullslast\n return sort_spec\n\n\ndef _build_directed_column_expr(column, sort_spec):\n directed_col = getattr(column, sort_spec.direction)()\n if sort_spec.nullsfirst:\n directed_col = directed_col.nulls_first()\n elif sort_spec.nullslast:\n directed_col = directed_col.nulls_last()\n return directed_col\n", "path": "db/records/operations/sort.py"}]} | 1,791 | 467 |
gh_patches_debug_25806 | rasdani/github-patches | git_diff | huggingface__dataset-viewer-2058 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Better handling of k8s SIGTERM when stopping workers
We've observed recently that worker pods can take more than 1h to get killed by kubernetes (see internal [slack thread](https://huggingface.slack.com/archives/C04L6P8KNQ5/p1697641144036899) for example).
According to the k8s docs, Kubernetes first sends SIGTERM and then, after a grace period, SIGKILL to all the running processes.
If this keeps happening we need to improve the way we handle those to kill the pods faster.
One idea is to catch SIGTERM in the worker executor (the main Python process of the workers) and have it stop the worker loop (its subprocess where the actual code runs).
--- END ISSUE ---
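A rough sketch of that idea (an editorial illustration, not the project's actual implementation; the helper name `install_sigterm_handler` is hypothetical): the executor's main process installs a SIGTERM handler that stops the worker-loop subprocess immediately instead of waiting out the Kubernetes grace period. It assumes access to the `mirakuru` executor object created in `_create_worker_loop_executor`, whose `stop()` method terminates the subprocess.

```python
import logging
import signal
import sys

def install_sigterm_handler(worker_loop_executor):
    """Stop the worker-loop subprocess promptly when Kubernetes sends SIGTERM."""
    def handle_sigterm(signum, frame):
        logging.warning("SIGTERM received, stopping the worker loop and exiting.")
        worker_loop_executor.stop()  # terminates the subprocess started by mirakuru
        sys.exit(0)

    signal.signal(signal.SIGTERM, handle_sigterm)
```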
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `services/worker/src/worker/executor.py`
Content:
```
1 # SPDX-License-Identifier: Apache-2.0
2 # Copyright 2022 The HuggingFace Authors.
3 import asyncio
4 import logging
5 import os
6 import sys
7 from collections.abc import Callable
8 from datetime import datetime, timedelta
9 from random import random
10 from typing import Any, Optional, Union
11
12 import orjson
13 from filelock import FileLock
14 from libcommon.processing_graph import ProcessingGraph
15 from libcommon.queue import Queue
16 from libcommon.utils import get_datetime
17 from mirakuru import OutputExecutor, ProcessExitedWithError
18
19 from worker import start_worker_loop
20 from worker.config import AppConfig
21 from worker.job_manager import JobManager
22 from worker.job_runner_factory import JobRunnerFactory
23 from worker.loop import WorkerState
24
25 START_WORKER_LOOP_PATH = start_worker_loop.__file__
26
27
28 async def every(
29 func: Callable[..., Optional[Any]],
30 *args: Any,
31 seconds: Union[float, tuple[float, float]],
32 stop_on: Optional[Any] = None,
33 **kwargs: Any,
34 ) -> None:
35 while True:
36 out = func(*args, **kwargs)
37 if stop_on is not None and out == stop_on:
38 break
39 delay = (
40 seconds[0] + (seconds[1] - seconds[0]) * random() if isinstance(seconds, tuple) else seconds # nosec B311
41 )
42 await asyncio.sleep(delay)
43
44
45 class BadWorkerState(RuntimeError):
46 """Raised when the worker state from the worker read by the executor is not valid."""
47
48 pass
49
50
51 class WorkerExecutor:
52 def __init__(self, app_config: AppConfig, job_runner_factory: JobRunnerFactory, state_file_path: str) -> None:
53 self.app_config = app_config
54 self.job_runner_factory = job_runner_factory
55 self.state_file_path = state_file_path
56 self.processing_graph = ProcessingGraph(self.app_config.processing_graph)
57
58 max_missing_heartbeats = self.app_config.worker.max_missing_heartbeats
59 heartbeat_interval_seconds = self.app_config.worker.heartbeat_interval_seconds
60 self.max_seconds_without_heartbeat_for_zombies = heartbeat_interval_seconds * max_missing_heartbeats
61
62 self.heartbeat_interval_seconds = self.app_config.worker.heartbeat_interval_seconds
63 self.max_job_duration_seconds = self.app_config.worker.max_job_duration_seconds
64 self.kill_zombies_interval_seconds = self.app_config.worker.kill_zombies_interval_seconds
65 self.kill_long_job_interval_seconds = self.app_config.worker.kill_long_job_interval_seconds
66
67 def _create_worker_loop_executor(self) -> OutputExecutor:
68 banner = self.state_file_path
69 start_worker_loop_command = [
70 sys.executable,
71 START_WORKER_LOOP_PATH,
72 "--print-worker-state-path",
73 ]
74 return OutputExecutor(start_worker_loop_command, banner, timeout=10)
75
76 def start(self) -> None:
77 exceptions = []
78 worker_loop_executor = self._create_worker_loop_executor()
79 worker_loop_executor.start() # blocking until the banner is printed
80
81 def custom_exception_handler(loop: asyncio.AbstractEventLoop, context: dict[str, Any]) -> None:
82 nonlocal exceptions
83 # first, handle with default handler
84 loop.default_exception_handler(context)
85
86 exception = context.get("exception")
87 if exception:
88 exceptions.append(repr(exception))
89 loop.stop()
90
91 loop = asyncio.get_event_loop()
92 loop.set_exception_handler(custom_exception_handler)
93 logging.info("Starting heartbeat.")
94 loop.create_task(every(self.heartbeat, seconds=self.heartbeat_interval_seconds))
95 loop.create_task(
96 every(
97 self.kill_zombies,
98 seconds=(
99 self.kill_zombies_interval_seconds * 0.5,
100 self.kill_zombies_interval_seconds * 1.5,
101 ),
102 )
103 )
104 loop.create_task(
105 every(
106 self.kill_long_job,
107 worker_loop_executor=worker_loop_executor,
108 seconds=(
109 self.kill_long_job_interval_seconds * 0.5,
110 self.kill_long_job_interval_seconds * 1.5,
111 ),
112 )
113 )
114 loop.run_until_complete(
115 every(self.is_worker_alive, worker_loop_executor=worker_loop_executor, seconds=1.0, stop_on=False)
116 )
117 if exceptions:
118 raise RuntimeError(f"Some async tasks failed: {exceptions}")
119
120 def get_state(self) -> Optional[WorkerState]:
121 worker_state_file_path = self.state_file_path
122 if not os.path.exists(worker_state_file_path):
123 return None
124 with FileLock(f"{worker_state_file_path}.lock"):
125 try:
126 with open(worker_state_file_path, "rb") as worker_state_f:
127 worker_state = orjson.loads(worker_state_f.read())
128 return WorkerState(
129 current_job_info=worker_state.get("current_job_info"),
130 last_updated=datetime.fromisoformat(worker_state["last_updated"]),
131 )
132 except (orjson.JSONDecodeError, KeyError) as err:
133 raise BadWorkerState(f"Failed to read worker state at {worker_state_file_path}") from err
134
135 def heartbeat(self) -> None:
136 worker_state = self.get_state()
137 if worker_state and worker_state["current_job_info"]:
138 Queue().heartbeat(job_id=worker_state["current_job_info"]["job_id"])
139
140 def kill_zombies(self) -> None:
141 queue = Queue()
142 zombies = queue.get_zombies(max_seconds_without_heartbeat=self.max_seconds_without_heartbeat_for_zombies)
143 message = "Job manager crashed while running this job (missing heartbeats)."
144 for zombie in zombies:
145 job_runner = self.job_runner_factory.create_job_runner(zombie)
146 job_manager = JobManager(
147 job_info=zombie,
148 app_config=self.app_config,
149 job_runner=job_runner,
150 processing_graph=self.processing_graph,
151 )
152 job_manager.set_crashed(message=message)
153 logging.info(f"Killing zombie. Job info = {zombie}")
154
155 def kill_long_job(self, worker_loop_executor: OutputExecutor) -> None:
156 worker_state = self.get_state()
157 if worker_state and worker_state["current_job_info"]:
158 long_job = worker_state["current_job_info"]
159 last_updated = worker_state["last_updated"]
160 coefficient = 10 if long_job["params"]["dataset"] == "cerebras/SlimPajama-627B" else 1
161 if last_updated + timedelta(seconds=coefficient * self.max_job_duration_seconds) <= get_datetime():
162 _duration_seconds = int((get_datetime() - last_updated).total_seconds())
163 logging.warning(
164 f"Job {long_job} exceeded maximum duration of"
165 f" {self.max_job_duration_seconds} seconds ({_duration_seconds} seconds)."
166 )
167 try:
168 worker_loop_executor.stop() # raises an error if the worker returned exit code 1
169 finally:
170 logging.info(f"Killing a long job. Job info = {long_job}")
171 job_runner = self.job_runner_factory.create_job_runner(long_job)
172 job_manager = JobManager(
173 job_info=long_job,
174 app_config=self.app_config,
175 job_runner=job_runner,
176 processing_graph=self.processing_graph,
177 )
178 message = "Job manager was killed while running this job (job exceeded maximum duration)."
179 job_manager.set_exceeded_maximum_duration(message=message)
180
181 def is_worker_alive(self, worker_loop_executor: OutputExecutor) -> bool:
182 if worker_loop_executor.running():
183 return True
184 try:
185 worker_loop_executor.stop() # raises an error if the worker returned unexpected exit code
186 except ProcessExitedWithError as err:
187 explanation = f"exit code f{err.exit_code}"
188 if err.exit_code == -9:
189 explanation += " SIGKILL - surely an OOM"
190 error_msg = f"Worker crashed ({explanation})"
191 state = self.get_state()
192 if state and state["current_job_info"]:
193 error_msg += f"when running job_id={state['current_job_info']['job_id']}"
194 logging.error(error_msg)
195 raise
196 return False
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/services/worker/src/worker/executor.py b/services/worker/src/worker/executor.py
--- a/services/worker/src/worker/executor.py
+++ b/services/worker/src/worker/executor.py
@@ -3,6 +3,7 @@
import asyncio
import logging
import os
+import signal
import sys
from collections.abc import Callable
from datetime import datetime, timedelta
@@ -89,6 +90,7 @@
loop.stop()
loop = asyncio.get_event_loop()
+ loop.add_signal_handler(signal.SIGTERM, self.stop, worker_loop_executor)
loop.set_exception_handler(custom_exception_handler)
logging.info("Starting heartbeat.")
loop.create_task(every(self.heartbeat, seconds=self.heartbeat_interval_seconds))
@@ -117,6 +119,9 @@
if exceptions:
raise RuntimeError(f"Some async tasks failed: {exceptions}")
+ def stop(self, worker_loop_executor: OutputExecutor) -> None:
+ worker_loop_executor.stop()
+
def get_state(self) -> Optional[WorkerState]:
worker_state_file_path = self.state_file_path
if not os.path.exists(worker_state_file_path):
| {"golden_diff": "diff --git a/services/worker/src/worker/executor.py b/services/worker/src/worker/executor.py\n--- a/services/worker/src/worker/executor.py\n+++ b/services/worker/src/worker/executor.py\n@@ -3,6 +3,7 @@\n import asyncio\n import logging\n import os\n+import signal\n import sys\n from collections.abc import Callable\n from datetime import datetime, timedelta\n@@ -89,6 +90,7 @@\n loop.stop()\n \n loop = asyncio.get_event_loop()\n+ loop.add_signal_handler(signal.SIGTERM, self.stop, worker_loop_executor)\n loop.set_exception_handler(custom_exception_handler)\n logging.info(\"Starting heartbeat.\")\n loop.create_task(every(self.heartbeat, seconds=self.heartbeat_interval_seconds))\n@@ -117,6 +119,9 @@\n if exceptions:\n raise RuntimeError(f\"Some async tasks failed: {exceptions}\")\n \n+ def stop(self, worker_loop_executor: OutputExecutor) -> None:\n+ worker_loop_executor.stop()\n+\n def get_state(self) -> Optional[WorkerState]:\n worker_state_file_path = self.state_file_path\n if not os.path.exists(worker_state_file_path):\n", "issue": "Better handling of k8s SIGTERM when stopping workers\nWe've observed recently that worker pods can take more than 1h to get killed by kubernetes (see internal [slack thread](https://huggingface.slack.com/archives/C04L6P8KNQ5/p1697641144036899) for example).\r\nAccording to the k8s docs, at one point it sends a TERM and then after a grace period a SIGKILL on all the running processes.\r\nIf this keeps happening we need to improve the way we handle those to kill the pods faster.\r\n\r\nOne idea is to catch SIGTERM in the worker executor (main python process of workers) that would stop the worker loop (its subprocess where the actual code runs).\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\nimport asyncio\nimport logging\nimport os\nimport sys\nfrom collections.abc import Callable\nfrom datetime import datetime, timedelta\nfrom random import random\nfrom typing import Any, Optional, Union\n\nimport orjson\nfrom filelock import FileLock\nfrom libcommon.processing_graph import ProcessingGraph\nfrom libcommon.queue import Queue\nfrom libcommon.utils import get_datetime\nfrom mirakuru import OutputExecutor, ProcessExitedWithError\n\nfrom worker import start_worker_loop\nfrom worker.config import AppConfig\nfrom worker.job_manager import JobManager\nfrom worker.job_runner_factory import JobRunnerFactory\nfrom worker.loop import WorkerState\n\nSTART_WORKER_LOOP_PATH = start_worker_loop.__file__\n\n\nasync def every(\n func: Callable[..., Optional[Any]],\n *args: Any,\n seconds: Union[float, tuple[float, float]],\n stop_on: Optional[Any] = None,\n **kwargs: Any,\n) -> None:\n while True:\n out = func(*args, **kwargs)\n if stop_on is not None and out == stop_on:\n break\n delay = (\n seconds[0] + (seconds[1] - seconds[0]) * random() if isinstance(seconds, tuple) else seconds # nosec B311\n )\n await asyncio.sleep(delay)\n\n\nclass BadWorkerState(RuntimeError):\n \"\"\"Raised when the worker state from the worker read by the executor is not valid.\"\"\"\n\n pass\n\n\nclass WorkerExecutor:\n def __init__(self, app_config: AppConfig, job_runner_factory: JobRunnerFactory, state_file_path: str) -> None:\n self.app_config = app_config\n self.job_runner_factory = job_runner_factory\n self.state_file_path = state_file_path\n self.processing_graph = ProcessingGraph(self.app_config.processing_graph)\n\n max_missing_heartbeats = self.app_config.worker.max_missing_heartbeats\n 
heartbeat_interval_seconds = self.app_config.worker.heartbeat_interval_seconds\n self.max_seconds_without_heartbeat_for_zombies = heartbeat_interval_seconds * max_missing_heartbeats\n\n self.heartbeat_interval_seconds = self.app_config.worker.heartbeat_interval_seconds\n self.max_job_duration_seconds = self.app_config.worker.max_job_duration_seconds\n self.kill_zombies_interval_seconds = self.app_config.worker.kill_zombies_interval_seconds\n self.kill_long_job_interval_seconds = self.app_config.worker.kill_long_job_interval_seconds\n\n def _create_worker_loop_executor(self) -> OutputExecutor:\n banner = self.state_file_path\n start_worker_loop_command = [\n sys.executable,\n START_WORKER_LOOP_PATH,\n \"--print-worker-state-path\",\n ]\n return OutputExecutor(start_worker_loop_command, banner, timeout=10)\n\n def start(self) -> None:\n exceptions = []\n worker_loop_executor = self._create_worker_loop_executor()\n worker_loop_executor.start() # blocking until the banner is printed\n\n def custom_exception_handler(loop: asyncio.AbstractEventLoop, context: dict[str, Any]) -> None:\n nonlocal exceptions\n # first, handle with default handler\n loop.default_exception_handler(context)\n\n exception = context.get(\"exception\")\n if exception:\n exceptions.append(repr(exception))\n loop.stop()\n\n loop = asyncio.get_event_loop()\n loop.set_exception_handler(custom_exception_handler)\n logging.info(\"Starting heartbeat.\")\n loop.create_task(every(self.heartbeat, seconds=self.heartbeat_interval_seconds))\n loop.create_task(\n every(\n self.kill_zombies,\n seconds=(\n self.kill_zombies_interval_seconds * 0.5,\n self.kill_zombies_interval_seconds * 1.5,\n ),\n )\n )\n loop.create_task(\n every(\n self.kill_long_job,\n worker_loop_executor=worker_loop_executor,\n seconds=(\n self.kill_long_job_interval_seconds * 0.5,\n self.kill_long_job_interval_seconds * 1.5,\n ),\n )\n )\n loop.run_until_complete(\n every(self.is_worker_alive, worker_loop_executor=worker_loop_executor, seconds=1.0, stop_on=False)\n )\n if exceptions:\n raise RuntimeError(f\"Some async tasks failed: {exceptions}\")\n\n def get_state(self) -> Optional[WorkerState]:\n worker_state_file_path = self.state_file_path\n if not os.path.exists(worker_state_file_path):\n return None\n with FileLock(f\"{worker_state_file_path}.lock\"):\n try:\n with open(worker_state_file_path, \"rb\") as worker_state_f:\n worker_state = orjson.loads(worker_state_f.read())\n return WorkerState(\n current_job_info=worker_state.get(\"current_job_info\"),\n last_updated=datetime.fromisoformat(worker_state[\"last_updated\"]),\n )\n except (orjson.JSONDecodeError, KeyError) as err:\n raise BadWorkerState(f\"Failed to read worker state at {worker_state_file_path}\") from err\n\n def heartbeat(self) -> None:\n worker_state = self.get_state()\n if worker_state and worker_state[\"current_job_info\"]:\n Queue().heartbeat(job_id=worker_state[\"current_job_info\"][\"job_id\"])\n\n def kill_zombies(self) -> None:\n queue = Queue()\n zombies = queue.get_zombies(max_seconds_without_heartbeat=self.max_seconds_without_heartbeat_for_zombies)\n message = \"Job manager crashed while running this job (missing heartbeats).\"\n for zombie in zombies:\n job_runner = self.job_runner_factory.create_job_runner(zombie)\n job_manager = JobManager(\n job_info=zombie,\n app_config=self.app_config,\n job_runner=job_runner,\n processing_graph=self.processing_graph,\n )\n job_manager.set_crashed(message=message)\n logging.info(f\"Killing zombie. 
Job info = {zombie}\")\n\n def kill_long_job(self, worker_loop_executor: OutputExecutor) -> None:\n worker_state = self.get_state()\n if worker_state and worker_state[\"current_job_info\"]:\n long_job = worker_state[\"current_job_info\"]\n last_updated = worker_state[\"last_updated\"]\n coefficient = 10 if long_job[\"params\"][\"dataset\"] == \"cerebras/SlimPajama-627B\" else 1\n if last_updated + timedelta(seconds=coefficient * self.max_job_duration_seconds) <= get_datetime():\n _duration_seconds = int((get_datetime() - last_updated).total_seconds())\n logging.warning(\n f\"Job {long_job} exceeded maximum duration of\"\n f\" {self.max_job_duration_seconds} seconds ({_duration_seconds} seconds).\"\n )\n try:\n worker_loop_executor.stop() # raises an error if the worker returned exit code 1\n finally:\n logging.info(f\"Killing a long job. Job info = {long_job}\")\n job_runner = self.job_runner_factory.create_job_runner(long_job)\n job_manager = JobManager(\n job_info=long_job,\n app_config=self.app_config,\n job_runner=job_runner,\n processing_graph=self.processing_graph,\n )\n message = \"Job manager was killed while running this job (job exceeded maximum duration).\"\n job_manager.set_exceeded_maximum_duration(message=message)\n\n def is_worker_alive(self, worker_loop_executor: OutputExecutor) -> bool:\n if worker_loop_executor.running():\n return True\n try:\n worker_loop_executor.stop() # raises an error if the worker returned unexpected exit code\n except ProcessExitedWithError as err:\n explanation = f\"exit code f{err.exit_code}\"\n if err.exit_code == -9:\n explanation += \" SIGKILL - surely an OOM\"\n error_msg = f\"Worker crashed ({explanation})\"\n state = self.get_state()\n if state and state[\"current_job_info\"]:\n error_msg += f\"when running job_id={state['current_job_info']['job_id']}\"\n logging.error(error_msg)\n raise\n return False\n", "path": "services/worker/src/worker/executor.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\nimport asyncio\nimport logging\nimport os\nimport signal\nimport sys\nfrom collections.abc import Callable\nfrom datetime import datetime, timedelta\nfrom random import random\nfrom typing import Any, Optional, Union\n\nimport orjson\nfrom filelock import FileLock\nfrom libcommon.processing_graph import ProcessingGraph\nfrom libcommon.queue import Queue\nfrom libcommon.utils import get_datetime\nfrom mirakuru import OutputExecutor, ProcessExitedWithError\n\nfrom worker import start_worker_loop\nfrom worker.config import AppConfig\nfrom worker.job_manager import JobManager\nfrom worker.job_runner_factory import JobRunnerFactory\nfrom worker.loop import WorkerState\n\nSTART_WORKER_LOOP_PATH = start_worker_loop.__file__\n\n\nasync def every(\n func: Callable[..., Optional[Any]],\n *args: Any,\n seconds: Union[float, tuple[float, float]],\n stop_on: Optional[Any] = None,\n **kwargs: Any,\n) -> None:\n while True:\n out = func(*args, **kwargs)\n if stop_on is not None and out == stop_on:\n break\n delay = (\n seconds[0] + (seconds[1] - seconds[0]) * random() if isinstance(seconds, tuple) else seconds # nosec B311\n )\n await asyncio.sleep(delay)\n\n\nclass BadWorkerState(RuntimeError):\n \"\"\"Raised when the worker state from the worker read by the executor is not valid.\"\"\"\n\n pass\n\n\nclass WorkerExecutor:\n def __init__(self, app_config: AppConfig, job_runner_factory: JobRunnerFactory, state_file_path: str) -> None:\n self.app_config = app_config\n self.job_runner_factory = 
job_runner_factory\n self.state_file_path = state_file_path\n self.processing_graph = ProcessingGraph(self.app_config.processing_graph)\n\n max_missing_heartbeats = self.app_config.worker.max_missing_heartbeats\n heartbeat_interval_seconds = self.app_config.worker.heartbeat_interval_seconds\n self.max_seconds_without_heartbeat_for_zombies = heartbeat_interval_seconds * max_missing_heartbeats\n\n self.heartbeat_interval_seconds = self.app_config.worker.heartbeat_interval_seconds\n self.max_job_duration_seconds = self.app_config.worker.max_job_duration_seconds\n self.kill_zombies_interval_seconds = self.app_config.worker.kill_zombies_interval_seconds\n self.kill_long_job_interval_seconds = self.app_config.worker.kill_long_job_interval_seconds\n\n def _create_worker_loop_executor(self) -> OutputExecutor:\n banner = self.state_file_path\n start_worker_loop_command = [\n sys.executable,\n START_WORKER_LOOP_PATH,\n \"--print-worker-state-path\",\n ]\n return OutputExecutor(start_worker_loop_command, banner, timeout=10)\n\n def start(self) -> None:\n exceptions = []\n worker_loop_executor = self._create_worker_loop_executor()\n worker_loop_executor.start() # blocking until the banner is printed\n\n def custom_exception_handler(loop: asyncio.AbstractEventLoop, context: dict[str, Any]) -> None:\n nonlocal exceptions\n # first, handle with default handler\n loop.default_exception_handler(context)\n\n exception = context.get(\"exception\")\n if exception:\n exceptions.append(repr(exception))\n loop.stop()\n\n loop = asyncio.get_event_loop()\n loop.add_signal_handler(signal.SIGTERM, self.stop, worker_loop_executor)\n loop.set_exception_handler(custom_exception_handler)\n logging.info(\"Starting heartbeat.\")\n loop.create_task(every(self.heartbeat, seconds=self.heartbeat_interval_seconds))\n loop.create_task(\n every(\n self.kill_zombies,\n seconds=(\n self.kill_zombies_interval_seconds * 0.5,\n self.kill_zombies_interval_seconds * 1.5,\n ),\n )\n )\n loop.create_task(\n every(\n self.kill_long_job,\n worker_loop_executor=worker_loop_executor,\n seconds=(\n self.kill_long_job_interval_seconds * 0.5,\n self.kill_long_job_interval_seconds * 1.5,\n ),\n )\n )\n loop.run_until_complete(\n every(self.is_worker_alive, worker_loop_executor=worker_loop_executor, seconds=1.0, stop_on=False)\n )\n if exceptions:\n raise RuntimeError(f\"Some async tasks failed: {exceptions}\")\n\n def stop(self, worker_loop_executor: OutputExecutor) -> None:\n worker_loop_executor.stop()\n\n def get_state(self) -> Optional[WorkerState]:\n worker_state_file_path = self.state_file_path\n if not os.path.exists(worker_state_file_path):\n return None\n with FileLock(f\"{worker_state_file_path}.lock\"):\n try:\n with open(worker_state_file_path, \"rb\") as worker_state_f:\n worker_state = orjson.loads(worker_state_f.read())\n return WorkerState(\n current_job_info=worker_state.get(\"current_job_info\"),\n last_updated=datetime.fromisoformat(worker_state[\"last_updated\"]),\n )\n except (orjson.JSONDecodeError, KeyError) as err:\n raise BadWorkerState(f\"Failed to read worker state at {worker_state_file_path}\") from err\n\n def heartbeat(self) -> None:\n worker_state = self.get_state()\n if worker_state and worker_state[\"current_job_info\"]:\n Queue().heartbeat(job_id=worker_state[\"current_job_info\"][\"job_id\"])\n\n def kill_zombies(self) -> None:\n queue = Queue()\n zombies = queue.get_zombies(max_seconds_without_heartbeat=self.max_seconds_without_heartbeat_for_zombies)\n message = \"Job manager crashed while running this job 
(missing heartbeats).\"\n for zombie in zombies:\n job_runner = self.job_runner_factory.create_job_runner(zombie)\n job_manager = JobManager(\n job_info=zombie,\n app_config=self.app_config,\n job_runner=job_runner,\n processing_graph=self.processing_graph,\n )\n job_manager.set_crashed(message=message)\n logging.info(f\"Killing zombie. Job info = {zombie}\")\n\n def kill_long_job(self, worker_loop_executor: OutputExecutor) -> None:\n worker_state = self.get_state()\n if worker_state and worker_state[\"current_job_info\"]:\n long_job = worker_state[\"current_job_info\"]\n last_updated = worker_state[\"last_updated\"]\n coefficient = 10 if long_job[\"params\"][\"dataset\"] == \"cerebras/SlimPajama-627B\" else 1\n if last_updated + timedelta(seconds=coefficient * self.max_job_duration_seconds) <= get_datetime():\n _duration_seconds = int((get_datetime() - last_updated).total_seconds())\n logging.warning(\n f\"Job {long_job} exceeded maximum duration of\"\n f\" {self.max_job_duration_seconds} seconds ({_duration_seconds} seconds).\"\n )\n try:\n worker_loop_executor.stop() # raises an error if the worker returned exit code 1\n finally:\n logging.info(f\"Killing a long job. Job info = {long_job}\")\n job_runner = self.job_runner_factory.create_job_runner(long_job)\n job_manager = JobManager(\n job_info=long_job,\n app_config=self.app_config,\n job_runner=job_runner,\n processing_graph=self.processing_graph,\n )\n message = \"Job manager was killed while running this job (job exceeded maximum duration).\"\n job_manager.set_exceeded_maximum_duration(message=message)\n\n def is_worker_alive(self, worker_loop_executor: OutputExecutor) -> bool:\n if worker_loop_executor.running():\n return True\n try:\n worker_loop_executor.stop() # raises an error if the worker returned unexpected exit code\n except ProcessExitedWithError as err:\n explanation = f\"exit code f{err.exit_code}\"\n if err.exit_code == -9:\n explanation += \" SIGKILL - surely an OOM\"\n error_msg = f\"Worker crashed ({explanation})\"\n state = self.get_state()\n if state and state[\"current_job_info\"]:\n error_msg += f\"when running job_id={state['current_job_info']['job_id']}\"\n logging.error(error_msg)\n raise\n return False\n", "path": "services/worker/src/worker/executor.py"}]} | 2,597 | 252 |
gh_patches_debug_3357 | rasdani/github-patches | git_diff | nipy__nipype-1574 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Build documentation fails if dipy is not installed
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nipype/interfaces/dipy/base.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """ Base interfaces for dipy """
3 import os.path as op
4 import numpy as np
5 from nipype.interfaces.base import (traits, File, isdefined,
6 BaseInterface, BaseInterfaceInputSpec)
7 from ... import logging
8
9 IFLOGGER = logging.getLogger('interface')
10
11 HAVE_DIPY = True
12 try:
13 import dipy
14 except ImportError:
15 HAVE_DIPY = False
16
17
18 def no_dipy():
19 """ Check if dipy is available """
20 global HAVE_DIPY
21 return not HAVE_DIPY
22
23
24 def dipy_version():
25 """ Check dipy version """
26 if no_dipy():
27 return None
28
29 return dipy.__version__
30
31
32 class DipyBaseInterface(BaseInterface):
33
34 """
35 A base interface for py:mod:`dipy` computations
36 """
37 def __init__(self, **inputs):
38 if no_dipy():
39 IFLOGGER.error('dipy was not found')
40 # raise ImportError('dipy was not found')
41 super(DipyBaseInterface, self).__init__(**inputs)
42
43
44 class DipyBaseInterfaceInputSpec(BaseInterfaceInputSpec):
45 in_file = File(exists=True, mandatory=True, desc=('input diffusion data'))
46 in_bval = File(exists=True, mandatory=True, desc=('input b-values table'))
47 in_bvec = File(exists=True, mandatory=True, desc=('input b-vectors table'))
48 b0_thres = traits.Int(700, usedefault=True, desc=('b0 threshold'))
49 out_prefix = traits.Str(desc=('output prefix for file names'))
50
51
52 class DipyDiffusionInterface(DipyBaseInterface):
53
54 """
55 A base interface for py:mod:`dipy` computations
56 """
57 input_spec = DipyBaseInterfaceInputSpec
58
59 def _get_gradient_table(self):
60 bval = np.loadtxt(self.inputs.in_bval)
61 bvec = np.loadtxt(self.inputs.in_bvec).T
62 from dipy.core.gradients import gradient_table
63 gtab = gradient_table(bval, bvec)
64
65 gtab.b0_threshold = self.inputs.b0_thres
66 return gtab
67
68 def _gen_filename(self, name, ext=None):
69 fname, fext = op.splitext(op.basename(self.inputs.in_file))
70 if fext == '.gz':
71 fname, fext2 = op.splitext(fname)
72 fext = fext2 + fext
73
74 if not isdefined(self.inputs.out_prefix):
75 out_prefix = op.abspath(fname)
76 else:
77 out_prefix = self.inputs.out_prefix
78
79 if ext is None:
80 ext = fext
81
82 return out_prefix + '_' + name + ext
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py
--- a/nipype/interfaces/dipy/base.py
+++ b/nipype/interfaces/dipy/base.py
@@ -36,7 +36,7 @@
"""
def __init__(self, **inputs):
if no_dipy():
- IFLOGGER.error('dipy was not found')
+ IFLOGGER.warn('dipy was not found')
# raise ImportError('dipy was not found')
super(DipyBaseInterface, self).__init__(**inputs)
| {"golden_diff": "diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py\n--- a/nipype/interfaces/dipy/base.py\n+++ b/nipype/interfaces/dipy/base.py\n@@ -36,7 +36,7 @@\n \"\"\"\n def __init__(self, **inputs):\n if no_dipy():\n- IFLOGGER.error('dipy was not found')\n+ IFLOGGER.warn('dipy was not found')\n # raise ImportError('dipy was not found')\n super(DipyBaseInterface, self).__init__(**inputs)\n", "issue": "[BUG] Build documentation fails if dipy is not installed\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\" Base interfaces for dipy \"\"\"\nimport os.path as op\nimport numpy as np\nfrom nipype.interfaces.base import (traits, File, isdefined,\n BaseInterface, BaseInterfaceInputSpec)\nfrom ... import logging\n\nIFLOGGER = logging.getLogger('interface')\n\nHAVE_DIPY = True\ntry:\n import dipy\nexcept ImportError:\n HAVE_DIPY = False\n\n\ndef no_dipy():\n \"\"\" Check if dipy is available \"\"\"\n global HAVE_DIPY\n return not HAVE_DIPY\n\n\ndef dipy_version():\n \"\"\" Check dipy version \"\"\"\n if no_dipy():\n return None\n\n return dipy.__version__\n\n\nclass DipyBaseInterface(BaseInterface):\n\n \"\"\"\n A base interface for py:mod:`dipy` computations\n \"\"\"\n def __init__(self, **inputs):\n if no_dipy():\n IFLOGGER.error('dipy was not found')\n # raise ImportError('dipy was not found')\n super(DipyBaseInterface, self).__init__(**inputs)\n\n\nclass DipyBaseInterfaceInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc=('input diffusion data'))\n in_bval = File(exists=True, mandatory=True, desc=('input b-values table'))\n in_bvec = File(exists=True, mandatory=True, desc=('input b-vectors table'))\n b0_thres = traits.Int(700, usedefault=True, desc=('b0 threshold'))\n out_prefix = traits.Str(desc=('output prefix for file names'))\n\n\nclass DipyDiffusionInterface(DipyBaseInterface):\n\n \"\"\"\n A base interface for py:mod:`dipy` computations\n \"\"\"\n input_spec = DipyBaseInterfaceInputSpec\n\n def _get_gradient_table(self):\n bval = np.loadtxt(self.inputs.in_bval)\n bvec = np.loadtxt(self.inputs.in_bvec).T\n from dipy.core.gradients import gradient_table\n gtab = gradient_table(bval, bvec)\n\n gtab.b0_threshold = self.inputs.b0_thres\n return gtab\n\n def _gen_filename(self, name, ext=None):\n fname, fext = op.splitext(op.basename(self.inputs.in_file))\n if fext == '.gz':\n fname, fext2 = op.splitext(fname)\n fext = fext2 + fext\n\n if not isdefined(self.inputs.out_prefix):\n out_prefix = op.abspath(fname)\n else:\n out_prefix = self.inputs.out_prefix\n\n if ext is None:\n ext = fext\n\n return out_prefix + '_' + name + ext\n", "path": "nipype/interfaces/dipy/base.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\" Base interfaces for dipy \"\"\"\nimport os.path as op\nimport numpy as np\nfrom nipype.interfaces.base import (traits, File, isdefined,\n BaseInterface, BaseInterfaceInputSpec)\nfrom ... 
import logging\n\nIFLOGGER = logging.getLogger('interface')\n\nHAVE_DIPY = True\ntry:\n import dipy\nexcept ImportError:\n HAVE_DIPY = False\n\n\ndef no_dipy():\n \"\"\" Check if dipy is available \"\"\"\n global HAVE_DIPY\n return not HAVE_DIPY\n\n\ndef dipy_version():\n \"\"\" Check dipy version \"\"\"\n if no_dipy():\n return None\n\n return dipy.__version__\n\n\nclass DipyBaseInterface(BaseInterface):\n\n \"\"\"\n A base interface for py:mod:`dipy` computations\n \"\"\"\n def __init__(self, **inputs):\n if no_dipy():\n IFLOGGER.warn('dipy was not found')\n # raise ImportError('dipy was not found')\n super(DipyBaseInterface, self).__init__(**inputs)\n\n\nclass DipyBaseInterfaceInputSpec(BaseInterfaceInputSpec):\n in_file = File(exists=True, mandatory=True, desc=('input diffusion data'))\n in_bval = File(exists=True, mandatory=True, desc=('input b-values table'))\n in_bvec = File(exists=True, mandatory=True, desc=('input b-vectors table'))\n b0_thres = traits.Int(700, usedefault=True, desc=('b0 threshold'))\n out_prefix = traits.Str(desc=('output prefix for file names'))\n\n\nclass DipyDiffusionInterface(DipyBaseInterface):\n\n \"\"\"\n A base interface for py:mod:`dipy` computations\n \"\"\"\n input_spec = DipyBaseInterfaceInputSpec\n\n def _get_gradient_table(self):\n bval = np.loadtxt(self.inputs.in_bval)\n bvec = np.loadtxt(self.inputs.in_bvec).T\n from dipy.core.gradients import gradient_table\n gtab = gradient_table(bval, bvec)\n\n gtab.b0_threshold = self.inputs.b0_thres\n return gtab\n\n def _gen_filename(self, name, ext=None):\n fname, fext = op.splitext(op.basename(self.inputs.in_file))\n if fext == '.gz':\n fname, fext2 = op.splitext(fname)\n fext = fext2 + fext\n\n if not isdefined(self.inputs.out_prefix):\n out_prefix = op.abspath(fname)\n else:\n out_prefix = self.inputs.out_prefix\n\n if ext is None:\n ext = fext\n\n return out_prefix + '_' + name + ext\n", "path": "nipype/interfaces/dipy/base.py"}]} | 1,005 | 123 |
gh_patches_debug_60523 | rasdani/github-patches | git_diff | streamlit__streamlit-2811 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sliders should show current value [regression] [Baseweb]
# Summary
(via @tvst: )
Our sliders right now require you to hover in order to see the selected value. This makes it really hard to understand what the user selected. I reported this before, but I just spent some time debugging my app thinking it was broken because I was reading the wrong slider value. Frustrating.
I understand this is the new behavior of sliders in Base Web, but we have alternatives:
**1. Roll back Base Web to a previous version**
This is the preferable solution in order to get this fix out ASAP. Even if we decide it's only a temporary solution.
2. Try to find a solution using the latest Base Web
3. Copy/paste the old Baseweb slider into our own repo and modify it there. Their slider is based on another library, btw (I forget which), so maybe we should just use that library directly instead?
## Is this a regression?
yes
# Debug info
- Streamlit version: 0.75-special
Allow hiding tracebacks
Currently, when a Streamlit app throws an exception, we print the traceback to the browser. This isn't necessarily the right thing to do for all apps; we should allow this to be configurable.
Maybe a `[client] showTracebacks = false` option? And presumably, if tracebacks are disabled, we should filter them at the server level, so that the client never even receives the string, in case the user is worried about leaking internal app details.
(Related discussion here: https://discuss.streamlit.io/t/dont-show-users-tracebacks/1746)
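
To make the server-side filtering idea concrete, here is a rough, purely hypothetical sketch. The function name, the `show_tracebacks` parameter, and the config plumbing are assumptions made for illustration only — they are not Streamlit's actual API:

```python
import traceback


def format_exception_for_client(exc: BaseException, show_tracebacks: bool) -> str:
    # Hypothetical helper: the server decides what the browser is allowed to see.
    if show_tracebacks:
        # Current behavior: send the full traceback string to the client.
        return "".join(traceback.format_exception(type(exc), exc, exc.__traceback__))
    # With showTracebacks = false, only a generic message would reach the client,
    # so internal file paths and stack frames are never sent over the wire.
    return f"{type(exc).__name__}: this app encountered an error (details hidden by config)"
```

Under this sketch, the proposed `[client] showTracebacks` value would be read from the app's config like any other client option and passed into the helper above before the error message is serialized for the browser.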
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `e2e/scripts/st_columns.py`
Content:
```
1 # Copyright 2018-2021 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import streamlit as st
16
17 CAT_IMAGE = "https://images.unsplash.com/photo-1552933529-e359b2477252?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=950&q=80"
18
19 if st.button("Layout should not shift when this is pressed"):
20 st.write("Pressed!")
21
22 # Same-width columns
23 c1, c2, c3 = st.beta_columns(3)
24 c1.image(CAT_IMAGE, use_column_width=True)
25 c2.image(CAT_IMAGE, use_column_width=True)
26 c3.image(CAT_IMAGE, use_column_width=True)
27
28
29 # Variable-width columns
30 for c in st.beta_columns((1, 2, 4, 8)):
31 c.image(CAT_IMAGE, use_column_width=True)
32
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/e2e/scripts/st_columns.py b/e2e/scripts/st_columns.py
--- a/e2e/scripts/st_columns.py
+++ b/e2e/scripts/st_columns.py
@@ -27,5 +27,5 @@
# Variable-width columns
-for c in st.beta_columns((1, 2, 4, 8)):
+for c in st.beta_columns((1, 2, 3, 4)):
c.image(CAT_IMAGE, use_column_width=True)
| {"golden_diff": "diff --git a/e2e/scripts/st_columns.py b/e2e/scripts/st_columns.py\n--- a/e2e/scripts/st_columns.py\n+++ b/e2e/scripts/st_columns.py\n@@ -27,5 +27,5 @@\n \n \n # Variable-width columns\n-for c in st.beta_columns((1, 2, 4, 8)):\n+for c in st.beta_columns((1, 2, 3, 4)):\n c.image(CAT_IMAGE, use_column_width=True)\n", "issue": "Sliders should show current value [regression] [Baseweb]\n# Summary\r\n\r\n(via @tvst: )\r\n\r\nOur sliders right now require you to hover in order to see the selected value. This makes it really hard to understand what the user selected. I reported this before, but I just spent some time debugging my app thinking it was broken because I was reading the wrong slider value. Frustrating.\r\n\r\nI understand this is the new behavior of sliders in Base Web, but we have alternatives:\r\n\r\n**1. Roll back Base Web to a previous version**\r\n\r\n This is the preferable solution in order to get this fix out ASAP. Even if we decide it's only a temporary solution.\r\n\r\n2. Try to find a solution using the latest Base Web\r\n\r\n3. Copy/paste the old Baseweb slider into our own repo and modify it there. Their slider is based on another library, btw (I forget which), so maybe we should just use that library directly instead?\r\n\r\n\r\n## Is this a regression?\r\n\r\nyes \r\n\r\n# Debug info\r\n\r\n- Streamlit version: 0.75-special\nAllow hiding tracebacks\nCurrently, when a Streamlit app throws an exception, we print the traceback to the browser. This isn't necessarily the right thing to do for all apps; we should allow this to be configurable.\r\n\r\nMaybe a `[client] showTracebacks = false` option? And presumably, if tracebacks are disabled, we should filter them at the server level, so that the client never even receives the string, in case the user is worried about leaking internal app details.\r\n\r\n(Related discussion here: https://discuss.streamlit.io/t/dont-show-users-tracebacks/1746)\n", "before_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nCAT_IMAGE = \"https://images.unsplash.com/photo-1552933529-e359b2477252?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=950&q=80\"\n\nif st.button(\"Layout should not shift when this is pressed\"):\n st.write(\"Pressed!\")\n\n# Same-width columns\nc1, c2, c3 = st.beta_columns(3)\nc1.image(CAT_IMAGE, use_column_width=True)\nc2.image(CAT_IMAGE, use_column_width=True)\nc3.image(CAT_IMAGE, use_column_width=True)\n\n\n# Variable-width columns\nfor c in st.beta_columns((1, 2, 4, 8)):\n c.image(CAT_IMAGE, use_column_width=True)\n", "path": "e2e/scripts/st_columns.py"}], "after_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to 
in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nCAT_IMAGE = \"https://images.unsplash.com/photo-1552933529-e359b2477252?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=950&q=80\"\n\nif st.button(\"Layout should not shift when this is pressed\"):\n st.write(\"Pressed!\")\n\n# Same-width columns\nc1, c2, c3 = st.beta_columns(3)\nc1.image(CAT_IMAGE, use_column_width=True)\nc2.image(CAT_IMAGE, use_column_width=True)\nc3.image(CAT_IMAGE, use_column_width=True)\n\n\n# Variable-width columns\nfor c in st.beta_columns((1, 2, 3, 4)):\n c.image(CAT_IMAGE, use_column_width=True)\n", "path": "e2e/scripts/st_columns.py"}]} | 1,005 | 107 |
gh_patches_debug_18992 | rasdani/github-patches | git_diff | pytorch__vision-6397 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inconsistency between RoIPool's forward and roi_pool
### 🚀 The feature
Currently the functional `roi_pool` method supports both `Tensor` and `List[Tensor]` for boxes, but the module forward function of `RoIPool` expects boxes (rois) to be a `Tensor` only. Can we change the function signature to support both?
The inconsistency can cause issues during torchscripting, for example:
```
from typing import List
import torch
from torch import Tensor
from torchvision.ops import RoIPool
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.pool = RoIPool(output_size=[3,3], spatial_scale=1.0)
def forward(self, imgs, boxes: List[Tensor]):
return self.pool(imgs, boxes)
model = Model()
torch.jit.script(model)
```
Raise the following error:
> forward(__torch__.torchvision.ops.roi_pool.RoIPool self, Tensor input, Tensor rois) -> Tensor:
> Expected a value of type 'Tensor' for argument 'rois' but instead found type 'List[Tensor]'.
> Empty lists default to List[Tensor]. Add a variable annotation to the assignment to create an empty list of another type (torch.jit.annotate(List[T, []]) where T is the type of elements in the list for Python 2)
> :
> File "<ipython-input-8-9536dd2f0cc1>", line 12
> def forward(self, imgs, boxes: List[Tensor]):
> return self.pool(imgs, boxes)
> ~~~~~~~~~ <--- HERE
### Motivation, pitch
Make sure the model can be scripted when using a list of boxes as the input.
### Alternatives
Only support Tensor as the input; users are required to convert the bounding boxes of different images into one tensor. Potentially make `_utils.convert_boxes_to_roi_format` public so that users don't need to write the conversion function.
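
For illustration, a minimal sketch of that conversion (this mirrors what `_utils.convert_boxes_to_roi_format` does internally; the helper name `boxes_to_rois` is made up for the example, and the exact torchvision implementation may differ):

```python
from typing import List

import torch
from torch import Tensor


def boxes_to_rois(boxes: List[Tensor]) -> Tensor:
    # Concatenate per-image [L_i, 4] boxes in (x1, y1, x2, y2) format into a
    # single [K, 5] rois tensor, prepending each box's batch index as column 0.
    rois = []
    for batch_idx, b in enumerate(boxes):
        idx = torch.full((b.shape[0], 1), batch_idx, dtype=b.dtype, device=b.device)
        rois.append(torch.cat([idx, b], dim=1))
    return torch.cat(rois, dim=0)
```

With something like this made public, callers could pre-convert their per-image boxes and always pass a single `Tensor` to `RoIPool.forward`, at the cost of extra boilerplate on the user side.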
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/ops/roi_align.py`
Content:
```
1 from typing import List, Union
2
3 import torch
4 from torch import nn, Tensor
5 from torch.jit.annotations import BroadcastingList2
6 from torch.nn.modules.utils import _pair
7 from torchvision.extension import _assert_has_ops
8
9 from ..utils import _log_api_usage_once
10 from ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format
11
12
13 def roi_align(
14 input: Tensor,
15 boxes: Union[Tensor, List[Tensor]],
16 output_size: BroadcastingList2[int],
17 spatial_scale: float = 1.0,
18 sampling_ratio: int = -1,
19 aligned: bool = False,
20 ) -> Tensor:
21 """
22 Performs Region of Interest (RoI) Align operator with average pooling, as described in Mask R-CNN.
23
24 Args:
25 input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element
26 contains ``C`` feature maps of dimensions ``H x W``.
27 If the tensor is quantized, we expect a batch size of ``N == 1``.
28 boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
29 format where the regions will be taken from.
30 The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
31 If a single Tensor is passed, then the first column should
32 contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.
33 If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i
34 in the batch.
35 output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling
36 is performed, as (height, width).
37 spatial_scale (float): a scaling factor that maps the box coordinates to
38 the input coordinates. For example, if your boxes are defined on the scale
39 of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of
40 the original image), you'll want to set this to 0.5. Default: 1.0
41 sampling_ratio (int): number of sampling points in the interpolation grid
42 used to compute the output value of each pooled output bin. If > 0,
43 then exactly ``sampling_ratio x sampling_ratio`` sampling points per bin are used. If
44 <= 0, then an adaptive number of grid points are used (computed as
45 ``ceil(roi_width / output_width)``, and likewise for height). Default: -1
46 aligned (bool): If False, use the legacy implementation.
47 If True, pixel shift the box coordinates it by -0.5 for a better alignment with the two
48 neighboring pixel indices. This version is used in Detectron2
49
50 Returns:
51 Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.
52 """
53 if not torch.jit.is_scripting() and not torch.jit.is_tracing():
54 _log_api_usage_once(roi_align)
55 _assert_has_ops()
56 check_roi_boxes_shape(boxes)
57 rois = boxes
58 output_size = _pair(output_size)
59 if not isinstance(rois, torch.Tensor):
60 rois = convert_boxes_to_roi_format(rois)
61 return torch.ops.torchvision.roi_align(
62 input, rois, spatial_scale, output_size[0], output_size[1], sampling_ratio, aligned
63 )
64
65
66 class RoIAlign(nn.Module):
67 """
68 See :func:`roi_align`.
69 """
70
71 def __init__(
72 self,
73 output_size: BroadcastingList2[int],
74 spatial_scale: float,
75 sampling_ratio: int,
76 aligned: bool = False,
77 ):
78 super().__init__()
79 _log_api_usage_once(self)
80 self.output_size = output_size
81 self.spatial_scale = spatial_scale
82 self.sampling_ratio = sampling_ratio
83 self.aligned = aligned
84
85 def forward(self, input: Tensor, rois: Tensor) -> Tensor:
86 return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned)
87
88 def __repr__(self) -> str:
89 s = (
90 f"{self.__class__.__name__}("
91 f"output_size={self.output_size}"
92 f", spatial_scale={self.spatial_scale}"
93 f", sampling_ratio={self.sampling_ratio}"
94 f", aligned={self.aligned}"
95 f")"
96 )
97 return s
98
```
Path: `torchvision/ops/roi_pool.py`
Content:
```
1 from typing import List, Union
2
3 import torch
4 from torch import nn, Tensor
5 from torch.jit.annotations import BroadcastingList2
6 from torch.nn.modules.utils import _pair
7 from torchvision.extension import _assert_has_ops
8
9 from ..utils import _log_api_usage_once
10 from ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format
11
12
13 def roi_pool(
14 input: Tensor,
15 boxes: Union[Tensor, List[Tensor]],
16 output_size: BroadcastingList2[int],
17 spatial_scale: float = 1.0,
18 ) -> Tensor:
19 """
20 Performs Region of Interest (RoI) Pool operator described in Fast R-CNN
21
22 Args:
23 input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element
24 contains ``C`` feature maps of dimensions ``H x W``.
25 boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)
26 format where the regions will be taken from.
27 The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
28 If a single Tensor is passed, then the first column should
29 contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.
30 If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i
31 in the batch.
32 output_size (int or Tuple[int, int]): the size of the output after the cropping
33 is performed, as (height, width)
34 spatial_scale (float): a scaling factor that maps the box coordinates to
35 the input coordinates. For example, if your boxes are defined on the scale
36 of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of
37 the original image), you'll want to set this to 0.5. Default: 1.0
38
39 Returns:
40 Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.
41 """
42 if not torch.jit.is_scripting() and not torch.jit.is_tracing():
43 _log_api_usage_once(roi_pool)
44 _assert_has_ops()
45 check_roi_boxes_shape(boxes)
46 rois = boxes
47 output_size = _pair(output_size)
48 if not isinstance(rois, torch.Tensor):
49 rois = convert_boxes_to_roi_format(rois)
50 output, _ = torch.ops.torchvision.roi_pool(input, rois, spatial_scale, output_size[0], output_size[1])
51 return output
52
53
54 class RoIPool(nn.Module):
55 """
56 See :func:`roi_pool`.
57 """
58
59 def __init__(self, output_size: BroadcastingList2[int], spatial_scale: float):
60 super().__init__()
61 _log_api_usage_once(self)
62 self.output_size = output_size
63 self.spatial_scale = spatial_scale
64
65 def forward(self, input: Tensor, rois: Tensor) -> Tensor:
66 return roi_pool(input, rois, self.output_size, self.spatial_scale)
67
68 def __repr__(self) -> str:
69 s = f"{self.__class__.__name__}(output_size={self.output_size}, spatial_scale={self.spatial_scale})"
70 return s
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchvision/ops/roi_align.py b/torchvision/ops/roi_align.py
--- a/torchvision/ops/roi_align.py
+++ b/torchvision/ops/roi_align.py
@@ -82,7 +82,7 @@
self.sampling_ratio = sampling_ratio
self.aligned = aligned
- def forward(self, input: Tensor, rois: Tensor) -> Tensor:
+ def forward(self, input: Tensor, rois: Union[Tensor, List[Tensor]]) -> Tensor:
return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned)
def __repr__(self) -> str:
diff --git a/torchvision/ops/roi_pool.py b/torchvision/ops/roi_pool.py
--- a/torchvision/ops/roi_pool.py
+++ b/torchvision/ops/roi_pool.py
@@ -62,7 +62,7 @@
self.output_size = output_size
self.spatial_scale = spatial_scale
- def forward(self, input: Tensor, rois: Tensor) -> Tensor:
+ def forward(self, input: Tensor, rois: Union[Tensor, List[Tensor]]) -> Tensor:
return roi_pool(input, rois, self.output_size, self.spatial_scale)
def __repr__(self) -> str:
| {"golden_diff": "diff --git a/torchvision/ops/roi_align.py b/torchvision/ops/roi_align.py\n--- a/torchvision/ops/roi_align.py\n+++ b/torchvision/ops/roi_align.py\n@@ -82,7 +82,7 @@\n self.sampling_ratio = sampling_ratio\n self.aligned = aligned\n \n- def forward(self, input: Tensor, rois: Tensor) -> Tensor:\n+ def forward(self, input: Tensor, rois: Union[Tensor, List[Tensor]]) -> Tensor:\n return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned)\n \n def __repr__(self) -> str:\ndiff --git a/torchvision/ops/roi_pool.py b/torchvision/ops/roi_pool.py\n--- a/torchvision/ops/roi_pool.py\n+++ b/torchvision/ops/roi_pool.py\n@@ -62,7 +62,7 @@\n self.output_size = output_size\n self.spatial_scale = spatial_scale\n \n- def forward(self, input: Tensor, rois: Tensor) -> Tensor:\n+ def forward(self, input: Tensor, rois: Union[Tensor, List[Tensor]]) -> Tensor:\n return roi_pool(input, rois, self.output_size, self.spatial_scale)\n \n def __repr__(self) -> str:\n", "issue": "Inconsistency between RoIPool's forward and roi_pool\n### \ud83d\ude80 The feature\n\nCurrently the functional `roi_pool` method supports both `Tensor` and `List[Tensor]` for boxes, but the module forward function of `RoIPool` expects boxes(rois) to be `Tensor` only. Can we change the function signature to support both? \r\n\r\nThe inconsistency can cause issue during torchscripting, for example:\r\n```\r\nfrom typing import List\r\nimport torch\r\nfrom torch import Tensor\r\nfrom torchvision.ops import RoIPool\r\n\r\nclass Model(torch.nn.Module):\r\n def __init__(self):\r\n super().__init__()\r\n self.pool = RoIPool(output_size=[3,3], spatial_scale=1.0)\r\n\r\n def forward(self, imgs, boxes: List[Tensor]):\r\n return self.pool(imgs, boxes)\r\n\r\nmodel = Model()\r\ntorch.jit.script(model)\r\n```\r\nRaise the following error:\r\n\r\n> forward(__torch__.torchvision.ops.roi_pool.RoIPool self, Tensor input, Tensor rois) -> Tensor:\r\n> Expected a value of type 'Tensor' for argument 'rois' but instead found type 'List[Tensor]'.\r\n> Empty lists default to List[Tensor]. Add a variable annotation to the assignment to create an empty list of another type (torch.jit.annotate(List[T, []]) where T is the type of elements in the list for Python 2)\r\n> :\r\n> File \"<ipython-input-8-9536dd2f0cc1>\", line 12\r\n> def forward(self, imgs, boxes: List[Tensor]):\r\n> return self.pool(imgs, boxes)\r\n> ~~~~~~~~~ <--- HERE\n\n### Motivation, pitch\n\nMake sure the model can be scriptable when using list of boxes as the input.\n\n### Alternatives\n\nOnly support Tensor as the input, user are required to convert bbox of different images into one tensor. 
Potentially make `_utils.convert_boxes_to_roi_format` public so that users don't need to write the conversion function.\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "from typing import List, Union\n\nimport torch\nfrom torch import nn, Tensor\nfrom torch.jit.annotations import BroadcastingList2\nfrom torch.nn.modules.utils import _pair\nfrom torchvision.extension import _assert_has_ops\n\nfrom ..utils import _log_api_usage_once\nfrom ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format\n\n\ndef roi_align(\n input: Tensor,\n boxes: Union[Tensor, List[Tensor]],\n output_size: BroadcastingList2[int],\n spatial_scale: float = 1.0,\n sampling_ratio: int = -1,\n aligned: bool = False,\n) -> Tensor:\n \"\"\"\n Performs Region of Interest (RoI) Align operator with average pooling, as described in Mask R-CNN.\n\n Args:\n input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element\n contains ``C`` feature maps of dimensions ``H x W``.\n If the tensor is quantized, we expect a batch size of ``N == 1``.\n boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)\n format where the regions will be taken from.\n The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.\n If a single Tensor is passed, then the first column should\n contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.\n If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i\n in the batch.\n output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling\n is performed, as (height, width).\n spatial_scale (float): a scaling factor that maps the box coordinates to\n the input coordinates. For example, if your boxes are defined on the scale\n of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of\n the original image), you'll want to set this to 0.5. Default: 1.0\n sampling_ratio (int): number of sampling points in the interpolation grid\n used to compute the output value of each pooled output bin. If > 0,\n then exactly ``sampling_ratio x sampling_ratio`` sampling points per bin are used. If\n <= 0, then an adaptive number of grid points are used (computed as\n ``ceil(roi_width / output_width)``, and likewise for height). Default: -1\n aligned (bool): If False, use the legacy implementation.\n If True, pixel shift the box coordinates it by -0.5 for a better alignment with the two\n neighboring pixel indices. 
This version is used in Detectron2\n\n Returns:\n Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.\n \"\"\"\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(roi_align)\n _assert_has_ops()\n check_roi_boxes_shape(boxes)\n rois = boxes\n output_size = _pair(output_size)\n if not isinstance(rois, torch.Tensor):\n rois = convert_boxes_to_roi_format(rois)\n return torch.ops.torchvision.roi_align(\n input, rois, spatial_scale, output_size[0], output_size[1], sampling_ratio, aligned\n )\n\n\nclass RoIAlign(nn.Module):\n \"\"\"\n See :func:`roi_align`.\n \"\"\"\n\n def __init__(\n self,\n output_size: BroadcastingList2[int],\n spatial_scale: float,\n sampling_ratio: int,\n aligned: bool = False,\n ):\n super().__init__()\n _log_api_usage_once(self)\n self.output_size = output_size\n self.spatial_scale = spatial_scale\n self.sampling_ratio = sampling_ratio\n self.aligned = aligned\n\n def forward(self, input: Tensor, rois: Tensor) -> Tensor:\n return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned)\n\n def __repr__(self) -> str:\n s = (\n f\"{self.__class__.__name__}(\"\n f\"output_size={self.output_size}\"\n f\", spatial_scale={self.spatial_scale}\"\n f\", sampling_ratio={self.sampling_ratio}\"\n f\", aligned={self.aligned}\"\n f\")\"\n )\n return s\n", "path": "torchvision/ops/roi_align.py"}, {"content": "from typing import List, Union\n\nimport torch\nfrom torch import nn, Tensor\nfrom torch.jit.annotations import BroadcastingList2\nfrom torch.nn.modules.utils import _pair\nfrom torchvision.extension import _assert_has_ops\n\nfrom ..utils import _log_api_usage_once\nfrom ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format\n\n\ndef roi_pool(\n input: Tensor,\n boxes: Union[Tensor, List[Tensor]],\n output_size: BroadcastingList2[int],\n spatial_scale: float = 1.0,\n) -> Tensor:\n \"\"\"\n Performs Region of Interest (RoI) Pool operator described in Fast R-CNN\n\n Args:\n input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element\n contains ``C`` feature maps of dimensions ``H x W``.\n boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)\n format where the regions will be taken from.\n The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.\n If a single Tensor is passed, then the first column should\n contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.\n If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i\n in the batch.\n output_size (int or Tuple[int, int]): the size of the output after the cropping\n is performed, as (height, width)\n spatial_scale (float): a scaling factor that maps the box coordinates to\n the input coordinates. For example, if your boxes are defined on the scale\n of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of\n the original image), you'll want to set this to 0.5. 
Default: 1.0\n\n Returns:\n Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.\n \"\"\"\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(roi_pool)\n _assert_has_ops()\n check_roi_boxes_shape(boxes)\n rois = boxes\n output_size = _pair(output_size)\n if not isinstance(rois, torch.Tensor):\n rois = convert_boxes_to_roi_format(rois)\n output, _ = torch.ops.torchvision.roi_pool(input, rois, spatial_scale, output_size[0], output_size[1])\n return output\n\n\nclass RoIPool(nn.Module):\n \"\"\"\n See :func:`roi_pool`.\n \"\"\"\n\n def __init__(self, output_size: BroadcastingList2[int], spatial_scale: float):\n super().__init__()\n _log_api_usage_once(self)\n self.output_size = output_size\n self.spatial_scale = spatial_scale\n\n def forward(self, input: Tensor, rois: Tensor) -> Tensor:\n return roi_pool(input, rois, self.output_size, self.spatial_scale)\n\n def __repr__(self) -> str:\n s = f\"{self.__class__.__name__}(output_size={self.output_size}, spatial_scale={self.spatial_scale})\"\n return s\n", "path": "torchvision/ops/roi_pool.py"}], "after_files": [{"content": "from typing import List, Union\n\nimport torch\nfrom torch import nn, Tensor\nfrom torch.jit.annotations import BroadcastingList2\nfrom torch.nn.modules.utils import _pair\nfrom torchvision.extension import _assert_has_ops\n\nfrom ..utils import _log_api_usage_once\nfrom ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format\n\n\ndef roi_align(\n input: Tensor,\n boxes: Union[Tensor, List[Tensor]],\n output_size: BroadcastingList2[int],\n spatial_scale: float = 1.0,\n sampling_ratio: int = -1,\n aligned: bool = False,\n) -> Tensor:\n \"\"\"\n Performs Region of Interest (RoI) Align operator with average pooling, as described in Mask R-CNN.\n\n Args:\n input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element\n contains ``C`` feature maps of dimensions ``H x W``.\n If the tensor is quantized, we expect a batch size of ``N == 1``.\n boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)\n format where the regions will be taken from.\n The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.\n If a single Tensor is passed, then the first column should\n contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.\n If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i\n in the batch.\n output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling\n is performed, as (height, width).\n spatial_scale (float): a scaling factor that maps the box coordinates to\n the input coordinates. For example, if your boxes are defined on the scale\n of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of\n the original image), you'll want to set this to 0.5. Default: 1.0\n sampling_ratio (int): number of sampling points in the interpolation grid\n used to compute the output value of each pooled output bin. If > 0,\n then exactly ``sampling_ratio x sampling_ratio`` sampling points per bin are used. If\n <= 0, then an adaptive number of grid points are used (computed as\n ``ceil(roi_width / output_width)``, and likewise for height). Default: -1\n aligned (bool): If False, use the legacy implementation.\n If True, pixel shift the box coordinates it by -0.5 for a better alignment with the two\n neighboring pixel indices. 
This version is used in Detectron2\n\n Returns:\n Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.\n \"\"\"\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(roi_align)\n _assert_has_ops()\n check_roi_boxes_shape(boxes)\n rois = boxes\n output_size = _pair(output_size)\n if not isinstance(rois, torch.Tensor):\n rois = convert_boxes_to_roi_format(rois)\n return torch.ops.torchvision.roi_align(\n input, rois, spatial_scale, output_size[0], output_size[1], sampling_ratio, aligned\n )\n\n\nclass RoIAlign(nn.Module):\n \"\"\"\n See :func:`roi_align`.\n \"\"\"\n\n def __init__(\n self,\n output_size: BroadcastingList2[int],\n spatial_scale: float,\n sampling_ratio: int,\n aligned: bool = False,\n ):\n super().__init__()\n _log_api_usage_once(self)\n self.output_size = output_size\n self.spatial_scale = spatial_scale\n self.sampling_ratio = sampling_ratio\n self.aligned = aligned\n\n def forward(self, input: Tensor, rois: Union[Tensor, List[Tensor]]) -> Tensor:\n return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned)\n\n def __repr__(self) -> str:\n s = (\n f\"{self.__class__.__name__}(\"\n f\"output_size={self.output_size}\"\n f\", spatial_scale={self.spatial_scale}\"\n f\", sampling_ratio={self.sampling_ratio}\"\n f\", aligned={self.aligned}\"\n f\")\"\n )\n return s\n", "path": "torchvision/ops/roi_align.py"}, {"content": "from typing import List, Union\n\nimport torch\nfrom torch import nn, Tensor\nfrom torch.jit.annotations import BroadcastingList2\nfrom torch.nn.modules.utils import _pair\nfrom torchvision.extension import _assert_has_ops\n\nfrom ..utils import _log_api_usage_once\nfrom ._utils import check_roi_boxes_shape, convert_boxes_to_roi_format\n\n\ndef roi_pool(\n input: Tensor,\n boxes: Union[Tensor, List[Tensor]],\n output_size: BroadcastingList2[int],\n spatial_scale: float = 1.0,\n) -> Tensor:\n \"\"\"\n Performs Region of Interest (RoI) Pool operator described in Fast R-CNN\n\n Args:\n input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element\n contains ``C`` feature maps of dimensions ``H x W``.\n boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)\n format where the regions will be taken from.\n The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``.\n If a single Tensor is passed, then the first column should\n contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``.\n If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i\n in the batch.\n output_size (int or Tuple[int, int]): the size of the output after the cropping\n is performed, as (height, width)\n spatial_scale (float): a scaling factor that maps the box coordinates to\n the input coordinates. For example, if your boxes are defined on the scale\n of a 224x224 image and your input is a 112x112 feature map (resulting from a 0.5x scaling of\n the original image), you'll want to set this to 0.5. 
Default: 1.0\n\n Returns:\n Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs.\n \"\"\"\n if not torch.jit.is_scripting() and not torch.jit.is_tracing():\n _log_api_usage_once(roi_pool)\n _assert_has_ops()\n check_roi_boxes_shape(boxes)\n rois = boxes\n output_size = _pair(output_size)\n if not isinstance(rois, torch.Tensor):\n rois = convert_boxes_to_roi_format(rois)\n output, _ = torch.ops.torchvision.roi_pool(input, rois, spatial_scale, output_size[0], output_size[1])\n return output\n\n\nclass RoIPool(nn.Module):\n \"\"\"\n See :func:`roi_pool`.\n \"\"\"\n\n def __init__(self, output_size: BroadcastingList2[int], spatial_scale: float):\n super().__init__()\n _log_api_usage_once(self)\n self.output_size = output_size\n self.spatial_scale = spatial_scale\n\n def forward(self, input: Tensor, rois: Union[Tensor, List[Tensor]]) -> Tensor:\n return roi_pool(input, rois, self.output_size, self.spatial_scale)\n\n def __repr__(self) -> str:\n s = f\"{self.__class__.__name__}(output_size={self.output_size}, spatial_scale={self.spatial_scale})\"\n return s\n", "path": "torchvision/ops/roi_pool.py"}]} | 2,793 | 301 |
gh_patches_debug_23561 | rasdani/github-patches | git_diff | rasterio__rasterio-2110 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cast nodata value before filling in rio calc
At line 192, `res = res.filled(kwargs['nodata'])`, we need to convert that string to a numpy scalar or array of `res` dtype.
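For illustration, a minimal sketch of the intended conversion (assuming `res` is a NumPy masked array and the `nodata` value arrives as a string; the variable names are hypothetical):

```python
import numpy as np

res = np.ma.masked_array([1.0, 2.0, 3.0], mask=[False, True, False])
nodata = "255"  # command-line creation options are parsed as strings

# Cast the nodata string to a scalar of res's dtype before filling.
fill_value = res.dtype.type(float(nodata))
filled = res.filled(fill_value)  # masked entries become 255.0
```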
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/rio/calc.py`
Content:
```
1 """$ rio calc"""
2
3 from __future__ import division
4
5 from collections import OrderedDict
6 from distutils.version import LooseVersion
7 import math
8
9 import click
10 import snuggs
11
12 import rasterio
13 from rasterio.features import sieve
14 from rasterio.fill import fillnodata
15 from rasterio.windows import Window
16 from rasterio.rio import options
17 from rasterio.rio.helpers import resolve_inout
18
19
20 def _get_bands(inputs, sources, d, i=None):
21 """Get a rasterio.Band object from calc's inputs"""
22 idx = d if d in dict(inputs) else int(d) - 1
23 src = sources[idx]
24 return (rasterio.band(src, i) if i else
25 [rasterio.band(src, j) for j in src.indexes])
26
27
28 def _read_array(ix, subix=None, dtype=None):
29 """Change the type of a read array"""
30 arr = snuggs._ctx.lookup(ix, subix)
31 if dtype:
32 arr = arr.astype(dtype)
33 return arr
34
35
36 def _chunk_output(width, height, count, itemsize, mem_limit=1):
37 """Divide the calculation output into chunks
38
39 This function determines the chunk size such that an array of shape
40 (chunk_size, chunk_size, count) with itemsize bytes per element
41 requires no more than mem_limit megabytes of memory.
42
43 Output chunks are described by rasterio Windows.
44
45 Parameters
46 ----------
47 width : int
48 Output width
49 height : int
50 Output height
51 count : int
52 Number of output bands
53 itemsize : int
54 Number of bytes per pixel
55 mem_limit : int, default
56 The maximum size in memory of a chunk array
57
58 Returns
59 -------
60 sequence of Windows
61 """
62 max_pixels = mem_limit * 1.0e+6 / itemsize * count
63 chunk_size = int(math.floor(math.sqrt(max_pixels)))
64 ncols = int(math.ceil(width / chunk_size))
65 nrows = int(math.ceil(height / chunk_size))
66 chunk_windows = []
67
68 for col in range(ncols):
69 col_offset = col * chunk_size
70 w = min(chunk_size, width - col_offset)
71 for row in range(nrows):
72 row_offset = row * chunk_size
73 h = min(chunk_size, height - row_offset)
74 chunk_windows.append(((row, col), Window(col_offset, row_offset, w, h)))
75
76 return chunk_windows
77
78
79 @click.command(short_help="Raster data calculator.")
80 @click.argument('command')
81 @options.files_inout_arg
82 @options.output_opt
83 @options.format_opt
84 @click.option('--name', multiple=True,
85 help='Specify an input file with a unique short (alphas only) '
86 'name for use in commands like '
87 '"a=tests/data/RGB.byte.tif".')
88 @options.dtype_opt
89 @options.masked_opt
90 @options.overwrite_opt
91 @click.option("--mem-limit", type=int, default=64, help="Limit on memory used to perform calculations, in MB.")
92 @options.creation_options
93 @click.pass_context
94 def calc(ctx, command, files, output, driver, name, dtype, masked, overwrite, mem_limit, creation_options):
95 """A raster data calculator
96
97 Evaluates an expression using input datasets and writes the result
98 to a new dataset.
99
100 Command syntax is lisp-like. An expression consists of an operator
101 or function name and one or more strings, numbers, or expressions
102 enclosed in parentheses. Functions include ``read`` (gets a raster
103 array) and ``asarray`` (makes a 3-D array from 2-D arrays).
104
105 \b
106 * (read i) evaluates to the i-th input dataset (a 3-D array).
107 * (read i j) evaluates to the j-th band of the i-th dataset (a
108 2-D array).
109 * (take foo j) evaluates to the j-th band of a dataset named foo
110 (see help on the --name option above).
111 * Standard numpy array operators (+, -, *, /) are available.
112 * When the final result is a list of arrays, a multiple band
113 output file is written.
114 * When the final result is a single array, a single band output
115 file is written.
116
117 Example:
118
119 \b
120 $ rio calc "(+ 2 (* 0.95 (read 1)))" tests/data/RGB.byte.tif \\
121 > /tmp/out.tif
122
123 The command above produces a 3-band GeoTIFF with all values scaled
124 by 0.95 and incremented by 2.
125
126 \b
127 $ rio calc "(asarray (+ 125 (read 1)) (read 1) (read 1))" \\
128 > tests/data/shade.tif /tmp/out.tif
129
130 The command above produces a 3-band RGB GeoTIFF, with red levels
131 incremented by 125, from the single-band input.
132
133 The maximum amount of memory used to perform caculations defaults to
134 64 MB. This number can be increased to improve speed of calculation.
135
136 """
137 import numpy as np
138
139 dst = None
140 sources = []
141
142 try:
143 with ctx.obj['env']:
144 output, files = resolve_inout(files=files, output=output,
145 overwrite=overwrite)
146 inputs = ([tuple(n.split('=')) for n in name] +
147 [(None, n) for n in files])
148 sources = [rasterio.open(path) for name, path in inputs]
149
150 first = sources[0]
151 kwargs = first.profile
152 kwargs.update(**creation_options)
153 dtype = dtype or first.meta['dtype']
154 kwargs['dtype'] = dtype
155 kwargs.pop("driver", None)
156 if driver:
157 kwargs['driver'] = driver
158
159 # Extend snuggs.
160 snuggs.func_map['read'] = _read_array
161 snuggs.func_map['band'] = lambda d, i: _get_bands(inputs, sources, d, i)
162 snuggs.func_map['bands'] = lambda d: _get_bands(inputs, sources, d)
163 snuggs.func_map['fillnodata'] = lambda *args: fillnodata(*args)
164 snuggs.func_map['sieve'] = lambda *args: sieve(*args)
165
166 # The windows iterator is initialized with a single sample.
167 # The actual work windows will be added in the second
168 # iteration of the loop.
169 work_windows = [(None, Window(0, 0, 16, 16))]
170
171 for ij, window in work_windows:
172
173 ctxkwds = OrderedDict()
174
175 for i, ((name, path), src) in enumerate(zip(inputs, sources)):
176
177 # Using the class method instead of instance
178 # method. Latter raises
179 #
180 # TypeError: astype() got an unexpected keyword
181 # argument 'copy'
182 #
183 # possibly something to do with the instance being
184 # a masked array.
185 ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked, window=window)
186
187 res = snuggs.eval(command, **ctxkwds)
188
189 if (isinstance(res, np.ma.core.MaskedArray) and (
190 tuple(LooseVersion(np.__version__).version) < (1, 9) or
191 tuple(LooseVersion(np.__version__).version) > (1, 10))):
192 res = res.filled(kwargs['nodata'])
193
194 if len(res.shape) == 3:
195 results = np.ndarray.astype(res, dtype, copy=False)
196 else:
197 results = np.asanyarray(
198 [np.ndarray.astype(res, dtype, copy=False)])
199
200 # The first iteration is only to get sample results and from them
201 # compute some properties of the output dataset.
202 if dst is None:
203 kwargs['count'] = results.shape[0]
204 dst = rasterio.open(output, 'w', **kwargs)
205 work_windows.extend(_chunk_output(dst.width, dst.height, dst.count, np.dtype(dst.dtypes[0]).itemsize, mem_limit=mem_limit))
206
207 # In subsequent iterations we write results.
208 else:
209 dst.write(results, window=window)
210
211 except snuggs.ExpressionError as err:
212 click.echo("Expression Error:")
213 click.echo(" {}".format(err.text))
214 click.echo(" {}^".format(" " * err.offset))
215 click.echo(err)
216 raise click.Abort()
217
218 finally:
219 if dst:
220 dst.close()
221 for src in sources:
222 src.close()
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rasterio/rio/calc.py b/rasterio/rio/calc.py
--- a/rasterio/rio/calc.py
+++ b/rasterio/rio/calc.py
@@ -185,17 +185,14 @@
ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked, window=window)
res = snuggs.eval(command, **ctxkwds)
-
- if (isinstance(res, np.ma.core.MaskedArray) and (
- tuple(LooseVersion(np.__version__).version) < (1, 9) or
- tuple(LooseVersion(np.__version__).version) > (1, 10))):
- res = res.filled(kwargs['nodata'])
-
- if len(res.shape) == 3:
- results = np.ndarray.astype(res, dtype, copy=False)
- else:
- results = np.asanyarray(
- [np.ndarray.astype(res, dtype, copy=False)])
+ results = res.astype(dtype, copy=False)
+
+ if isinstance(results, np.ma.core.MaskedArray):
+ results = results.filled(float(kwargs['nodata']))
+ if len(results.shape) == 2:
+ results = np.ma.asanyarray([results])
+ elif len(results.shape) == 2:
+ results = np.asanyarray([results])
# The first iteration is only to get sample results and from them
# compute some properties of the output dataset.
| {"golden_diff": "diff --git a/rasterio/rio/calc.py b/rasterio/rio/calc.py\n--- a/rasterio/rio/calc.py\n+++ b/rasterio/rio/calc.py\n@@ -185,17 +185,14 @@\n ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked, window=window)\n \n res = snuggs.eval(command, **ctxkwds)\n-\n- if (isinstance(res, np.ma.core.MaskedArray) and (\n- tuple(LooseVersion(np.__version__).version) < (1, 9) or\n- tuple(LooseVersion(np.__version__).version) > (1, 10))):\n- res = res.filled(kwargs['nodata'])\n-\n- if len(res.shape) == 3:\n- results = np.ndarray.astype(res, dtype, copy=False)\n- else:\n- results = np.asanyarray(\n- [np.ndarray.astype(res, dtype, copy=False)])\n+ results = res.astype(dtype, copy=False)\n+\n+ if isinstance(results, np.ma.core.MaskedArray):\n+ results = results.filled(float(kwargs['nodata']))\n+ if len(results.shape) == 2:\n+ results = np.ma.asanyarray([results])\n+ elif len(results.shape) == 2:\n+ results = np.asanyarray([results])\n \n # The first iteration is only to get sample results and from them\n # compute some properties of the output dataset.\n", "issue": "Cast nodata value before filling in rio calc\nAt line 192, `res = res.filled(kwargs['nodata'])`, we need to convert that string to a numpy scalar or array of `res` dtype.\n", "before_files": [{"content": "\"\"\"$ rio calc\"\"\"\n\nfrom __future__ import division\n\nfrom collections import OrderedDict\nfrom distutils.version import LooseVersion\nimport math\n\nimport click\nimport snuggs\n\nimport rasterio\nfrom rasterio.features import sieve\nfrom rasterio.fill import fillnodata\nfrom rasterio.windows import Window\nfrom rasterio.rio import options\nfrom rasterio.rio.helpers import resolve_inout\n\n\ndef _get_bands(inputs, sources, d, i=None):\n \"\"\"Get a rasterio.Band object from calc's inputs\"\"\"\n idx = d if d in dict(inputs) else int(d) - 1\n src = sources[idx]\n return (rasterio.band(src, i) if i else\n [rasterio.band(src, j) for j in src.indexes])\n\n\ndef _read_array(ix, subix=None, dtype=None):\n \"\"\"Change the type of a read array\"\"\"\n arr = snuggs._ctx.lookup(ix, subix)\n if dtype:\n arr = arr.astype(dtype)\n return arr\n\n\ndef _chunk_output(width, height, count, itemsize, mem_limit=1):\n \"\"\"Divide the calculation output into chunks\n\n This function determines the chunk size such that an array of shape\n (chunk_size, chunk_size, count) with itemsize bytes per element\n requires no more than mem_limit megabytes of memory.\n\n Output chunks are described by rasterio Windows.\n\n Parameters\n ----------\n width : int\n Output width\n height : int\n Output height\n count : int\n Number of output bands\n itemsize : int\n Number of bytes per pixel\n mem_limit : int, default\n The maximum size in memory of a chunk array\n\n Returns\n -------\n sequence of Windows\n \"\"\"\n max_pixels = mem_limit * 1.0e+6 / itemsize * count\n chunk_size = int(math.floor(math.sqrt(max_pixels)))\n ncols = int(math.ceil(width / chunk_size))\n nrows = int(math.ceil(height / chunk_size))\n chunk_windows = []\n\n for col in range(ncols):\n col_offset = col * chunk_size\n w = min(chunk_size, width - col_offset)\n for row in range(nrows):\n row_offset = row * chunk_size\n h = min(chunk_size, height - row_offset)\n chunk_windows.append(((row, col), Window(col_offset, row_offset, w, h)))\n\n return chunk_windows\n\n\[email protected](short_help=\"Raster data calculator.\")\[email protected]('command')\[email protected]_inout_arg\[email protected]_opt\[email protected]_opt\[email protected]('--name', multiple=True,\n help='Specify an 
input file with a unique short (alphas only) '\n 'name for use in commands like '\n '\"a=tests/data/RGB.byte.tif\".')\[email protected]_opt\[email protected]_opt\[email protected]_opt\[email protected](\"--mem-limit\", type=int, default=64, help=\"Limit on memory used to perform calculations, in MB.\")\[email protected]_options\[email protected]_context\ndef calc(ctx, command, files, output, driver, name, dtype, masked, overwrite, mem_limit, creation_options):\n \"\"\"A raster data calculator\n\n Evaluates an expression using input datasets and writes the result\n to a new dataset.\n\n Command syntax is lisp-like. An expression consists of an operator\n or function name and one or more strings, numbers, or expressions\n enclosed in parentheses. Functions include ``read`` (gets a raster\n array) and ``asarray`` (makes a 3-D array from 2-D arrays).\n\n \\b\n * (read i) evaluates to the i-th input dataset (a 3-D array).\n * (read i j) evaluates to the j-th band of the i-th dataset (a\n 2-D array).\n * (take foo j) evaluates to the j-th band of a dataset named foo\n (see help on the --name option above).\n * Standard numpy array operators (+, -, *, /) are available.\n * When the final result is a list of arrays, a multiple band\n output file is written.\n * When the final result is a single array, a single band output\n file is written.\n\n Example:\n\n \\b\n $ rio calc \"(+ 2 (* 0.95 (read 1)))\" tests/data/RGB.byte.tif \\\\\n > /tmp/out.tif\n\n The command above produces a 3-band GeoTIFF with all values scaled\n by 0.95 and incremented by 2.\n\n \\b\n $ rio calc \"(asarray (+ 125 (read 1)) (read 1) (read 1))\" \\\\\n > tests/data/shade.tif /tmp/out.tif\n\n The command above produces a 3-band RGB GeoTIFF, with red levels\n incremented by 125, from the single-band input.\n\n The maximum amount of memory used to perform caculations defaults to\n 64 MB. This number can be increased to improve speed of calculation.\n\n \"\"\"\n import numpy as np\n\n dst = None\n sources = []\n\n try:\n with ctx.obj['env']:\n output, files = resolve_inout(files=files, output=output,\n overwrite=overwrite)\n inputs = ([tuple(n.split('=')) for n in name] +\n [(None, n) for n in files])\n sources = [rasterio.open(path) for name, path in inputs]\n\n first = sources[0]\n kwargs = first.profile\n kwargs.update(**creation_options)\n dtype = dtype or first.meta['dtype']\n kwargs['dtype'] = dtype\n kwargs.pop(\"driver\", None)\n if driver:\n kwargs['driver'] = driver\n\n # Extend snuggs.\n snuggs.func_map['read'] = _read_array\n snuggs.func_map['band'] = lambda d, i: _get_bands(inputs, sources, d, i)\n snuggs.func_map['bands'] = lambda d: _get_bands(inputs, sources, d)\n snuggs.func_map['fillnodata'] = lambda *args: fillnodata(*args)\n snuggs.func_map['sieve'] = lambda *args: sieve(*args)\n\n # The windows iterator is initialized with a single sample.\n # The actual work windows will be added in the second\n # iteration of the loop.\n work_windows = [(None, Window(0, 0, 16, 16))]\n\n for ij, window in work_windows:\n\n ctxkwds = OrderedDict()\n\n for i, ((name, path), src) in enumerate(zip(inputs, sources)):\n\n # Using the class method instead of instance\n # method. 
Latter raises\n #\n # TypeError: astype() got an unexpected keyword\n # argument 'copy'\n #\n # possibly something to do with the instance being\n # a masked array.\n ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked, window=window)\n\n res = snuggs.eval(command, **ctxkwds)\n\n if (isinstance(res, np.ma.core.MaskedArray) and (\n tuple(LooseVersion(np.__version__).version) < (1, 9) or\n tuple(LooseVersion(np.__version__).version) > (1, 10))):\n res = res.filled(kwargs['nodata'])\n\n if len(res.shape) == 3:\n results = np.ndarray.astype(res, dtype, copy=False)\n else:\n results = np.asanyarray(\n [np.ndarray.astype(res, dtype, copy=False)])\n\n # The first iteration is only to get sample results and from them\n # compute some properties of the output dataset.\n if dst is None:\n kwargs['count'] = results.shape[0]\n dst = rasterio.open(output, 'w', **kwargs)\n work_windows.extend(_chunk_output(dst.width, dst.height, dst.count, np.dtype(dst.dtypes[0]).itemsize, mem_limit=mem_limit))\n\n # In subsequent iterations we write results.\n else:\n dst.write(results, window=window)\n\n except snuggs.ExpressionError as err:\n click.echo(\"Expression Error:\")\n click.echo(\" {}\".format(err.text))\n click.echo(\" {}^\".format(\" \" * err.offset))\n click.echo(err)\n raise click.Abort()\n\n finally:\n if dst:\n dst.close()\n for src in sources:\n src.close()\n", "path": "rasterio/rio/calc.py"}], "after_files": [{"content": "\"\"\"$ rio calc\"\"\"\n\nfrom __future__ import division\n\nfrom collections import OrderedDict\nfrom distutils.version import LooseVersion\nimport math\n\nimport click\nimport snuggs\n\nimport rasterio\nfrom rasterio.features import sieve\nfrom rasterio.fill import fillnodata\nfrom rasterio.windows import Window\nfrom rasterio.rio import options\nfrom rasterio.rio.helpers import resolve_inout\n\n\ndef _get_bands(inputs, sources, d, i=None):\n \"\"\"Get a rasterio.Band object from calc's inputs\"\"\"\n idx = d if d in dict(inputs) else int(d) - 1\n src = sources[idx]\n return (rasterio.band(src, i) if i else\n [rasterio.band(src, j) for j in src.indexes])\n\n\ndef _read_array(ix, subix=None, dtype=None):\n \"\"\"Change the type of a read array\"\"\"\n arr = snuggs._ctx.lookup(ix, subix)\n if dtype:\n arr = arr.astype(dtype)\n return arr\n\n\ndef _chunk_output(width, height, count, itemsize, mem_limit=1):\n \"\"\"Divide the calculation output into chunks\n\n This function determines the chunk size such that an array of shape\n (chunk_size, chunk_size, count) with itemsize bytes per element\n requires no more than mem_limit megabytes of memory.\n\n Output chunks are described by rasterio Windows.\n\n Parameters\n ----------\n width : int\n Output width\n height : int\n Output height\n count : int\n Number of output bands\n itemsize : int\n Number of bytes per pixel\n mem_limit : int, default\n The maximum size in memory of a chunk array\n\n Returns\n -------\n sequence of Windows\n \"\"\"\n max_pixels = mem_limit * 1.0e+6 / itemsize * count\n chunk_size = int(math.floor(math.sqrt(max_pixels)))\n ncols = int(math.ceil(width / chunk_size))\n nrows = int(math.ceil(height / chunk_size))\n chunk_windows = []\n\n for col in range(ncols):\n col_offset = col * chunk_size\n w = min(chunk_size, width - col_offset)\n for row in range(nrows):\n row_offset = row * chunk_size\n h = min(chunk_size, height - row_offset)\n chunk_windows.append(((row, col), Window(col_offset, row_offset, w, h)))\n\n return chunk_windows\n\n\[email protected](short_help=\"Raster data calculator.\")\[email 
protected]('command')\[email protected]_inout_arg\[email protected]_opt\[email protected]_opt\[email protected]('--name', multiple=True,\n help='Specify an input file with a unique short (alphas only) '\n 'name for use in commands like '\n '\"a=tests/data/RGB.byte.tif\".')\[email protected]_opt\[email protected]_opt\[email protected]_opt\[email protected](\"--mem-limit\", type=int, default=64, help=\"Limit on memory used to perform calculations, in MB.\")\[email protected]_options\[email protected]_context\ndef calc(ctx, command, files, output, driver, name, dtype, masked, overwrite, mem_limit, creation_options):\n \"\"\"A raster data calculator\n\n Evaluates an expression using input datasets and writes the result\n to a new dataset.\n\n Command syntax is lisp-like. An expression consists of an operator\n or function name and one or more strings, numbers, or expressions\n enclosed in parentheses. Functions include ``read`` (gets a raster\n array) and ``asarray`` (makes a 3-D array from 2-D arrays).\n\n \\b\n * (read i) evaluates to the i-th input dataset (a 3-D array).\n * (read i j) evaluates to the j-th band of the i-th dataset (a\n 2-D array).\n * (take foo j) evaluates to the j-th band of a dataset named foo\n (see help on the --name option above).\n * Standard numpy array operators (+, -, *, /) are available.\n * When the final result is a list of arrays, a multiple band\n output file is written.\n * When the final result is a single array, a single band output\n file is written.\n\n Example:\n\n \\b\n $ rio calc \"(+ 2 (* 0.95 (read 1)))\" tests/data/RGB.byte.tif \\\\\n > /tmp/out.tif\n\n The command above produces a 3-band GeoTIFF with all values scaled\n by 0.95 and incremented by 2.\n\n \\b\n $ rio calc \"(asarray (+ 125 (read 1)) (read 1) (read 1))\" \\\\\n > tests/data/shade.tif /tmp/out.tif\n\n The command above produces a 3-band RGB GeoTIFF, with red levels\n incremented by 125, from the single-band input.\n\n The maximum amount of memory used to perform caculations defaults to\n 64 MB. This number can be increased to improve speed of calculation.\n\n \"\"\"\n import numpy as np\n\n dst = None\n sources = []\n\n try:\n with ctx.obj['env']:\n output, files = resolve_inout(files=files, output=output,\n overwrite=overwrite)\n inputs = ([tuple(n.split('=')) for n in name] +\n [(None, n) for n in files])\n sources = [rasterio.open(path) for name, path in inputs]\n\n first = sources[0]\n kwargs = first.profile\n kwargs.update(**creation_options)\n dtype = dtype or first.meta['dtype']\n kwargs['dtype'] = dtype\n kwargs.pop(\"driver\", None)\n if driver:\n kwargs['driver'] = driver\n\n # Extend snuggs.\n snuggs.func_map['read'] = _read_array\n snuggs.func_map['band'] = lambda d, i: _get_bands(inputs, sources, d, i)\n snuggs.func_map['bands'] = lambda d: _get_bands(inputs, sources, d)\n snuggs.func_map['fillnodata'] = lambda *args: fillnodata(*args)\n snuggs.func_map['sieve'] = lambda *args: sieve(*args)\n\n # The windows iterator is initialized with a single sample.\n # The actual work windows will be added in the second\n # iteration of the loop.\n work_windows = [(None, Window(0, 0, 16, 16))]\n\n for ij, window in work_windows:\n\n ctxkwds = OrderedDict()\n\n for i, ((name, path), src) in enumerate(zip(inputs, sources)):\n\n # Using the class method instead of instance\n # method. 
Latter raises\n #\n # TypeError: astype() got an unexpected keyword\n # argument 'copy'\n #\n # possibly something to do with the instance being\n # a masked array.\n ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked, window=window)\n\n res = snuggs.eval(command, **ctxkwds)\n results = res.astype(dtype, copy=False)\n\n if isinstance(results, np.ma.core.MaskedArray):\n results = results.filled(float(kwargs['nodata']))\n if len(results.shape) == 2:\n results = np.ma.asanyarray([results])\n elif len(results.shape) == 2:\n results = np.asanyarray([results])\n\n # The first iteration is only to get sample results and from them\n # compute some properties of the output dataset.\n if dst is None:\n kwargs['count'] = results.shape[0]\n dst = rasterio.open(output, 'w', **kwargs)\n work_windows.extend(_chunk_output(dst.width, dst.height, dst.count, np.dtype(dst.dtypes[0]).itemsize, mem_limit=mem_limit))\n\n # In subsequent iterations we write results.\n else:\n dst.write(results, window=window)\n\n except snuggs.ExpressionError as err:\n click.echo(\"Expression Error:\")\n click.echo(\" {}\".format(err.text))\n click.echo(\" {}^\".format(\" \" * err.offset))\n click.echo(err)\n raise click.Abort()\n\n finally:\n if dst:\n dst.close()\n for src in sources:\n src.close()\n", "path": "rasterio/rio/calc.py"}]} | 2,748 | 338 |
gh_patches_debug_11279 | rasdani/github-patches | git_diff | abey79__vpype-440 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace cKDTree by KDTree
see note here: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.cKDTree.html#scipy.spatial.cKDTree
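For reference, a minimal sketch showing that the two classes are drop-in replacements for this kind of use (assuming SciPy >= 1.6, where `cKDTree` is functionally identical to `KDTree`):

```python
import numpy as np
from scipy.spatial import KDTree  # preferred over the cKDTree alias

points = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.5]])
tree = KDTree(points)

# Same query API as cKDTree: nearest neighbour of a point.
dist, idx = tree.query([0.9, 0.9], k=1)
print(idx, dist)
```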
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vpype/line_index.py`
Content:
```
1 from __future__ import annotations
2
3 import logging
4 from typing import Iterable
5
6 import numpy as np
7 from scipy.spatial import cKDTree as KDTree
8
9 # REMINDER: anything added here must be added to docs/api.rst
10 __all__ = ["LineIndex"]
11
12
13 class LineIndex:
14 """Wrapper to scipy.spatial.cKDTree to facilitate systematic processing of a line
15 collection.
16
17 Implementation note: we use the `available` bool array because deleting stuff from the
18 index is costly.
19 """
20
21 def __init__(self, lines: Iterable[np.ndarray], reverse: bool = False):
22 self.lines = [line for line in lines if len(line) > 0]
23 self.reverse = reverse
24 self._make_index()
25
26 def _make_index(self) -> None:
27 logging.info(f"LineIndex: creating index for {len(self.lines)} lines")
28 self.available = np.ones(shape=len(self.lines), dtype=bool)
29
30 # create rtree index
31 self.index = KDTree(
32 np.array([(line[0].real, line[0].imag) for line in self.lines]).reshape(-1, 2)
33 )
34
35 # create reverse index
36 if self.reverse:
37 self.rindex = KDTree(
38 np.array([(line[-1].real, line[-1].imag) for line in self.lines]).reshape(
39 -1, 2
40 )
41 )
42
43 def _reindex(self) -> None:
44 self.lines = [line for idx, line in enumerate(self.lines) if self.available[idx]]
45 self._make_index()
46
47 def __len__(self) -> int:
48 return np.count_nonzero(self.available)
49
50 def __getitem__(self, item):
51 return self.lines[item]
52
53 def pop_front(self) -> np.ndarray:
54 if len(self) == 0:
55 raise RuntimeError
56 idx = int(np.argmax(self.available))
57 self.available[idx] = False
58 return self.lines[idx]
59
60 def pop(self, idx: int) -> np.ndarray | None:
61 if not self.available[idx]:
62 return None
63 self.available[idx] = False
64 return self.lines[idx]
65
66 def find_nearest_within(self, p: complex, max_dist: float) -> tuple[int | None, bool]:
67 """Find the closest line, assuming a maximum admissible distance.
68 Returns a tuple of (idx, reverse), where `idx` may be None if nothing is found.
69 `reverse` indicates whether or not a line ending has been matched instead of a start.
70 False is always returned if index was created with `reverse=False`.s
71 """
72
73 ridx = None
74 rdist: float | None = 0.0
75
76 while True:
77 reindex, idx, dist = self._find_nearest_within_in_index(p, max_dist, self.index)
78 if reindex:
79 self._reindex()
80 continue
81
82 if self.reverse:
83 reindex, ridx, rdist = self._find_nearest_within_in_index(
84 p, max_dist, self.rindex
85 )
86 if reindex:
87 self._reindex()
88 continue
89 break
90
91 if self.reverse:
92 if idx is None and ridx is None:
93 return None, False
94 elif idx is not None and ridx is None:
95 return idx, False
96 elif idx is None and ridx is not None:
97 return ridx, True
98 elif rdist < dist: # type: ignore
99 return ridx, True
100 else:
101 return idx, False
102 else:
103 return idx, False
104
105 def _find_nearest_within_in_index(
106 self, p: complex, max_dist: float, index: KDTree
107 ) -> tuple[bool, int | None, float | None]:
108 """Find nearest in specific index. Return (reindex, idx, dist) tuple, where
109 reindex indicates if a reindex is needed.
110 """
111
112 # For performance reason, we query only a max of k candidates. In the special case
113 # where all distances are not inf and none are available, we might have more than k
114 # suitable candidate, so we reindex and loop. Otherwise, we check the query results
115 # for availability and not inf and return anything found
116 dists, idxs = index.query((p.real, p.imag), k=50, distance_upper_bound=max_dist)
117 dists = np.array(dists)
118
119 not_inf = ~np.isinf(dists)
120 if np.all(not_inf) and np.all(~self.available[idxs[not_inf]]):
121 return True, None, 0
122
123 candidates = self.available[idxs[not_inf]]
124
125 if np.any(candidates):
126 idx = np.argmax(candidates)
127 return False, idxs[not_inf][idx], dists[not_inf][idx]
128 else:
129 return False, None, 0
130
131 # noinspection PyUnboundLocalVariable
132 def find_nearest(self, p: complex) -> tuple[int, bool]:
133 while True:
134 idx, dist = self._find_nearest_in_index(p, self.index)
135 if self.reverse:
136 ridx, rdist = self._find_nearest_in_index(p, self.rindex)
137
138 if ridx is not None and idx is not None:
139 break
140 elif idx is not None:
141 break
142 self._reindex()
143
144 if self.reverse:
145 if rdist < dist:
146 return ridx, True # type: ignore
147 else:
148 return idx, False
149 else:
150 return idx, False
151
152 def _find_nearest_in_index(self, p: complex, index: KDTree) -> tuple[int | None, float]:
153 """Check the N nearest lines, hopefully find one that is active."""
154
155 dists, idxs = index.query((p.real, p.imag), k=100)
156 for dist, idx in zip(dists, idxs):
157 if ~np.isinf(dist) and self.available[idx]:
158 return idx, dist
159
160 return None, 0.0
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vpype/line_index.py b/vpype/line_index.py
--- a/vpype/line_index.py
+++ b/vpype/line_index.py
@@ -4,14 +4,14 @@
from typing import Iterable
import numpy as np
-from scipy.spatial import cKDTree as KDTree
+from scipy.spatial import KDTree
# REMINDER: anything added here must be added to docs/api.rst
__all__ = ["LineIndex"]
class LineIndex:
- """Wrapper to scipy.spatial.cKDTree to facilitate systematic processing of a line
+ """Wrapper to scipy.spatial.KDTree to facilitate systematic processing of a line
collection.
Implementation note: we use the `available` bool array because deleting stuff from the
| {"golden_diff": "diff --git a/vpype/line_index.py b/vpype/line_index.py\n--- a/vpype/line_index.py\n+++ b/vpype/line_index.py\n@@ -4,14 +4,14 @@\n from typing import Iterable\n \n import numpy as np\n-from scipy.spatial import cKDTree as KDTree\n+from scipy.spatial import KDTree\n \n # REMINDER: anything added here must be added to docs/api.rst\n __all__ = [\"LineIndex\"]\n \n \n class LineIndex:\n- \"\"\"Wrapper to scipy.spatial.cKDTree to facilitate systematic processing of a line\n+ \"\"\"Wrapper to scipy.spatial.KDTree to facilitate systematic processing of a line\n collection.\n \n Implementation note: we use the `available` bool array because deleting stuff from the\n", "issue": "Replace cKDTree by KDTree\nsee note here: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.cKDTree.html#scipy.spatial.cKDTree\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\nfrom typing import Iterable\n\nimport numpy as np\nfrom scipy.spatial import cKDTree as KDTree\n\n# REMINDER: anything added here must be added to docs/api.rst\n__all__ = [\"LineIndex\"]\n\n\nclass LineIndex:\n \"\"\"Wrapper to scipy.spatial.cKDTree to facilitate systematic processing of a line\n collection.\n\n Implementation note: we use the `available` bool array because deleting stuff from the\n index is costly.\n \"\"\"\n\n def __init__(self, lines: Iterable[np.ndarray], reverse: bool = False):\n self.lines = [line for line in lines if len(line) > 0]\n self.reverse = reverse\n self._make_index()\n\n def _make_index(self) -> None:\n logging.info(f\"LineIndex: creating index for {len(self.lines)} lines\")\n self.available = np.ones(shape=len(self.lines), dtype=bool)\n\n # create rtree index\n self.index = KDTree(\n np.array([(line[0].real, line[0].imag) for line in self.lines]).reshape(-1, 2)\n )\n\n # create reverse index\n if self.reverse:\n self.rindex = KDTree(\n np.array([(line[-1].real, line[-1].imag) for line in self.lines]).reshape(\n -1, 2\n )\n )\n\n def _reindex(self) -> None:\n self.lines = [line for idx, line in enumerate(self.lines) if self.available[idx]]\n self._make_index()\n\n def __len__(self) -> int:\n return np.count_nonzero(self.available)\n\n def __getitem__(self, item):\n return self.lines[item]\n\n def pop_front(self) -> np.ndarray:\n if len(self) == 0:\n raise RuntimeError\n idx = int(np.argmax(self.available))\n self.available[idx] = False\n return self.lines[idx]\n\n def pop(self, idx: int) -> np.ndarray | None:\n if not self.available[idx]:\n return None\n self.available[idx] = False\n return self.lines[idx]\n\n def find_nearest_within(self, p: complex, max_dist: float) -> tuple[int | None, bool]:\n \"\"\"Find the closest line, assuming a maximum admissible distance.\n Returns a tuple of (idx, reverse), where `idx` may be None if nothing is found.\n `reverse` indicates whether or not a line ending has been matched instead of a start.\n False is always returned if index was created with `reverse=False`.s\n \"\"\"\n\n ridx = None\n rdist: float | None = 0.0\n\n while True:\n reindex, idx, dist = self._find_nearest_within_in_index(p, max_dist, self.index)\n if reindex:\n self._reindex()\n continue\n\n if self.reverse:\n reindex, ridx, rdist = self._find_nearest_within_in_index(\n p, max_dist, self.rindex\n )\n if reindex:\n self._reindex()\n continue\n break\n\n if self.reverse:\n if idx is None and ridx is None:\n return None, False\n elif idx is not None and ridx is None:\n return idx, False\n elif idx is None and ridx is not None:\n return ridx, 
True\n elif rdist < dist: # type: ignore\n return ridx, True\n else:\n return idx, False\n else:\n return idx, False\n\n def _find_nearest_within_in_index(\n self, p: complex, max_dist: float, index: KDTree\n ) -> tuple[bool, int | None, float | None]:\n \"\"\"Find nearest in specific index. Return (reindex, idx, dist) tuple, where\n reindex indicates if a reindex is needed.\n \"\"\"\n\n # For performance reason, we query only a max of k candidates. In the special case\n # where all distances are not inf and none are available, we might have more than k\n # suitable candidate, so we reindex and loop. Otherwise, we check the query results\n # for availability and not inf and return anything found\n dists, idxs = index.query((p.real, p.imag), k=50, distance_upper_bound=max_dist)\n dists = np.array(dists)\n\n not_inf = ~np.isinf(dists)\n if np.all(not_inf) and np.all(~self.available[idxs[not_inf]]):\n return True, None, 0\n\n candidates = self.available[idxs[not_inf]]\n\n if np.any(candidates):\n idx = np.argmax(candidates)\n return False, idxs[not_inf][idx], dists[not_inf][idx]\n else:\n return False, None, 0\n\n # noinspection PyUnboundLocalVariable\n def find_nearest(self, p: complex) -> tuple[int, bool]:\n while True:\n idx, dist = self._find_nearest_in_index(p, self.index)\n if self.reverse:\n ridx, rdist = self._find_nearest_in_index(p, self.rindex)\n\n if ridx is not None and idx is not None:\n break\n elif idx is not None:\n break\n self._reindex()\n\n if self.reverse:\n if rdist < dist:\n return ridx, True # type: ignore\n else:\n return idx, False\n else:\n return idx, False\n\n def _find_nearest_in_index(self, p: complex, index: KDTree) -> tuple[int | None, float]:\n \"\"\"Check the N nearest lines, hopefully find one that is active.\"\"\"\n\n dists, idxs = index.query((p.real, p.imag), k=100)\n for dist, idx in zip(dists, idxs):\n if ~np.isinf(dist) and self.available[idx]:\n return idx, dist\n\n return None, 0.0\n", "path": "vpype/line_index.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport logging\nfrom typing import Iterable\n\nimport numpy as np\nfrom scipy.spatial import KDTree\n\n# REMINDER: anything added here must be added to docs/api.rst\n__all__ = [\"LineIndex\"]\n\n\nclass LineIndex:\n \"\"\"Wrapper to scipy.spatial.KDTree to facilitate systematic processing of a line\n collection.\n\n Implementation note: we use the `available` bool array because deleting stuff from the\n index is costly.\n \"\"\"\n\n def __init__(self, lines: Iterable[np.ndarray], reverse: bool = False):\n self.lines = [line for line in lines if len(line) > 0]\n self.reverse = reverse\n self._make_index()\n\n def _make_index(self) -> None:\n logging.info(f\"LineIndex: creating index for {len(self.lines)} lines\")\n self.available = np.ones(shape=len(self.lines), dtype=bool)\n\n # create rtree index\n self.index = KDTree(\n np.array([(line[0].real, line[0].imag) for line in self.lines]).reshape(-1, 2)\n )\n\n # create reverse index\n if self.reverse:\n self.rindex = KDTree(\n np.array([(line[-1].real, line[-1].imag) for line in self.lines]).reshape(\n -1, 2\n )\n )\n\n def _reindex(self) -> None:\n self.lines = [line for idx, line in enumerate(self.lines) if self.available[idx]]\n self._make_index()\n\n def __len__(self) -> int:\n return np.count_nonzero(self.available)\n\n def __getitem__(self, item):\n return self.lines[item]\n\n def pop_front(self) -> np.ndarray:\n if len(self) == 0:\n raise RuntimeError\n idx = int(np.argmax(self.available))\n self.available[idx] = 
False\n return self.lines[idx]\n\n def pop(self, idx: int) -> np.ndarray | None:\n if not self.available[idx]:\n return None\n self.available[idx] = False\n return self.lines[idx]\n\n def find_nearest_within(self, p: complex, max_dist: float) -> tuple[int | None, bool]:\n \"\"\"Find the closest line, assuming a maximum admissible distance.\n Returns a tuple of (idx, reverse), where `idx` may be None if nothing is found.\n `reverse` indicates whether or not a line ending has been matched instead of a start.\n False is always returned if index was created with `reverse=False`.s\n \"\"\"\n\n ridx = None\n rdist: float | None = 0.0\n\n while True:\n reindex, idx, dist = self._find_nearest_within_in_index(p, max_dist, self.index)\n if reindex:\n self._reindex()\n continue\n\n if self.reverse:\n reindex, ridx, rdist = self._find_nearest_within_in_index(\n p, max_dist, self.rindex\n )\n if reindex:\n self._reindex()\n continue\n break\n\n if self.reverse:\n if idx is None and ridx is None:\n return None, False\n elif idx is not None and ridx is None:\n return idx, False\n elif idx is None and ridx is not None:\n return ridx, True\n elif rdist < dist: # type: ignore\n return ridx, True\n else:\n return idx, False\n else:\n return idx, False\n\n def _find_nearest_within_in_index(\n self, p: complex, max_dist: float, index: KDTree\n ) -> tuple[bool, int | None, float | None]:\n \"\"\"Find nearest in specific index. Return (reindex, idx, dist) tuple, where\n reindex indicates if a reindex is needed.\n \"\"\"\n\n # For performance reason, we query only a max of k candidates. In the special case\n # where all distances are not inf and none are available, we might have more than k\n # suitable candidate, so we reindex and loop. Otherwise, we check the query results\n # for availability and not inf and return anything found\n dists, idxs = index.query((p.real, p.imag), k=50, distance_upper_bound=max_dist)\n dists = np.array(dists)\n\n not_inf = ~np.isinf(dists)\n if np.all(not_inf) and np.all(~self.available[idxs[not_inf]]):\n return True, None, 0\n\n candidates = self.available[idxs[not_inf]]\n\n if np.any(candidates):\n idx = np.argmax(candidates)\n return False, idxs[not_inf][idx], dists[not_inf][idx]\n else:\n return False, None, 0\n\n # noinspection PyUnboundLocalVariable\n def find_nearest(self, p: complex) -> tuple[int, bool]:\n while True:\n idx, dist = self._find_nearest_in_index(p, self.index)\n if self.reverse:\n ridx, rdist = self._find_nearest_in_index(p, self.rindex)\n\n if ridx is not None and idx is not None:\n break\n elif idx is not None:\n break\n self._reindex()\n\n if self.reverse:\n if rdist < dist:\n return ridx, True # type: ignore\n else:\n return idx, False\n else:\n return idx, False\n\n def _find_nearest_in_index(self, p: complex, index: KDTree) -> tuple[int | None, float]:\n \"\"\"Check the N nearest lines, hopefully find one that is active.\"\"\"\n\n dists, idxs = index.query((p.real, p.imag), k=100)\n for dist, idx in zip(dists, idxs):\n if ~np.isinf(dist) and self.available[idx]:\n return idx, dist\n\n return None, 0.0\n", "path": "vpype/line_index.py"}]} | 1,997 | 168 |
gh_patches_debug_32004 | rasdani/github-patches | git_diff | OCA__bank-payment-18 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
many transactions not recognized
After importing a Dutch bank statement file from ING, I got the following screen:

Many transactions are not processed because the correct bank account number is not recognized.
The system is complaining about "Statements found for unknown account 68xxx" and "Statements found for account 06xxxxxx, but no default journal was defined".
This is a little strange, because I have set up "Default Import Settings for Bank Accounts"
in the settings of the accounting section.
I also put a default journal in there.
The information I entered there is the IBAN of my account, together with the national account number (with the leading zero). I also tried without the leading zero in the national account number, but this doesn't make any difference at all.
Why is OpenERP mixing up the bank account number (with/without the leading zero) and not matching the national number?
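As a minimal illustration of the mismatch (hypothetical account numbers):

```python
# Account as stored in OpenERP versus as reported in the bank statement.
stored_domestic = "0417164300"   # national account with leading zero
statement_account = "417164300"  # same account without the leading zero

print(stored_domestic == statement_account)                          # False
print(stored_domestic.lstrip("0") == statement_account.lstrip("0"))  # True
```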
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `account_banking/res_partner_bank.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 ##############################################################################
3 #
4 # Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
5 # (C) 2011 - 2014 Therp BV (<http://therp.nl>).
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Affero General Public License as
9 # published by the Free Software Foundation, either version 3 of the
10 # License, or (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Affero General Public License for more details.
16 #
17 # You should have received a copy of the GNU Affero General Public License
18 # along with this program. If not, see <http://www.gnu.org/licenses/>.
19 #
20 ##############################################################################
21 from openerp.osv import orm
22 from openerp.addons.account_banking import sepa
23
24
25 class ResPartnerBank(orm.Model):
26 _inherit = 'res.partner.bank'
27
28 def online_account_info(
29 self, cr, uid, country_code, acc_number, context=None):
30 """
31 API hook for legacy online lookup of account info,
32 to be removed in OpenERP 8.0.
33 """
34 return False
35
36 def search(self, cr, uid, args, *rest, **kwargs):
37 """
38 When a complete IBAN is searched, also search for its BBAN
39 if we have the domestic column. Disregard spaces
40 when comparing IBANs.
41 """
42
43 def is_term(arg):
44 '''Flag an arg as term or otherwise'''
45 return isinstance(arg, (list, tuple)) and len(arg) == 3
46
47 def extended_filter_term(term):
48 '''
49 Extend the search criteria in term when appropriate.
50 '''
51 result = [term]
52 extra_terms = []
53 if term[0].lower() == 'acc_number' and term[1] in ('=', '=='):
54 iban = sepa.IBAN(term[2])
55 if iban.valid:
56 # Disregard spaces when comparing IBANs
57 cr.execute(
58 """
59 SELECT id FROM res_partner_bank
60 WHERE replace(acc_number, ' ', '') = %s
61 """, (term[2].replace(' ', ''),))
62 ids = [row[0] for row in cr.fetchall()]
63 result = [('id', 'in', ids)]
64
65 if 'acc_number_domestic' in self._columns:
66 bban = iban.localized_BBAN
67 # Prevent empty search filters
68 if bban:
69 extra_terms.append(
70 ('acc_number_domestic', term[1], bban))
71 for extra_term in extra_terms:
72 result = ['|'] + result + [extra_term]
73 return result
74
75 def extended_search_expression(args):
76 '''
77 Extend the search expression in args when appropriate.
78 The expression itself is in reverse polish notation, so recursion
79 is not needed.
80 '''
81 if not args:
82 return []
83
84 result = []
85 if is_term(args[0]) and len(args) > 1:
86 # Classic filter, implicit '&'
87 result += ['&']
88
89 for arg in args:
90 if is_term(arg):
91 result += extended_filter_term(arg)
92 else:
93 result += arg
94 return result
95
96 # Extend search filter
97 newargs = extended_search_expression(args)
98
99 # Original search
100 return super(ResPartnerBank, self).search(
101 cr, uid, newargs, *rest, **kwargs)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/account_banking/res_partner_bank.py b/account_banking/res_partner_bank.py
--- a/account_banking/res_partner_bank.py
+++ b/account_banking/res_partner_bank.py
@@ -44,6 +44,27 @@
'''Flag an arg as term or otherwise'''
return isinstance(arg, (list, tuple)) and len(arg) == 3
+ def search_domestic(account):
+ """
+ Performing raw sql search for full account numbers,
+ ignoring leading zeroes.
+ """
+ account = (account or '').lstrip('0')
+ if not account:
+ return ('id', '=', 0)
+ query = """
+ SELECT id FROM res_partner_bank
+ WHERE (
+ STATE != 'iban'
+ AND TRIM(LEADING '0' FROM acc_number) = %(account)s)
+ """
+ if 'acc_number_domestic' in self._columns:
+ query += """
+ OR TRIM(LEADING '0' FROM acc_number_domestic)
+ = %(account)s"""
+ cr.execute(query, {'account': account})
+ return ('id', 'in', [row[0] for row in cr.fetchall()])
+
def extended_filter_term(term):
'''
Extend the search criteria in term when appropriate.
@@ -62,12 +83,11 @@
ids = [row[0] for row in cr.fetchall()]
result = [('id', 'in', ids)]
- if 'acc_number_domestic' in self._columns:
- bban = iban.localized_BBAN
- # Prevent empty search filters
- if bban:
- extra_terms.append(
- ('acc_number_domestic', term[1], bban))
+ bban = iban.localized_BBAN
+ if bban:
+ extra_terms.append(search_domestic(bban))
+ else:
+ result = [search_domestic(term[2])]
for extra_term in extra_terms:
result = ['|'] + result + [extra_term]
return result
| {"golden_diff": "diff --git a/account_banking/res_partner_bank.py b/account_banking/res_partner_bank.py\n--- a/account_banking/res_partner_bank.py\n+++ b/account_banking/res_partner_bank.py\n@@ -44,6 +44,27 @@\n '''Flag an arg as term or otherwise'''\n return isinstance(arg, (list, tuple)) and len(arg) == 3\n \n+ def search_domestic(account):\n+ \"\"\"\n+ Performing raw sql search for full account numbers,\n+ ignoring leading zeroes.\n+ \"\"\"\n+ account = (account or '').lstrip('0')\n+ if not account:\n+ return ('id', '=', 0)\n+ query = \"\"\"\n+ SELECT id FROM res_partner_bank\n+ WHERE (\n+ STATE != 'iban'\n+ AND TRIM(LEADING '0' FROM acc_number) = %(account)s)\n+ \"\"\"\n+ if 'acc_number_domestic' in self._columns:\n+ query += \"\"\"\n+ OR TRIM(LEADING '0' FROM acc_number_domestic)\n+ = %(account)s\"\"\"\n+ cr.execute(query, {'account': account})\n+ return ('id', 'in', [row[0] for row in cr.fetchall()])\n+\n def extended_filter_term(term):\n '''\n Extend the search criteria in term when appropriate.\n@@ -62,12 +83,11 @@\n ids = [row[0] for row in cr.fetchall()]\n result = [('id', 'in', ids)]\n \n- if 'acc_number_domestic' in self._columns:\n- bban = iban.localized_BBAN\n- # Prevent empty search filters\n- if bban:\n- extra_terms.append(\n- ('acc_number_domestic', term[1], bban))\n+ bban = iban.localized_BBAN\n+ if bban:\n+ extra_terms.append(search_domestic(bban))\n+ else:\n+ result = [search_domestic(term[2])]\n for extra_term in extra_terms:\n result = ['|'] + result + [extra_term]\n return result\n", "issue": "many transactions not regonized\nAfter importing a dutch bank statements file from the ING, I got the following screen:\n\n\n\nMany transactions are not processed because of not recognizing the correct bankaccount number. \n\nThe system is complaining about \"Statements found for unknown account 68xxx\" and \"Statements found for account 06xxxxxx, but no default journal was defined.\n\nThis is a little strange, because I have setup \"Default Import Settings for Bank Accounts\"\nin the settings of the accounting section.\n\nI also put a default journal in there.\n\nMy information entered there is the IBAN number of my account, together with the national account (with leading zero). I also tried without the leading zero in the national account, but this doesn't make any difference at all.\n\nWhy is openerp mixing up the number of the bankaccount. (with/without zero) and not matching the national number.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).\n# (C) 2011 - 2014 Therp BV (<http://therp.nl>).\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\nfrom openerp.osv import orm\nfrom openerp.addons.account_banking import sepa\n\n\nclass ResPartnerBank(orm.Model):\n _inherit = 'res.partner.bank'\n\n def online_account_info(\n self, cr, uid, country_code, acc_number, context=None):\n \"\"\"\n API hook for legacy online lookup of account info,\n to be removed in OpenERP 8.0.\n \"\"\"\n return False\n\n def search(self, cr, uid, args, *rest, **kwargs):\n \"\"\"\n When a complete IBAN is searched, also search for its BBAN\n if we have the domestic column. Disregard spaces\n when comparing IBANs.\n \"\"\"\n\n def is_term(arg):\n '''Flag an arg as term or otherwise'''\n return isinstance(arg, (list, tuple)) and len(arg) == 3\n\n def extended_filter_term(term):\n '''\n Extend the search criteria in term when appropriate.\n '''\n result = [term]\n extra_terms = []\n if term[0].lower() == 'acc_number' and term[1] in ('=', '=='):\n iban = sepa.IBAN(term[2])\n if iban.valid:\n # Disregard spaces when comparing IBANs\n cr.execute(\n \"\"\"\n SELECT id FROM res_partner_bank\n WHERE replace(acc_number, ' ', '') = %s\n \"\"\", (term[2].replace(' ', ''),))\n ids = [row[0] for row in cr.fetchall()]\n result = [('id', 'in', ids)]\n\n if 'acc_number_domestic' in self._columns:\n bban = iban.localized_BBAN\n # Prevent empty search filters\n if bban:\n extra_terms.append(\n ('acc_number_domestic', term[1], bban))\n for extra_term in extra_terms:\n result = ['|'] + result + [extra_term]\n return result\n\n def extended_search_expression(args):\n '''\n Extend the search expression in args when appropriate.\n The expression itself is in reverse polish notation, so recursion\n is not needed.\n '''\n if not args:\n return []\n\n result = []\n if is_term(args[0]) and len(args) > 1:\n # Classic filter, implicit '&'\n result += ['&']\n\n for arg in args:\n if is_term(arg):\n result += extended_filter_term(arg)\n else:\n result += arg\n return result\n\n # Extend search filter\n newargs = extended_search_expression(args)\n\n # Original search\n return super(ResPartnerBank, self).search(\n cr, uid, newargs, *rest, **kwargs)\n", "path": "account_banking/res_partner_bank.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).\n# (C) 2011 - 2014 Therp BV (<http://therp.nl>).\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\nfrom openerp.osv import orm\nfrom openerp.addons.account_banking import sepa\n\n\nclass ResPartnerBank(orm.Model):\n _inherit = 'res.partner.bank'\n\n def online_account_info(\n self, cr, uid, country_code, acc_number, context=None):\n \"\"\"\n API hook for legacy online lookup of account info,\n to be removed in OpenERP 8.0.\n \"\"\"\n return False\n\n def search(self, cr, uid, args, *rest, **kwargs):\n \"\"\"\n When a complete IBAN is searched, also search for its BBAN\n if we have the domestic column. Disregard spaces\n when comparing IBANs.\n \"\"\"\n\n def is_term(arg):\n '''Flag an arg as term or otherwise'''\n return isinstance(arg, (list, tuple)) and len(arg) == 3\n\n def search_domestic(account):\n \"\"\"\n Performing raw sql search for full account numbers,\n ignoring leading zeroes.\n \"\"\"\n account = (account or '').lstrip('0')\n if not account:\n return ('id', '=', 0)\n query = \"\"\"\n SELECT id FROM res_partner_bank\n WHERE (\n STATE != 'iban'\n AND TRIM(LEADING '0' FROM acc_number) = %(account)s)\n \"\"\"\n if 'acc_number_domestic' in self._columns:\n query += \"\"\"\n OR TRIM(LEADING '0' FROM acc_number_domestic)\n = %(account)s\"\"\"\n cr.execute(query, {'account': account})\n return ('id', 'in', [row[0] for row in cr.fetchall()])\n\n def extended_filter_term(term):\n '''\n Extend the search criteria in term when appropriate.\n '''\n result = [term]\n extra_terms = []\n if term[0].lower() == 'acc_number' and term[1] in ('=', '=='):\n iban = sepa.IBAN(term[2])\n if iban.valid:\n # Disregard spaces when comparing IBANs\n cr.execute(\n \"\"\"\n SELECT id FROM res_partner_bank\n WHERE replace(acc_number, ' ', '') = %s\n \"\"\", (term[2].replace(' ', ''),))\n ids = [row[0] for row in cr.fetchall()]\n result = [('id', 'in', ids)]\n\n bban = iban.localized_BBAN\n if bban:\n extra_terms.append(search_domestic(bban))\n else:\n result = [search_domestic(term[2])]\n for extra_term in extra_terms:\n result = ['|'] + result + [extra_term]\n return result\n\n def extended_search_expression(args):\n '''\n Extend the search expression in args when appropriate.\n The expression itself is in reverse polish notation, so recursion\n is not needed.\n '''\n if not args:\n return []\n\n result = []\n if is_term(args[0]) and len(args) > 1:\n # Classic filter, implicit '&'\n result += ['&']\n\n for arg in args:\n if is_term(arg):\n result += extended_filter_term(arg)\n else:\n result += arg\n return result\n\n # Extend search filter\n newargs = extended_search_expression(args)\n\n # Original search\n return super(ResPartnerBank, self).search(\n cr, uid, newargs, *rest, **kwargs)\n", "path": "account_banking/res_partner_bank.py"}]} | 1,504 | 452 |
gh_patches_debug_17655 | rasdani/github-patches | git_diff | sktime__sktime-3168 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] changelog generator picks up merges into branches other than `main`
The changelog generator seems to pick up PRs that were not merged into `main` but into another branch.
Current examples, taken at the pre-0.13.0 state of the 0.13.0 release: #2941, #2952.
FYI @lmmentel
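
For context, the `GET /repos/{owner}/{repo}/pulls` endpoint that the generator queries supports a `base` parameter restricting results to pull requests opened against a given branch. A minimal sketch of the narrower query (token handling omitted; values mirror the constants in `build_tools/changelog.py`):

```python
import httpx

params = {
    "base": "main",       # only PRs whose target branch is main
    "state": "closed",
    "sort": "updated",
    "direction": "desc",
    "per_page": 50,
}
response = httpx.get(
    "https://api.github.com/repos/alan-turing-institute/sktime/pulls",
    params=params,
)
# PRs merged into release or feature branches no longer show up here.
merged = [pr for pr in response.json() if pr["merged_at"]]
```
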
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `build_tools/changelog.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """RestructuredText changelog generator."""
3
4 import os
5 from collections import defaultdict
6 from typing import Dict, List
7
8 import httpx
9 from dateutil import parser
10
11 HEADERS = {
12 "Accept": "application/vnd.github.v3+json",
13 }
14
15 if os.getenv("GITHUB_TOKEN") is not None:
16 HEADERS["Authorization"] = f"token {os.getenv('GITHUB_TOKEN')}"
17
18 OWNER = "alan-turing-institute"
19 REPO = "sktime"
20 GITHUB_REPOS = "https://api.github.com/repos"
21
22
23 def fetch_merged_pull_requests(page: int = 1) -> List[Dict]: # noqa
24 "Fetch a page of pull requests"
25 params = {
26 "state": "closed",
27 "page": page,
28 "per_page": 50,
29 "sort": "updated",
30 "direction": "desc",
31 }
32 r = httpx.get(
33 f"{GITHUB_REPOS}/{OWNER}/{REPO}/pulls",
34 headers=HEADERS,
35 params=params,
36 )
37 return [pr for pr in r.json() if pr["merged_at"]]
38
39
40 def fetch_latest_release(): # noqa
41 response = httpx.get(
42 f"{GITHUB_REPOS}/{OWNER}/{REPO}/releases/latest", headers=HEADERS
43 )
44
45 if response.status_code == 200:
46 return response.json()
47 else:
48 raise ValueError(response.text, response.status_code)
49
50
51 def fetch_pull_requests_since_last_release() -> List[Dict]: # noqa
52 "Fetch pull requests and filter based on merged date"
53
54 release = fetch_latest_release()
55 published_at = parser.parse(release["published_at"])
56 print( # noqa
57 f"Latest release {release['tag_name']} was published at {published_at}"
58 )
59
60 is_exhausted = False
61 page = 1
62 all_pulls = []
63 while not is_exhausted:
64 pulls = fetch_merged_pull_requests(page=page)
65 all_pulls.extend(
66 [p for p in pulls if parser.parse(p["merged_at"]) > published_at]
67 )
68 is_exhausted = any(parser.parse(p["merged_at"]) < published_at for p in pulls)
69 page += 1
70 return all_pulls
71
72
73 def github_compare_tags(tag_left: str, tag_right: str = "HEAD"): # noqa
74 "Compare commit between two tags"
75 response = httpx.get(
76 f"{GITHUB_REPOS}/{OWNER}/{REPO}/compare/{tag_left}...{tag_right}"
77 )
78 if response.status_code == 200:
79 return response.json()
80 else:
81 raise ValueError(response.text, response.status_code)
82
83
84 def render_contributors(prs: List, fmt: str = "rst"): # noqa
85 "Find unique authors and print a list in given format"
86 authors = sorted({pr["user"]["login"] for pr in prs}, key=lambda x: x.lower())
87
88 header = "Contributors"
89 if fmt == "github":
90 print(f"### {header}") # noqa
91 print(", ".join(f"@{user}" for user in authors)) # noqa
92 elif fmt == "rst":
93 print(header) # noqa
94 print("~" * len(header), end="\n\n") # noqa
95 print(",\n".join(f":user:`{user}`" for user in authors)) # noqa
96
97
98 def assign_prs(prs, categs: List[Dict[str, List[str]]]): # noqa
99 "Assign PR to categories based on labels"
100 assigned = defaultdict(list)
101
102 for i, pr in enumerate(prs):
103 for cat in categs:
104 pr_labels = [label["name"] for label in pr["labels"]]
105 if not set(cat["labels"]).isdisjoint(set(pr_labels)):
106 assigned[cat["title"]].append(i)
107
108 # if any(l.startswith("module") for l in pr_labels):
109 # print(i, pr_labels)
110
111 assigned["Other"] = list(
112 set(range(len(prs))) - {i for _, l in assigned.items() for i in l}
113 )
114
115 return assigned
116
117
118 def render_row(pr): # noqa
119 "Render a single row with PR in restructuredText format"
120 print( # noqa
121 "*",
122 pr["title"].replace("`", "``"),
123 f"(:pr:`{pr['number']}`)",
124 f":user:`{pr['user']['login']}`",
125 )
126
127
128 def render_changelog(prs, assigned): # noqa
129 # sourcery skip: use-named-expression
130 "Render changelog"
131 for title, _ in assigned.items():
132 pr_group = [prs[i] for i in assigned[title]]
133 if pr_group:
134 print(f"\n{title}") # noqa
135 print("~" * len(title), end="\n\n") # noqa
136
137 for pr in sorted(pr_group, key=lambda x: parser.parse(x["merged_at"])):
138 render_row(pr)
139
140
141 if __name__ == "__main__":
142
143 categories = [
144 {"title": "Enhancements", "labels": ["feature", "enhancement"]},
145 {"title": "Fixes", "labels": ["bug", "fix", "bugfix"]},
146 {"title": "Maintenance", "labels": ["maintenance", "chore"]},
147 {"title": "Refactored", "labels": ["refactor"]},
148 {"title": "Documentation", "labels": ["documentation"]},
149 ]
150
151 pulls = fetch_pull_requests_since_last_release()
152 print(f"Found {len(pulls)} merged PRs since last release") # noqa
153 assigned = assign_prs(pulls, categories)
154 render_changelog(pulls, assigned)
155 print() # noqa
156 render_contributors(pulls)
157
158 release = fetch_latest_release()
159 diff = github_compare_tags(release["tag_name"])
160 if diff["total_commits"] != len(pulls):
161 raise ValueError(
162 "Something went wrong and not all PR were fetched. "
163 f'There is {len(pulls)} PRs but {diff["total_commits"]} in the diff'
164 "Please verify that all PRs are included in the changelog."
165 ) # noqa
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/build_tools/changelog.py b/build_tools/changelog.py
--- a/build_tools/changelog.py
+++ b/build_tools/changelog.py
@@ -23,6 +23,7 @@
def fetch_merged_pull_requests(page: int = 1) -> List[Dict]: # noqa
"Fetch a page of pull requests"
params = {
+ "base": "main",
"state": "closed",
"page": page,
"per_page": 50,
@@ -160,6 +161,6 @@
if diff["total_commits"] != len(pulls):
raise ValueError(
"Something went wrong and not all PR were fetched. "
- f'There is {len(pulls)} PRs but {diff["total_commits"]} in the diff'
+ f'There are {len(pulls)} PRs but {diff["total_commits"]} in the diff. '
"Please verify that all PRs are included in the changelog."
) # noqa
| {"golden_diff": "diff --git a/build_tools/changelog.py b/build_tools/changelog.py\n--- a/build_tools/changelog.py\n+++ b/build_tools/changelog.py\n@@ -23,6 +23,7 @@\n def fetch_merged_pull_requests(page: int = 1) -> List[Dict]: # noqa\n \"Fetch a page of pull requests\"\n params = {\n+ \"base\": \"main\",\n \"state\": \"closed\",\n \"page\": page,\n \"per_page\": 50,\n@@ -160,6 +161,6 @@\n if diff[\"total_commits\"] != len(pulls):\n raise ValueError(\n \"Something went wrong and not all PR were fetched. \"\n- f'There is {len(pulls)} PRs but {diff[\"total_commits\"]} in the diff'\n+ f'There are {len(pulls)} PRs but {diff[\"total_commits\"]} in the diff. '\n \"Please verify that all PRs are included in the changelog.\"\n ) # noqa\n", "issue": "[BUG] changelog generator picks up merges into branches other than `main`\nThe changelog generator seems to pick up PR that were not merged into `main` but into another branch.\r\n\r\nCurrent example for 0.13.0 release at pre-0.13.0 state: #2941, #2952.\r\n\r\nFYI @lmmentel \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"RestructuredText changelog generator.\"\"\"\n\nimport os\nfrom collections import defaultdict\nfrom typing import Dict, List\n\nimport httpx\nfrom dateutil import parser\n\nHEADERS = {\n \"Accept\": \"application/vnd.github.v3+json\",\n}\n\nif os.getenv(\"GITHUB_TOKEN\") is not None:\n HEADERS[\"Authorization\"] = f\"token {os.getenv('GITHUB_TOKEN')}\"\n\nOWNER = \"alan-turing-institute\"\nREPO = \"sktime\"\nGITHUB_REPOS = \"https://api.github.com/repos\"\n\n\ndef fetch_merged_pull_requests(page: int = 1) -> List[Dict]: # noqa\n \"Fetch a page of pull requests\"\n params = {\n \"state\": \"closed\",\n \"page\": page,\n \"per_page\": 50,\n \"sort\": \"updated\",\n \"direction\": \"desc\",\n }\n r = httpx.get(\n f\"{GITHUB_REPOS}/{OWNER}/{REPO}/pulls\",\n headers=HEADERS,\n params=params,\n )\n return [pr for pr in r.json() if pr[\"merged_at\"]]\n\n\ndef fetch_latest_release(): # noqa\n response = httpx.get(\n f\"{GITHUB_REPOS}/{OWNER}/{REPO}/releases/latest\", headers=HEADERS\n )\n\n if response.status_code == 200:\n return response.json()\n else:\n raise ValueError(response.text, response.status_code)\n\n\ndef fetch_pull_requests_since_last_release() -> List[Dict]: # noqa\n \"Fetch pull requests and filter based on merged date\"\n\n release = fetch_latest_release()\n published_at = parser.parse(release[\"published_at\"])\n print( # noqa\n f\"Latest release {release['tag_name']} was published at {published_at}\"\n )\n\n is_exhausted = False\n page = 1\n all_pulls = []\n while not is_exhausted:\n pulls = fetch_merged_pull_requests(page=page)\n all_pulls.extend(\n [p for p in pulls if parser.parse(p[\"merged_at\"]) > published_at]\n )\n is_exhausted = any(parser.parse(p[\"merged_at\"]) < published_at for p in pulls)\n page += 1\n return all_pulls\n\n\ndef github_compare_tags(tag_left: str, tag_right: str = \"HEAD\"): # noqa\n \"Compare commit between two tags\"\n response = httpx.get(\n f\"{GITHUB_REPOS}/{OWNER}/{REPO}/compare/{tag_left}...{tag_right}\"\n )\n if response.status_code == 200:\n return response.json()\n else:\n raise ValueError(response.text, response.status_code)\n\n\ndef render_contributors(prs: List, fmt: str = \"rst\"): # noqa\n \"Find unique authors and print a list in given format\"\n authors = sorted({pr[\"user\"][\"login\"] for pr in prs}, key=lambda x: x.lower())\n\n header = \"Contributors\"\n if fmt == \"github\":\n print(f\"### {header}\") # noqa\n print(\", \".join(f\"@{user}\" for user in 
authors)) # noqa\n elif fmt == \"rst\":\n print(header) # noqa\n print(\"~\" * len(header), end=\"\\n\\n\") # noqa\n print(\",\\n\".join(f\":user:`{user}`\" for user in authors)) # noqa\n\n\ndef assign_prs(prs, categs: List[Dict[str, List[str]]]): # noqa\n \"Assign PR to categories based on labels\"\n assigned = defaultdict(list)\n\n for i, pr in enumerate(prs):\n for cat in categs:\n pr_labels = [label[\"name\"] for label in pr[\"labels\"]]\n if not set(cat[\"labels\"]).isdisjoint(set(pr_labels)):\n assigned[cat[\"title\"]].append(i)\n\n # if any(l.startswith(\"module\") for l in pr_labels):\n # print(i, pr_labels)\n\n assigned[\"Other\"] = list(\n set(range(len(prs))) - {i for _, l in assigned.items() for i in l}\n )\n\n return assigned\n\n\ndef render_row(pr): # noqa\n \"Render a single row with PR in restructuredText format\"\n print( # noqa\n \"*\",\n pr[\"title\"].replace(\"`\", \"``\"),\n f\"(:pr:`{pr['number']}`)\",\n f\":user:`{pr['user']['login']}`\",\n )\n\n\ndef render_changelog(prs, assigned): # noqa\n # sourcery skip: use-named-expression\n \"Render changelog\"\n for title, _ in assigned.items():\n pr_group = [prs[i] for i in assigned[title]]\n if pr_group:\n print(f\"\\n{title}\") # noqa\n print(\"~\" * len(title), end=\"\\n\\n\") # noqa\n\n for pr in sorted(pr_group, key=lambda x: parser.parse(x[\"merged_at\"])):\n render_row(pr)\n\n\nif __name__ == \"__main__\":\n\n categories = [\n {\"title\": \"Enhancements\", \"labels\": [\"feature\", \"enhancement\"]},\n {\"title\": \"Fixes\", \"labels\": [\"bug\", \"fix\", \"bugfix\"]},\n {\"title\": \"Maintenance\", \"labels\": [\"maintenance\", \"chore\"]},\n {\"title\": \"Refactored\", \"labels\": [\"refactor\"]},\n {\"title\": \"Documentation\", \"labels\": [\"documentation\"]},\n ]\n\n pulls = fetch_pull_requests_since_last_release()\n print(f\"Found {len(pulls)} merged PRs since last release\") # noqa\n assigned = assign_prs(pulls, categories)\n render_changelog(pulls, assigned)\n print() # noqa\n render_contributors(pulls)\n\n release = fetch_latest_release()\n diff = github_compare_tags(release[\"tag_name\"])\n if diff[\"total_commits\"] != len(pulls):\n raise ValueError(\n \"Something went wrong and not all PR were fetched. 
\"\n f'There is {len(pulls)} PRs but {diff[\"total_commits\"]} in the diff'\n \"Please verify that all PRs are included in the changelog.\"\n ) # noqa\n", "path": "build_tools/changelog.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"RestructuredText changelog generator.\"\"\"\n\nimport os\nfrom collections import defaultdict\nfrom typing import Dict, List\n\nimport httpx\nfrom dateutil import parser\n\nHEADERS = {\n \"Accept\": \"application/vnd.github.v3+json\",\n}\n\nif os.getenv(\"GITHUB_TOKEN\") is not None:\n HEADERS[\"Authorization\"] = f\"token {os.getenv('GITHUB_TOKEN')}\"\n\nOWNER = \"alan-turing-institute\"\nREPO = \"sktime\"\nGITHUB_REPOS = \"https://api.github.com/repos\"\n\n\ndef fetch_merged_pull_requests(page: int = 1) -> List[Dict]: # noqa\n \"Fetch a page of pull requests\"\n params = {\n \"base\": \"main\",\n \"state\": \"closed\",\n \"page\": page,\n \"per_page\": 50,\n \"sort\": \"updated\",\n \"direction\": \"desc\",\n }\n r = httpx.get(\n f\"{GITHUB_REPOS}/{OWNER}/{REPO}/pulls\",\n headers=HEADERS,\n params=params,\n )\n return [pr for pr in r.json() if pr[\"merged_at\"]]\n\n\ndef fetch_latest_release(): # noqa\n response = httpx.get(\n f\"{GITHUB_REPOS}/{OWNER}/{REPO}/releases/latest\", headers=HEADERS\n )\n\n if response.status_code == 200:\n return response.json()\n else:\n raise ValueError(response.text, response.status_code)\n\n\ndef fetch_pull_requests_since_last_release() -> List[Dict]: # noqa\n \"Fetch pull requests and filter based on merged date\"\n\n release = fetch_latest_release()\n published_at = parser.parse(release[\"published_at\"])\n print( # noqa\n f\"Latest release {release['tag_name']} was published at {published_at}\"\n )\n\n is_exhausted = False\n page = 1\n all_pulls = []\n while not is_exhausted:\n pulls = fetch_merged_pull_requests(page=page)\n all_pulls.extend(\n [p for p in pulls if parser.parse(p[\"merged_at\"]) > published_at]\n )\n is_exhausted = any(parser.parse(p[\"merged_at\"]) < published_at for p in pulls)\n page += 1\n return all_pulls\n\n\ndef github_compare_tags(tag_left: str, tag_right: str = \"HEAD\"): # noqa\n \"Compare commit between two tags\"\n response = httpx.get(\n f\"{GITHUB_REPOS}/{OWNER}/{REPO}/compare/{tag_left}...{tag_right}\"\n )\n if response.status_code == 200:\n return response.json()\n else:\n raise ValueError(response.text, response.status_code)\n\n\ndef render_contributors(prs: List, fmt: str = \"rst\"): # noqa\n \"Find unique authors and print a list in given format\"\n authors = sorted({pr[\"user\"][\"login\"] for pr in prs}, key=lambda x: x.lower())\n\n header = \"Contributors\"\n if fmt == \"github\":\n print(f\"### {header}\") # noqa\n print(\", \".join(f\"@{user}\" for user in authors)) # noqa\n elif fmt == \"rst\":\n print(header) # noqa\n print(\"~\" * len(header), end=\"\\n\\n\") # noqa\n print(\",\\n\".join(f\":user:`{user}`\" for user in authors)) # noqa\n\n\ndef assign_prs(prs, categs: List[Dict[str, List[str]]]): # noqa\n \"Assign PR to categories based on labels\"\n assigned = defaultdict(list)\n\n for i, pr in enumerate(prs):\n for cat in categs:\n pr_labels = [label[\"name\"] for label in pr[\"labels\"]]\n if not set(cat[\"labels\"]).isdisjoint(set(pr_labels)):\n assigned[cat[\"title\"]].append(i)\n\n # if any(l.startswith(\"module\") for l in pr_labels):\n # print(i, pr_labels)\n\n assigned[\"Other\"] = list(\n set(range(len(prs))) - {i for _, l in assigned.items() for i in l}\n )\n\n return assigned\n\n\ndef render_row(pr): # noqa\n \"Render a single row with PR in 
restructuredText format\"\n print( # noqa\n \"*\",\n pr[\"title\"].replace(\"`\", \"``\"),\n f\"(:pr:`{pr['number']}`)\",\n f\":user:`{pr['user']['login']}`\",\n )\n\n\ndef render_changelog(prs, assigned): # noqa\n # sourcery skip: use-named-expression\n \"Render changelog\"\n for title, _ in assigned.items():\n pr_group = [prs[i] for i in assigned[title]]\n if pr_group:\n print(f\"\\n{title}\") # noqa\n print(\"~\" * len(title), end=\"\\n\\n\") # noqa\n\n for pr in sorted(pr_group, key=lambda x: parser.parse(x[\"merged_at\"])):\n render_row(pr)\n\n\nif __name__ == \"__main__\":\n\n categories = [\n {\"title\": \"Enhancements\", \"labels\": [\"feature\", \"enhancement\"]},\n {\"title\": \"Fixes\", \"labels\": [\"bug\", \"fix\", \"bugfix\"]},\n {\"title\": \"Maintenance\", \"labels\": [\"maintenance\", \"chore\"]},\n {\"title\": \"Refactored\", \"labels\": [\"refactor\"]},\n {\"title\": \"Documentation\", \"labels\": [\"documentation\"]},\n ]\n\n pulls = fetch_pull_requests_since_last_release()\n print(f\"Found {len(pulls)} merged PRs since last release\") # noqa\n assigned = assign_prs(pulls, categories)\n render_changelog(pulls, assigned)\n print() # noqa\n render_contributors(pulls)\n\n release = fetch_latest_release()\n diff = github_compare_tags(release[\"tag_name\"])\n if diff[\"total_commits\"] != len(pulls):\n raise ValueError(\n \"Something went wrong and not all PR were fetched. \"\n f'There are {len(pulls)} PRs but {diff[\"total_commits\"]} in the diff. '\n \"Please verify that all PRs are included in the changelog.\"\n ) # noqa\n", "path": "build_tools/changelog.py"}]} | 2,109 | 224 |
gh_patches_debug_28894 | rasdani/github-patches | git_diff | openfun__richie-1616 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move course run enroll button to the contact block
## Feature Request
**Is your feature request related to a problem or unsupported use case? Please describe.**
Change the course detail template to match the one used on fun-mooc.
On the fun-mooc site, the button for the course run currently open for enrollment gets much more visibility, because the template was customized to put it in the `contact` django block.
I've done exactly the same thing for nau.edu.pt.
So I think we should make this the default case for Richie.
**Describe the solution you'd like**
I've copy-pasted the fun-mooc `course_detail.html`, which changes the blocks `contact` and `runs_open`.
**Describe alternatives you've considered**
Add the same information below the enrollment count, but I think the fun-mooc version is better.
**Discovery, Documentation, Adoption, Migration Strategy**
If we decide to go forward with this change, we need to document it in UPGRADE.md, because the django blocks in `course_detail.html` can be used to customize the UI.
**Do you want to work on it through a Pull Request?**
If we approve this idea, I could send a PR.
FUN-MOOC version:

NAU version:

Demo version:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/richie/apps/courses/templatetags/extra_tags.py`
Content:
```
1 """Custom template tags for the courses application of Richie."""
2 import json
3
4 from django import template
5 from django.core.exceptions import ObjectDoesNotExist
6 from django.template.defaultfilters import stringfilter
7 from django.template.loader import render_to_string
8 from django.utils import timezone
9 from django.utils.translation import get_language
10 from django.utils.translation import gettext as _
11 from django.utils.translation import to_locale
12
13 import arrow
14 from classytags.arguments import Argument, MultiValueArgument
15 from classytags.core import Options, Tag
16 from classytags.utils import flatten_context
17 from cms.templatetags.cms_tags import (
18 Placeholder,
19 PlaceholderOptions,
20 _get_page_by_untyped_arg,
21 )
22 from cms.toolbar.utils import get_toolbar_from_request
23 from cms.utils import get_site_id
24 from cms.utils.plugins import get_plugins
25
26 from ..lms import LMSHandler
27
28 # pylint: disable=invalid-name
29 register = template.Library()
30
31
32 # pylint: disable=too-many-arguments
33 def get_plugins_render_tag(
34 context, name, varname, nodelist, page_lookup=None, edit=True
35 ):
36 """
37 Retrieve the placeholder's plugins and set them as a variable in the template context.
38 If the placeholder is empty, render the block as fallback content and return the
39 resulting HTML.
40 If the placeholder is editable and rendered on its own page, the edit script and markup
41 are added to the HTML content.
42 """
43 content = ""
44 request = context.get("request")
45
46 if request:
47
48 context[varname] = []
49 page = _get_page_by_untyped_arg(page_lookup, request, get_site_id(None))
50
51 if not page:
52 return ""
53
54 try:
55 placeholder = page.placeholders.get(slot=name)
56 except ObjectDoesNotExist:
57 return ""
58 else:
59 context[varname] = [
60 cms_plugin.get_plugin_instance()[0]
61 for cms_plugin in get_plugins(
62 request, placeholder, template=page.get_template()
63 )
64 ]
65
66 # Default content if there is no plugins in the placeholder
67 if not context[varname] and nodelist:
68 content = nodelist.render(context)
69
70 # Add the edit script and markup to the content, only if the placeholder is editable
71 if edit:
72 toolbar = get_toolbar_from_request(request)
73 if toolbar.edit_mode_active:
74 renderer = toolbar.get_content_renderer()
75 data = renderer.get_editable_placeholder_context(placeholder, page=page)
76 data["content"] = content
77 content = renderer.placeholder_edit_template.format(**data)
78
79 return content
80
81
82 @register.tag("placeholder_as_plugins")
83 class PlaceholderAsPlugins(Placeholder):
84 """
85 Like DjangoCMS 'placeholder' but sets the list of linked plugins to a variable name
86 instead of rendering the placeholder.
87 """
88
89 name = "placeholder_as_plugins"
90 options = PlaceholderOptions(
91 Argument("name", resolve=False),
92 "as",
93 Argument("varname", resolve=False),
94 MultiValueArgument("extra_bits", required=False, resolve=False),
95 blocks=[("endplaceholder_as_plugins", "nodelist")],
96 )
97
98 # pylint: disable=arguments-renamed,too-many-arguments
99 def render_tag(self, context, name, varname, extra_bits, nodelist=None):
100 return get_plugins_render_tag(context, name, varname, nodelist)
101
102
103 @register.tag("get_placeholder_plugins")
104 class GetPlaceholderPlugins(Tag):
105 """
106 A template tag that gets plugins from a page's placeholder and sets them as a context variable:
107
108 {% get_placeholder_plugins "logo" page_lookup as varname %}
109 {% get_placeholder_plugins "logo" page_lookup as varname or %}
110 <div>No content</div>
111 {% endget_placeholder_plugins %}
112
113 The page_lookup parameter can be omitted and will default to the current page
114
115 {% get_placeholder_plugins "logo" as varname %}
116 {% get_placeholder_plugins "logo" as varname or %}
117 <div>No content</div>
118 {% endget_placeholder_plugins %}
119
120 This tag can typically be used in association with the block_plugin tag,
121 to render the retrieved plugins:
122
123 {% get_placeholder_plugins "logo" page_lookup as plugins %}
124 {% blockplugin plugins.0 %}
125 <img src="{% thumbnail instance.picture 300x150 %}"/>
126 {% endblockplugin %}
127
128 Keyword arguments:
129 name: the name of the placeholder
130 page_lookup: lookup argument for Page. See `_get_page_by_untyped_arg()`
131 for detailed information on the allowed types and their interpretation for the
132 `page_lookup` argument.
133 varname: context variable name. Output will be added to template context as this variable
134 instead of being returned.
135 or: optional argument which if given will make the template tag a block
136 tag whose content is shown if the placeholder is empty
137 """
138
139 name = "get_placeholder_plugins"
140 options = PlaceholderOptions(
141 Argument("name", resolve=False),
142 Argument("page_lookup", required=False, default=None),
143 "as",
144 Argument("varname", resolve=False),
145 MultiValueArgument("extra_bits", required=False, resolve=False),
146 blocks=[("endget_placeholder_plugins", "nodelist")],
147 )
148
149 # pylint: disable=arguments-differ,too-many-arguments, unused-argument
150 def render_tag(
151 self, context, name, page_lookup, varname, extra_bits, nodelist=None
152 ):
153 return get_plugins_render_tag(
154 context, name, varname, nodelist, page_lookup, edit=False
155 )
156
157
158 @register.tag()
159 class BlockPlugin(Tag):
160 """
161 Like DjangoCMS 'render_plugin_block' but only includes the edit script and markup when
162 the related placeholder is editable.
163
164 This issue was raised to DjangoCMS and we need our own template tag until they find a way
165 to fix it in DjangoCMS (https://github.com/divio/django-cms/issues/6683).
166 """
167
168 name = "blockplugin"
169 template = "cms/toolbar/plugin.html"
170 options = Options(Argument("plugin"), blocks=[("endblockplugin", "nodelist")])
171
172 # pylint: disable=arguments-differ
173 def render_tag(self, context, plugin, nodelist):
174 """
175 Renders the block for the plugin and returns the resulting HTML leaving the temmpate
176 context untouched.
177 If the placeholder is editable, the edit script and markup are added to the rendered HTML.
178 """
179 request = context.get("request")
180 if not plugin or not request:
181 return ""
182
183 # Add the plugin and its rendered content to an internal context
184 internal_context = flatten_context(context)
185 internal_context["instance"] = plugin
186 internal_context["content"] = nodelist.render(context.new(internal_context))
187
188 # Add the edit script and markup to the content, only if the placeholder is editable
189 # and the visited page is the one on which the plugin's placeholder is declared.
190 toolbar = get_toolbar_from_request(request)
191 if plugin.placeholder.page == request.current_page and toolbar.edit_mode_active:
192 return render_to_string(self.template, internal_context)
193
194 return internal_context["content"]
195
196
197 @register.filter()
198 def is_empty_placeholder(page, slot):
199 """A template filter to determine if a placeholder is empty.
200
201 This is useful when we don't want to include any wrapper markup in our template unless
202 the placeholder unless it actually contains plugins.
203 """
204 placeholder = page.placeholders.get(slot=slot)
205 return not placeholder.cmsplugin_set.exists()
206
207
208 @register.filter()
209 def order_by(queryset, args):
210 """A template filter to force ordering on a queryset.
211
212 Taken from: https://djangosnippets.org/snippets/741/
213 This is useful for DjangoCMS page querysets because we don't have access to the view.
214 """
215 args = [x.strip() for x in args.split(",")]
216 return queryset.order_by(*args)
217
218
219 @register.filter()
220 def has_connected_lms(course_run):
221 """
222 Determine if the passed course run has a connected LMS (as determined through out LMSHandler
223 and settings).
224 This enables our templates to either use the <CourseRunEnrollment /> component or a simple
225 link to the course run.
226 """
227 return LMSHandler.select_lms(course_run.resource_link) is not None
228
229
230 @register.simple_tag(takes_context=True)
231 def course_enrollment_widget_props(context):
232 """
233 Return a json dumps which contains all course_run's properties required by
234 CourseEnrollment React widget
235 """
236 course_run = context["run"]
237
238 profile_urls = json.loads(
239 context.get("AUTHENTICATION", {}).get("profile_urls", "{}")
240 )
241 dashboard_link = profile_urls.get("dashboard", {}).get("action")
242
243 starts_in_message = None
244 if course_run.start > timezone.now():
245 course_start = arrow.get(course_run.start)
246 humanized_course_start = course_start.humanize(
247 arrow.now(), locale=to_locale(get_language())
248 )
249 # Translators: delay indicates when the course will start as a duration.
250 # In english the string will be "The course will start in 3 days"
251 starts_in_message = _("The course will start {delay:s}").format(
252 delay=humanized_course_start
253 )
254
255 return json.dumps(
256 {
257 "courseRun": {
258 "id": course_run.id,
259 "resource_link": course_run.resource_link,
260 "priority": course_run.state["priority"],
261 "starts_in_message": starts_in_message,
262 "dashboard_link": dashboard_link,
263 }
264 }
265 )
266
267
268 @register.filter
269 @stringfilter
270 def trim(value):
271 """
272 Remove whitespaces before and after a string.
273 """
274 return value.strip()
275
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/richie/apps/courses/templatetags/extra_tags.py b/src/richie/apps/courses/templatetags/extra_tags.py
--- a/src/richie/apps/courses/templatetags/extra_tags.py
+++ b/src/richie/apps/courses/templatetags/extra_tags.py
@@ -5,7 +5,7 @@
from django.core.exceptions import ObjectDoesNotExist
from django.template.defaultfilters import stringfilter
from django.template.loader import render_to_string
-from django.utils import timezone
+from django.utils import timezone, translation
from django.utils.translation import get_language
from django.utils.translation import gettext as _
from django.utils.translation import to_locale
@@ -227,6 +227,33 @@
return LMSHandler.select_lms(course_run.resource_link) is not None
[email protected]()
+def visible_on_course_page(course_runs, edit_mode_active=None):
+ """
+ Determine if the passed course run should be visible on the course page, if on edit mode
+ show all the course runs.
+ """
+ if edit_mode_active:
+ return course_runs
+ return list(filter(lambda run: run.is_visible_on_course_page, course_runs))
+
+
[email protected]()
+def sort_runs_by_language_and_start_date(course_runs):
+ """
+ Order course runs by: firstly runs that contains the language of the current user and only
+ after the runs that don't match the current user authenticated language. On both groups, they
+ should be sorted by course start date.
+ """
+ current_language = translation.get_language()
+ return list(
+ sorted(
+ course_runs,
+ key=lambda run: (current_language not in run.languages, run.start),
+ )
+ )
+
+
@register.simple_tag(takes_context=True)
def course_enrollment_widget_props(context):
"""
| {"golden_diff": "diff --git a/src/richie/apps/courses/templatetags/extra_tags.py b/src/richie/apps/courses/templatetags/extra_tags.py\n--- a/src/richie/apps/courses/templatetags/extra_tags.py\n+++ b/src/richie/apps/courses/templatetags/extra_tags.py\n@@ -5,7 +5,7 @@\n from django.core.exceptions import ObjectDoesNotExist\n from django.template.defaultfilters import stringfilter\n from django.template.loader import render_to_string\n-from django.utils import timezone\n+from django.utils import timezone, translation\n from django.utils.translation import get_language\n from django.utils.translation import gettext as _\n from django.utils.translation import to_locale\n@@ -227,6 +227,33 @@\n return LMSHandler.select_lms(course_run.resource_link) is not None\n \n \[email protected]()\n+def visible_on_course_page(course_runs, edit_mode_active=None):\n+ \"\"\"\n+ Determine if the passed course run should be visible on the course page, if on edit mode\n+ show all the course runs.\n+ \"\"\"\n+ if edit_mode_active:\n+ return course_runs\n+ return list(filter(lambda run: run.is_visible_on_course_page, course_runs))\n+\n+\[email protected]()\n+def sort_runs_by_language_and_start_date(course_runs):\n+ \"\"\"\n+ Order course runs by: firstly runs that contains the language of the current user and only\n+ after the runs that don't match the current user authenticated language. On both groups, they\n+ should be sorted by course start date.\n+ \"\"\"\n+ current_language = translation.get_language()\n+ return list(\n+ sorted(\n+ course_runs,\n+ key=lambda run: (current_language not in run.languages, run.start),\n+ )\n+ )\n+\n+\n @register.simple_tag(takes_context=True)\n def course_enrollment_widget_props(context):\n \"\"\"\n", "issue": "Move course run enroll button to the contact block\n## Feature Request\r\n\r\n**Is your feature request related to a problem or unsupported use case? Please describe.**\r\nChange the course detail template to be equal than the fun-mooc.\r\nOn fun-mooc site it is given much more visibility to the current open for enrollment course run button, because it was customized to be on the contact django block.\r\nI've done exactly the same thing for nau.edu.pt.\r\nSo I think we should make this the default case for Richie.\r\n\r\n**Describe the solution you'd like**\r\nI've copy paste the fun-mooc `course_detail.html` that change the blocks `contact` and `runs_open`.\r\n\r\n**Describe alternatives you've considered**\r\nAdd same information bellow the enrollment count, but I think the fun-mooc's version is better. 
\r\n\r\n**Discovery, Documentation, Adoption, Migration Strategy**\r\nIf we decide to go further this change, we need to document it on the UPGRADE.md, because the django blocks on the `course_detail.html` could be used to customize the UI.\r\n\r\n**Do you want to work on it through a Pull Request?**\r\nIf we approve this ideia, I could send a PR.\r\n\r\nFUN-MOOC version:\r\n\r\n\r\nNAU version:\r\n\r\n\r\nDemo version:\r\n\r\n\n", "before_files": [{"content": "\"\"\"Custom template tags for the courses application of Richie.\"\"\"\nimport json\n\nfrom django import template\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.template.defaultfilters import stringfilter\nfrom django.template.loader import render_to_string\nfrom django.utils import timezone\nfrom django.utils.translation import get_language\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import to_locale\n\nimport arrow\nfrom classytags.arguments import Argument, MultiValueArgument\nfrom classytags.core import Options, Tag\nfrom classytags.utils import flatten_context\nfrom cms.templatetags.cms_tags import (\n Placeholder,\n PlaceholderOptions,\n _get_page_by_untyped_arg,\n)\nfrom cms.toolbar.utils import get_toolbar_from_request\nfrom cms.utils import get_site_id\nfrom cms.utils.plugins import get_plugins\n\nfrom ..lms import LMSHandler\n\n# pylint: disable=invalid-name\nregister = template.Library()\n\n\n# pylint: disable=too-many-arguments\ndef get_plugins_render_tag(\n context, name, varname, nodelist, page_lookup=None, edit=True\n):\n \"\"\"\n Retrieve the placeholder's plugins and set them as a variable in the template context.\n If the placeholder is empty, render the block as fallback content and return the\n resulting HTML.\n If the placeholder is editable and rendered on its own page, the edit script and markup\n are added to the HTML content.\n \"\"\"\n content = \"\"\n request = context.get(\"request\")\n\n if request:\n\n context[varname] = []\n page = _get_page_by_untyped_arg(page_lookup, request, get_site_id(None))\n\n if not page:\n return \"\"\n\n try:\n placeholder = page.placeholders.get(slot=name)\n except ObjectDoesNotExist:\n return \"\"\n else:\n context[varname] = [\n cms_plugin.get_plugin_instance()[0]\n for cms_plugin in get_plugins(\n request, placeholder, template=page.get_template()\n )\n ]\n\n # Default content if there is no plugins in the placeholder\n if not context[varname] and nodelist:\n content = nodelist.render(context)\n\n # Add the edit script and markup to the content, only if the placeholder is editable\n if edit:\n toolbar = get_toolbar_from_request(request)\n if toolbar.edit_mode_active:\n renderer = toolbar.get_content_renderer()\n data = renderer.get_editable_placeholder_context(placeholder, page=page)\n data[\"content\"] = content\n content = renderer.placeholder_edit_template.format(**data)\n\n return content\n\n\[email protected](\"placeholder_as_plugins\")\nclass PlaceholderAsPlugins(Placeholder):\n \"\"\"\n Like DjangoCMS 'placeholder' but sets the list of linked plugins to a variable name\n instead of rendering the placeholder.\n \"\"\"\n\n name = \"placeholder_as_plugins\"\n options = PlaceholderOptions(\n Argument(\"name\", resolve=False),\n \"as\",\n Argument(\"varname\", resolve=False),\n MultiValueArgument(\"extra_bits\", required=False, resolve=False),\n blocks=[(\"endplaceholder_as_plugins\", \"nodelist\")],\n )\n\n # pylint: disable=arguments-renamed,too-many-arguments\n def render_tag(self, context, name, varname, 
extra_bits, nodelist=None):\n return get_plugins_render_tag(context, name, varname, nodelist)\n\n\[email protected](\"get_placeholder_plugins\")\nclass GetPlaceholderPlugins(Tag):\n \"\"\"\n A template tag that gets plugins from a page's placeholder and sets them as a context variable:\n\n {% get_placeholder_plugins \"logo\" page_lookup as varname %}\n {% get_placeholder_plugins \"logo\" page_lookup as varname or %}\n <div>No content</div>\n {% endget_placeholder_plugins %}\n\n The page_lookup parameter can be omitted and will default to the current page\n\n {% get_placeholder_plugins \"logo\" as varname %}\n {% get_placeholder_plugins \"logo\" as varname or %}\n <div>No content</div>\n {% endget_placeholder_plugins %}\n\n This tag can typically be used in association with the block_plugin tag,\n to render the retrieved plugins:\n\n {% get_placeholder_plugins \"logo\" page_lookup as plugins %}\n {% blockplugin plugins.0 %}\n <img src=\"{% thumbnail instance.picture 300x150 %}\"/>\n {% endblockplugin %}\n\n Keyword arguments:\n name: the name of the placeholder\n page_lookup: lookup argument for Page. See `_get_page_by_untyped_arg()`\n for detailed information on the allowed types and their interpretation for the\n `page_lookup` argument.\n varname: context variable name. Output will be added to template context as this variable\n instead of being returned.\n or: optional argument which if given will make the template tag a block\n tag whose content is shown if the placeholder is empty\n \"\"\"\n\n name = \"get_placeholder_plugins\"\n options = PlaceholderOptions(\n Argument(\"name\", resolve=False),\n Argument(\"page_lookup\", required=False, default=None),\n \"as\",\n Argument(\"varname\", resolve=False),\n MultiValueArgument(\"extra_bits\", required=False, resolve=False),\n blocks=[(\"endget_placeholder_plugins\", \"nodelist\")],\n )\n\n # pylint: disable=arguments-differ,too-many-arguments, unused-argument\n def render_tag(\n self, context, name, page_lookup, varname, extra_bits, nodelist=None\n ):\n return get_plugins_render_tag(\n context, name, varname, nodelist, page_lookup, edit=False\n )\n\n\[email protected]()\nclass BlockPlugin(Tag):\n \"\"\"\n Like DjangoCMS 'render_plugin_block' but only includes the edit script and markup when\n the related placeholder is editable.\n\n This issue was raised to DjangoCMS and we need our own template tag until they find a way\n to fix it in DjangoCMS (https://github.com/divio/django-cms/issues/6683).\n \"\"\"\n\n name = \"blockplugin\"\n template = \"cms/toolbar/plugin.html\"\n options = Options(Argument(\"plugin\"), blocks=[(\"endblockplugin\", \"nodelist\")])\n\n # pylint: disable=arguments-differ\n def render_tag(self, context, plugin, nodelist):\n \"\"\"\n Renders the block for the plugin and returns the resulting HTML leaving the temmpate\n context untouched.\n If the placeholder is editable, the edit script and markup are added to the rendered HTML.\n \"\"\"\n request = context.get(\"request\")\n if not plugin or not request:\n return \"\"\n\n # Add the plugin and its rendered content to an internal context\n internal_context = flatten_context(context)\n internal_context[\"instance\"] = plugin\n internal_context[\"content\"] = nodelist.render(context.new(internal_context))\n\n # Add the edit script and markup to the content, only if the placeholder is editable\n # and the visited page is the one on which the plugin's placeholder is declared.\n toolbar = get_toolbar_from_request(request)\n if plugin.placeholder.page == request.current_page 
and toolbar.edit_mode_active:\n return render_to_string(self.template, internal_context)\n\n return internal_context[\"content\"]\n\n\[email protected]()\ndef is_empty_placeholder(page, slot):\n \"\"\"A template filter to determine if a placeholder is empty.\n\n This is useful when we don't want to include any wrapper markup in our template unless\n the placeholder unless it actually contains plugins.\n \"\"\"\n placeholder = page.placeholders.get(slot=slot)\n return not placeholder.cmsplugin_set.exists()\n\n\[email protected]()\ndef order_by(queryset, args):\n \"\"\"A template filter to force ordering on a queryset.\n\n Taken from: https://djangosnippets.org/snippets/741/\n This is useful for DjangoCMS page querysets because we don't have access to the view.\n \"\"\"\n args = [x.strip() for x in args.split(\",\")]\n return queryset.order_by(*args)\n\n\[email protected]()\ndef has_connected_lms(course_run):\n \"\"\"\n Determine if the passed course run has a connected LMS (as determined through out LMSHandler\n and settings).\n This enables our templates to either use the <CourseRunEnrollment /> component or a simple\n link to the course run.\n \"\"\"\n return LMSHandler.select_lms(course_run.resource_link) is not None\n\n\[email protected]_tag(takes_context=True)\ndef course_enrollment_widget_props(context):\n \"\"\"\n Return a json dumps which contains all course_run's properties required by\n CourseEnrollment React widget\n \"\"\"\n course_run = context[\"run\"]\n\n profile_urls = json.loads(\n context.get(\"AUTHENTICATION\", {}).get(\"profile_urls\", \"{}\")\n )\n dashboard_link = profile_urls.get(\"dashboard\", {}).get(\"action\")\n\n starts_in_message = None\n if course_run.start > timezone.now():\n course_start = arrow.get(course_run.start)\n humanized_course_start = course_start.humanize(\n arrow.now(), locale=to_locale(get_language())\n )\n # Translators: delay indicates when the course will start as a duration.\n # In english the string will be \"The course will start in 3 days\"\n starts_in_message = _(\"The course will start {delay:s}\").format(\n delay=humanized_course_start\n )\n\n return json.dumps(\n {\n \"courseRun\": {\n \"id\": course_run.id,\n \"resource_link\": course_run.resource_link,\n \"priority\": course_run.state[\"priority\"],\n \"starts_in_message\": starts_in_message,\n \"dashboard_link\": dashboard_link,\n }\n }\n )\n\n\[email protected]\n@stringfilter\ndef trim(value):\n \"\"\"\n Remove whitespaces before and after a string.\n \"\"\"\n return value.strip()\n", "path": "src/richie/apps/courses/templatetags/extra_tags.py"}], "after_files": [{"content": "\"\"\"Custom template tags for the courses application of Richie.\"\"\"\nimport json\n\nfrom django import template\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.template.defaultfilters import stringfilter\nfrom django.template.loader import render_to_string\nfrom django.utils import timezone, translation\nfrom django.utils.translation import get_language\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import to_locale\n\nimport arrow\nfrom classytags.arguments import Argument, MultiValueArgument\nfrom classytags.core import Options, Tag\nfrom classytags.utils import flatten_context\nfrom cms.templatetags.cms_tags import (\n Placeholder,\n PlaceholderOptions,\n _get_page_by_untyped_arg,\n)\nfrom cms.toolbar.utils import get_toolbar_from_request\nfrom cms.utils import get_site_id\nfrom cms.utils.plugins import get_plugins\n\nfrom ..lms import 
LMSHandler\n\n# pylint: disable=invalid-name\nregister = template.Library()\n\n\n# pylint: disable=too-many-arguments\ndef get_plugins_render_tag(\n context, name, varname, nodelist, page_lookup=None, edit=True\n):\n \"\"\"\n Retrieve the placeholder's plugins and set them as a variable in the template context.\n If the placeholder is empty, render the block as fallback content and return the\n resulting HTML.\n If the placeholder is editable and rendered on its own page, the edit script and markup\n are added to the HTML content.\n \"\"\"\n content = \"\"\n request = context.get(\"request\")\n\n if request:\n\n context[varname] = []\n page = _get_page_by_untyped_arg(page_lookup, request, get_site_id(None))\n\n if not page:\n return \"\"\n\n try:\n placeholder = page.placeholders.get(slot=name)\n except ObjectDoesNotExist:\n return \"\"\n else:\n context[varname] = [\n cms_plugin.get_plugin_instance()[0]\n for cms_plugin in get_plugins(\n request, placeholder, template=page.get_template()\n )\n ]\n\n # Default content if there is no plugins in the placeholder\n if not context[varname] and nodelist:\n content = nodelist.render(context)\n\n # Add the edit script and markup to the content, only if the placeholder is editable\n if edit:\n toolbar = get_toolbar_from_request(request)\n if toolbar.edit_mode_active:\n renderer = toolbar.get_content_renderer()\n data = renderer.get_editable_placeholder_context(placeholder, page=page)\n data[\"content\"] = content\n content = renderer.placeholder_edit_template.format(**data)\n\n return content\n\n\[email protected](\"placeholder_as_plugins\")\nclass PlaceholderAsPlugins(Placeholder):\n \"\"\"\n Like DjangoCMS 'placeholder' but sets the list of linked plugins to a variable name\n instead of rendering the placeholder.\n \"\"\"\n\n name = \"placeholder_as_plugins\"\n options = PlaceholderOptions(\n Argument(\"name\", resolve=False),\n \"as\",\n Argument(\"varname\", resolve=False),\n MultiValueArgument(\"extra_bits\", required=False, resolve=False),\n blocks=[(\"endplaceholder_as_plugins\", \"nodelist\")],\n )\n\n # pylint: disable=arguments-renamed,too-many-arguments\n def render_tag(self, context, name, varname, extra_bits, nodelist=None):\n return get_plugins_render_tag(context, name, varname, nodelist)\n\n\[email protected](\"get_placeholder_plugins\")\nclass GetPlaceholderPlugins(Tag):\n \"\"\"\n A template tag that gets plugins from a page's placeholder and sets them as a context variable:\n\n {% get_placeholder_plugins \"logo\" page_lookup as varname %}\n {% get_placeholder_plugins \"logo\" page_lookup as varname or %}\n <div>No content</div>\n {% endget_placeholder_plugins %}\n\n The page_lookup parameter can be omitted and will default to the current page\n\n {% get_placeholder_plugins \"logo\" as varname %}\n {% get_placeholder_plugins \"logo\" as varname or %}\n <div>No content</div>\n {% endget_placeholder_plugins %}\n\n This tag can typically be used in association with the block_plugin tag,\n to render the retrieved plugins:\n\n {% get_placeholder_plugins \"logo\" page_lookup as plugins %}\n {% blockplugin plugins.0 %}\n <img src=\"{% thumbnail instance.picture 300x150 %}\"/>\n {% endblockplugin %}\n\n Keyword arguments:\n name: the name of the placeholder\n page_lookup: lookup argument for Page. See `_get_page_by_untyped_arg()`\n for detailed information on the allowed types and their interpretation for the\n `page_lookup` argument.\n varname: context variable name. 
Output will be added to template context as this variable\n instead of being returned.\n or: optional argument which if given will make the template tag a block\n tag whose content is shown if the placeholder is empty\n \"\"\"\n\n name = \"get_placeholder_plugins\"\n options = PlaceholderOptions(\n Argument(\"name\", resolve=False),\n Argument(\"page_lookup\", required=False, default=None),\n \"as\",\n Argument(\"varname\", resolve=False),\n MultiValueArgument(\"extra_bits\", required=False, resolve=False),\n blocks=[(\"endget_placeholder_plugins\", \"nodelist\")],\n )\n\n # pylint: disable=arguments-differ,too-many-arguments, unused-argument\n def render_tag(\n self, context, name, page_lookup, varname, extra_bits, nodelist=None\n ):\n return get_plugins_render_tag(\n context, name, varname, nodelist, page_lookup, edit=False\n )\n\n\[email protected]()\nclass BlockPlugin(Tag):\n \"\"\"\n Like DjangoCMS 'render_plugin_block' but only includes the edit script and markup when\n the related placeholder is editable.\n\n This issue was raised to DjangoCMS and we need our own template tag until they find a way\n to fix it in DjangoCMS (https://github.com/divio/django-cms/issues/6683).\n \"\"\"\n\n name = \"blockplugin\"\n template = \"cms/toolbar/plugin.html\"\n options = Options(Argument(\"plugin\"), blocks=[(\"endblockplugin\", \"nodelist\")])\n\n # pylint: disable=arguments-differ\n def render_tag(self, context, plugin, nodelist):\n \"\"\"\n Renders the block for the plugin and returns the resulting HTML leaving the temmpate\n context untouched.\n If the placeholder is editable, the edit script and markup are added to the rendered HTML.\n \"\"\"\n request = context.get(\"request\")\n if not plugin or not request:\n return \"\"\n\n # Add the plugin and its rendered content to an internal context\n internal_context = flatten_context(context)\n internal_context[\"instance\"] = plugin\n internal_context[\"content\"] = nodelist.render(context.new(internal_context))\n\n # Add the edit script and markup to the content, only if the placeholder is editable\n # and the visited page is the one on which the plugin's placeholder is declared.\n toolbar = get_toolbar_from_request(request)\n if plugin.placeholder.page == request.current_page and toolbar.edit_mode_active:\n return render_to_string(self.template, internal_context)\n\n return internal_context[\"content\"]\n\n\[email protected]()\ndef is_empty_placeholder(page, slot):\n \"\"\"A template filter to determine if a placeholder is empty.\n\n This is useful when we don't want to include any wrapper markup in our template unless\n the placeholder unless it actually contains plugins.\n \"\"\"\n placeholder = page.placeholders.get(slot=slot)\n return not placeholder.cmsplugin_set.exists()\n\n\[email protected]()\ndef order_by(queryset, args):\n \"\"\"A template filter to force ordering on a queryset.\n\n Taken from: https://djangosnippets.org/snippets/741/\n This is useful for DjangoCMS page querysets because we don't have access to the view.\n \"\"\"\n args = [x.strip() for x in args.split(\",\")]\n return queryset.order_by(*args)\n\n\[email protected]()\ndef has_connected_lms(course_run):\n \"\"\"\n Determine if the passed course run has a connected LMS (as determined through out LMSHandler\n and settings).\n This enables our templates to either use the <CourseRunEnrollment /> component or a simple\n link to the course run.\n \"\"\"\n return LMSHandler.select_lms(course_run.resource_link) is not None\n\n\[email protected]()\ndef 
visible_on_course_page(course_runs, edit_mode_active=None):\n \"\"\"\n Determine if the passed course run should be visible on the course page, if on edit mode\n show all the course runs.\n \"\"\"\n if edit_mode_active:\n return course_runs\n return list(filter(lambda run: run.is_visible_on_course_page, course_runs))\n\n\[email protected]()\ndef sort_runs_by_language_and_start_date(course_runs):\n \"\"\"\n Order course runs by: firstly runs that contains the language of the current user and only\n after the runs that don't match the current user authenticated language. On both groups, they\n should be sorted by course start date.\n \"\"\"\n current_language = translation.get_language()\n return list(\n sorted(\n course_runs,\n key=lambda run: (current_language not in run.languages, run.start),\n )\n )\n\n\[email protected]_tag(takes_context=True)\ndef course_enrollment_widget_props(context):\n \"\"\"\n Return a json dumps which contains all course_run's properties required by\n CourseEnrollment React widget\n \"\"\"\n course_run = context[\"run\"]\n\n profile_urls = json.loads(\n context.get(\"AUTHENTICATION\", {}).get(\"profile_urls\", \"{}\")\n )\n dashboard_link = profile_urls.get(\"dashboard\", {}).get(\"action\")\n\n starts_in_message = None\n if course_run.start > timezone.now():\n course_start = arrow.get(course_run.start)\n humanized_course_start = course_start.humanize(\n arrow.now(), locale=to_locale(get_language())\n )\n # Translators: delay indicates when the course will start as a duration.\n # In english the string will be \"The course will start in 3 days\"\n starts_in_message = _(\"The course will start {delay:s}\").format(\n delay=humanized_course_start\n )\n\n return json.dumps(\n {\n \"courseRun\": {\n \"id\": course_run.id,\n \"resource_link\": course_run.resource_link,\n \"priority\": course_run.state[\"priority\"],\n \"starts_in_message\": starts_in_message,\n \"dashboard_link\": dashboard_link,\n }\n }\n )\n\n\[email protected]\n@stringfilter\ndef trim(value):\n \"\"\"\n Remove whitespaces before and after a string.\n \"\"\"\n return value.strip()\n", "path": "src/richie/apps/courses/templatetags/extra_tags.py"}]} | 3,502 | 413 |
gh_patches_debug_31740 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2808 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider arby is broken
During the global build at 2021-05-26-14-42-23, spider **arby** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/arby.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/arby.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/arby.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/arbys.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4 import scrapy
5 from locations.items import GeojsonPointItem
6
7
8 class ArbysSpider(scrapy.Spider):
9
10 name = "arby"
11 item_attributes = { 'brand': "Arby's", 'brand_wikidata': "Q630866" }
12 allowed_domains = ["locations.arbys.com"]
13 download_delay = 0.2
14 start_urls = (
15 'https://locations.arbys.com/browse/',
16 )
17
18 def get_store_info(self, response):
19 data = response.xpath('//script[@type="application/ld+json"]/text()').extract_first()
20 if data:
21 data = json.loads(data)[0]
22
23 properties = {
24 # store name is after the pipe, e.g. Fast Food Drive-Thru
25 # Restaurants | Arby's 8437
26 'ref': data["name"].rsplit("|", 1)[-1].strip(),
27 'name': data["name"],
28 'addr_full': data["address"]["streetAddress"].strip(),
29 'city': data["address"]["addressLocality"].strip(),
30 'state': data["address"]["addressRegion"],
31 'postcode': data["address"]["postalCode"],
32 'phone': data.get("telephone", None),
33 'lat': float(data["geo"]["latitude"]),
34 'lon': float(data["geo"]["longitude"]),
35 'website': response.url,
36 'opening_hours': data["openingHours"],
37 }
38
39 yield GeojsonPointItem(**properties)
40
41 def parse_store(self, response):
42 city_stores = response.xpath('//a[@class="location-name ga-link"]/@href').extract()
43 for city_store in city_stores:
44 yield scrapy.Request(
45 response.urljoin(city_store),
46 callback=self.get_store_info
47 )
48
49 def parse_state(self, response):
50
51 cities = response.xpath('//a[@class="ga-link"]/@href').extract()
52 for city in cities:
53 yield scrapy.Request(
54 response.urljoin(city),
55 callback=self.parse_store
56 )
57
58 def parse(self, response):
59 states = response.xpath('//a[@class="ga-link"]/@href').extract()
60
61 for state in states:
62 yield scrapy.Request(
63 response.urljoin(state),
64 callback=self.parse_state
65 )
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/arbys.py b/locations/spiders/arbys.py
--- a/locations/spiders/arbys.py
+++ b/locations/spiders/arbys.py
@@ -12,24 +12,31 @@
allowed_domains = ["locations.arbys.com"]
download_delay = 0.2
start_urls = (
- 'https://locations.arbys.com/browse/',
+ 'https://locations.arbys.com/',
)
def get_store_info(self, response):
data = response.xpath('//script[@type="application/ld+json"]/text()').extract_first()
if data:
- data = json.loads(data)[0]
+ try:
+ data = json.loads(data)[0]
+ except json.JSONDecodeError:
+ # Unescaped " on two pages
+ lines = data.split('\n')
+ i = 2 + next(i for (i, line) in enumerate(lines) if "mainContentOfPage" in line)
+ lines[i] = '"text": ""}'
+ data = '\n'.join(lines)
+ data = json.loads(data)[0]
+
properties = {
- # store name is after the pipe, e.g. Fast Food Drive-Thru
- # Restaurants | Arby's 8437
- 'ref': data["name"].rsplit("|", 1)[-1].strip(),
+ 'ref': response.css('div.store-id::text').get().split(': ')[-1],
'name': data["name"],
'addr_full': data["address"]["streetAddress"].strip(),
'city': data["address"]["addressLocality"].strip(),
'state': data["address"]["addressRegion"],
'postcode': data["address"]["postalCode"],
- 'phone': data.get("telephone", None),
+ 'phone': data["address"]["telephone"],
'lat': float(data["geo"]["latitude"]),
'lon': float(data["geo"]["longitude"]),
'website': response.url,
| {"golden_diff": "diff --git a/locations/spiders/arbys.py b/locations/spiders/arbys.py\n--- a/locations/spiders/arbys.py\n+++ b/locations/spiders/arbys.py\n@@ -12,24 +12,31 @@\n allowed_domains = [\"locations.arbys.com\"]\n download_delay = 0.2\n start_urls = (\n- 'https://locations.arbys.com/browse/',\n+ 'https://locations.arbys.com/',\n )\n \n def get_store_info(self, response):\n data = response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first()\n if data:\n- data = json.loads(data)[0]\n+ try:\n+ data = json.loads(data)[0]\n+ except json.JSONDecodeError:\n+ # Unescaped \" on two pages\n+ lines = data.split('\\n')\n+ i = 2 + next(i for (i, line) in enumerate(lines) if \"mainContentOfPage\" in line)\n+ lines[i] = '\"text\": \"\"}'\n+ data = '\\n'.join(lines)\n+ data = json.loads(data)[0]\n+\n \n properties = {\n- # store name is after the pipe, e.g. Fast Food Drive-Thru\n- # Restaurants | Arby's 8437\n- 'ref': data[\"name\"].rsplit(\"|\", 1)[-1].strip(),\n+ 'ref': response.css('div.store-id::text').get().split(': ')[-1],\n 'name': data[\"name\"],\n 'addr_full': data[\"address\"][\"streetAddress\"].strip(),\n 'city': data[\"address\"][\"addressLocality\"].strip(),\n 'state': data[\"address\"][\"addressRegion\"],\n 'postcode': data[\"address\"][\"postalCode\"],\n- 'phone': data.get(\"telephone\", None),\n+ 'phone': data[\"address\"][\"telephone\"],\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n 'website': response.url,\n", "issue": "Spider arby is broken\nDuring the global build at 2021-05-26-14-42-23, spider **arby** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/arby.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/arby.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/arby.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\nimport scrapy\nfrom locations.items import GeojsonPointItem\n\n\nclass ArbysSpider(scrapy.Spider):\n\n name = \"arby\"\n item_attributes = { 'brand': \"Arby's\", 'brand_wikidata': \"Q630866\" }\n allowed_domains = [\"locations.arbys.com\"]\n download_delay = 0.2\n start_urls = (\n 'https://locations.arbys.com/browse/',\n )\n\n def get_store_info(self, response):\n data = response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first()\n if data:\n data = json.loads(data)[0]\n\n properties = {\n # store name is after the pipe, e.g. 
Fast Food Drive-Thru\n # Restaurants | Arby's 8437\n 'ref': data[\"name\"].rsplit(\"|\", 1)[-1].strip(),\n 'name': data[\"name\"],\n 'addr_full': data[\"address\"][\"streetAddress\"].strip(),\n 'city': data[\"address\"][\"addressLocality\"].strip(),\n 'state': data[\"address\"][\"addressRegion\"],\n 'postcode': data[\"address\"][\"postalCode\"],\n 'phone': data.get(\"telephone\", None),\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n 'website': response.url,\n 'opening_hours': data[\"openingHours\"],\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_store(self, response):\n city_stores = response.xpath('//a[@class=\"location-name ga-link\"]/@href').extract()\n for city_store in city_stores:\n yield scrapy.Request(\n response.urljoin(city_store),\n callback=self.get_store_info\n )\n\n def parse_state(self, response):\n\n cities = response.xpath('//a[@class=\"ga-link\"]/@href').extract()\n for city in cities:\n yield scrapy.Request(\n response.urljoin(city),\n callback=self.parse_store\n )\n\n def parse(self, response):\n states = response.xpath('//a[@class=\"ga-link\"]/@href').extract()\n\n for state in states:\n yield scrapy.Request(\n response.urljoin(state),\n callback=self.parse_state\n )\n", "path": "locations/spiders/arbys.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\nimport scrapy\nfrom locations.items import GeojsonPointItem\n\n\nclass ArbysSpider(scrapy.Spider):\n\n name = \"arby\"\n item_attributes = { 'brand': \"Arby's\", 'brand_wikidata': \"Q630866\" }\n allowed_domains = [\"locations.arbys.com\"]\n download_delay = 0.2\n start_urls = (\n 'https://locations.arbys.com/',\n )\n\n def get_store_info(self, response):\n data = response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first()\n if data:\n try:\n data = json.loads(data)[0]\n except json.JSONDecodeError:\n # Unescaped \" on two pages\n lines = data.split('\\n')\n i = 2 + next(i for (i, line) in enumerate(lines) if \"mainContentOfPage\" in line)\n lines[i] = '\"text\": \"\"}'\n data = '\\n'.join(lines)\n data = json.loads(data)[0]\n\n\n properties = {\n 'ref': response.css('div.store-id::text').get().split(': ')[-1],\n 'name': data[\"name\"],\n 'addr_full': data[\"address\"][\"streetAddress\"].strip(),\n 'city': data[\"address\"][\"addressLocality\"].strip(),\n 'state': data[\"address\"][\"addressRegion\"],\n 'postcode': data[\"address\"][\"postalCode\"],\n 'phone': data[\"address\"][\"telephone\"],\n 'lat': float(data[\"geo\"][\"latitude\"]),\n 'lon': float(data[\"geo\"][\"longitude\"]),\n 'website': response.url,\n 'opening_hours': data[\"openingHours\"],\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_store(self, response):\n city_stores = response.xpath('//a[@class=\"location-name ga-link\"]/@href').extract()\n for city_store in city_stores:\n yield scrapy.Request(\n response.urljoin(city_store),\n callback=self.get_store_info\n )\n\n def parse_state(self, response):\n\n cities = response.xpath('//a[@class=\"ga-link\"]/@href').extract()\n for city in cities:\n yield scrapy.Request(\n response.urljoin(city),\n callback=self.parse_store\n )\n\n def parse(self, response):\n states = response.xpath('//a[@class=\"ga-link\"]/@href').extract()\n\n for state in states:\n yield scrapy.Request(\n response.urljoin(state),\n callback=self.parse_state\n )\n", "path": "locations/spiders/arbys.py"}]} | 1,062 | 446 |
gh_patches_debug_33993 | rasdani/github-patches | git_diff | mozilla__kitsune-3175 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve performance of _get_creator_counts util function
`kitsune.community.utils._get_creator_counts` util function is DB heavy and takes a lot of time to execute. Evaluate its usefulness and provide a way to optimize the query and/or cache the results. 
This issue is related to the degraded performance SUMO experienced on Fri March 30th ([NR Error](https://rpm.newrelic.com/accounts/1299394/applications/45097089/downtime/34422892))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kitsune/community/utils.py`
Content:
```
1 import hashlib
2
3 from datetime import datetime, date, timedelta
4 from django.conf import settings
5 from django.core.cache import cache
6 from django.db.models import Count, F
7
8 from kitsune.products.models import Product
9 from kitsune.questions.models import Answer
10 from kitsune.users.models import User
11 from kitsune.users.templatetags.jinja_helpers import profile_avatar
12 from kitsune.wiki.models import Revision
13
14
15 def top_contributors_questions(start=None, end=None, locale=None, product=None,
16 count=10, page=1, use_cache=True):
17 """Get the top Support Forum contributors."""
18 if use_cache:
19 cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product, count, page)
20 cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest()
21 cache_key = 'top_contributors_questions_{}'.format(cache_key)
22 cached = cache.get(cache_key, None)
23 if cached:
24 return cached
25
26 answers = (Answer.objects
27 .exclude(is_spam=True)
28 .exclude(question__is_spam=True)
29 # Adding answer to your own question, isn't a contribution.
30 .exclude(creator_id=F('question__creator_id')))
31
32 if start is None:
33 # By default we go back 90 days.
34 start = date.today() - timedelta(days=90)
35 answers = answers.filter(created__gte=start)
36 if end:
37 # If no end is specified, we don't need to filter by it.
38 answers = answers.filter(created__lt=end)
39 if locale:
40 answers = answers.filter(question__locale=locale)
41 if product:
42 if isinstance(product, Product):
43 product = product.slug
44 answers = answers.filter(question__product__slug=product)
45
46 users = (User.objects
47 .filter(answers__in=answers)
48 .annotate(query_count=Count('answers'))
49 .order_by('-query_count'))
50 counts = _get_creator_counts(users, count, page)
51
52 if use_cache:
53 cache.set(cache_key, counts, 60*15) # 15 minutes
54 return counts
55
56
57 def top_contributors_kb(start=None, end=None, product=None, count=10, page=1, use_cache=True):
58 """Get the top KB editors (locale='en-US')."""
59 return top_contributors_l10n(
60 start, end, settings.WIKI_DEFAULT_LANGUAGE, product, count, use_cache)
61
62
63 def top_contributors_l10n(start=None, end=None, locale=None, product=None,
64 count=10, page=1, use_cache=True):
65 """Get the top l10n contributors for the KB."""
66 if use_cache:
67 cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product, count, page)
68 cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest()
69 cache_key = u'top_contributors_l10n_{}'.format(cache_key)
70 cached = cache.get(cache_key, None)
71 if cached:
72 return cached
73
74 # Get the user ids and contribution count of the top contributors.
75 revisions = Revision.objects.all()
76 if locale is None:
77 # If there is no locale specified, exclude en-US only. The rest are
78 # l10n.
79 revisions = revisions.exclude(document__locale=settings.WIKI_DEFAULT_LANGUAGE)
80 if start is None:
81 # By default we go back 90 days.
82 start = date.today() - timedelta(days=90)
83 revisions = revisions.filter(created__gte=start)
84 if end:
85 # If no end is specified, we don't need to filter by it.
86 revisions = revisions.filter(created__lt=end)
87 if locale:
88 revisions = revisions.filter(document__locale=locale)
89 if product:
90 if isinstance(product, Product):
91 product = product.slug
92 revisions = revisions.filter(document__products__slug=product)
93
94 users = (User.objects
95 .filter(created_revisions__in=revisions)
96 .annotate(query_count=Count('created_revisions'))
97 .order_by('-query_count'))
98 counts = _get_creator_counts(users, count, page)
99
100 if use_cache:
101 cache.set(cache_key, counts, 60*15) # 15 minutes
102 return counts
103
104
105 def top_contributors_aoa(start=None, end=None, locale=None, count=10, page=1, use_cache=True):
106 """Get the top Army of Awesome contributors."""
107 # AoA is deprecated, return 0 until we remove all related code.
108 return ([], 0)
109
110
111 def _get_creator_counts(query, count, page):
112 total = query.count()
113 results = []
114 now = datetime.now()
115 for user in query[((page - 1) * count):(page * count)]:
116 last_contribution_date = user.profile.last_contribution_date
117 days_since_last_activity = None
118 if last_contribution_date:
119 days_since_last_activity = now - last_contribution_date
120
121 data = {
122 'count': user.query_count,
123 'term': user.id,
124 'user': {
125 'id': user.id,
126 'username': user.username,
127 'display_name': user.profile.display_name,
128 'avatar': profile_avatar(user, size=120),
129 'twitter_usernames': user.profile.twitter_usernames,
130 'last_contribution_date': last_contribution_date,
131 'days_since_last_activity': days_since_last_activity,
132 }
133 }
134 results.append(data)
135
136 return (results, total)
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kitsune/community/utils.py b/kitsune/community/utils.py
--- a/kitsune/community/utils.py
+++ b/kitsune/community/utils.py
@@ -7,8 +7,7 @@
from kitsune.products.models import Product
from kitsune.questions.models import Answer
-from kitsune.users.models import User
-from kitsune.users.templatetags.jinja_helpers import profile_avatar
+from kitsune.users.models import User, UserMappingType
from kitsune.wiki.models import Revision
@@ -110,27 +109,36 @@
def _get_creator_counts(query, count, page):
total = query.count()
+
+ start = (page - 1) * count
+ end = page * count
+ query_data = query.values('id', 'query_count')[start:end]
+
+ query_data = {obj['id']: obj['query_count'] for obj in query_data}
+
+ users_data = (UserMappingType.search().filter(id__in=query_data.keys())
+ .values_dict('id', 'username', 'display_name',
+ 'avatar', 'twitter_usernames',
+ 'last_contribution_date')[:count])
+
+ users_data = UserMappingType.reshape(users_data)
+
results = []
now = datetime.now()
- for user in query[((page - 1) * count):(page * count)]:
- last_contribution_date = user.profile.last_contribution_date
- days_since_last_activity = None
- if last_contribution_date:
- days_since_last_activity = now - last_contribution_date
+
+ for u_data in users_data:
+ user_id = u_data.get('id')
+ last_contribution_date = u_data.get('last_contribution_date', None)
+
+ u_data['days_since_last_activity'] = ((now - last_contribution_date).days
+ if last_contribution_date else None)
data = {
- 'count': user.query_count,
- 'term': user.id,
- 'user': {
- 'id': user.id,
- 'username': user.username,
- 'display_name': user.profile.display_name,
- 'avatar': profile_avatar(user, size=120),
- 'twitter_usernames': user.profile.twitter_usernames,
- 'last_contribution_date': last_contribution_date,
- 'days_since_last_activity': days_since_last_activity,
- }
+ 'count': query_data.get(user_id),
+ 'term': user_id,
+ 'user': u_data
}
+
results.append(data)
- return (results, total)
+ return results, total
| {"golden_diff": "diff --git a/kitsune/community/utils.py b/kitsune/community/utils.py\n--- a/kitsune/community/utils.py\n+++ b/kitsune/community/utils.py\n@@ -7,8 +7,7 @@\n \n from kitsune.products.models import Product\n from kitsune.questions.models import Answer\n-from kitsune.users.models import User\n-from kitsune.users.templatetags.jinja_helpers import profile_avatar\n+from kitsune.users.models import User, UserMappingType\n from kitsune.wiki.models import Revision\n \n \n@@ -110,27 +109,36 @@\n \n def _get_creator_counts(query, count, page):\n total = query.count()\n+\n+ start = (page - 1) * count\n+ end = page * count\n+ query_data = query.values('id', 'query_count')[start:end]\n+\n+ query_data = {obj['id']: obj['query_count'] for obj in query_data}\n+\n+ users_data = (UserMappingType.search().filter(id__in=query_data.keys())\n+ .values_dict('id', 'username', 'display_name',\n+ 'avatar', 'twitter_usernames',\n+ 'last_contribution_date')[:count])\n+\n+ users_data = UserMappingType.reshape(users_data)\n+\n results = []\n now = datetime.now()\n- for user in query[((page - 1) * count):(page * count)]:\n- last_contribution_date = user.profile.last_contribution_date\n- days_since_last_activity = None\n- if last_contribution_date:\n- days_since_last_activity = now - last_contribution_date\n+\n+ for u_data in users_data:\n+ user_id = u_data.get('id')\n+ last_contribution_date = u_data.get('last_contribution_date', None)\n+\n+ u_data['days_since_last_activity'] = ((now - last_contribution_date).days\n+ if last_contribution_date else None)\n \n data = {\n- 'count': user.query_count,\n- 'term': user.id,\n- 'user': {\n- 'id': user.id,\n- 'username': user.username,\n- 'display_name': user.profile.display_name,\n- 'avatar': profile_avatar(user, size=120),\n- 'twitter_usernames': user.profile.twitter_usernames,\n- 'last_contribution_date': last_contribution_date,\n- 'days_since_last_activity': days_since_last_activity,\n- }\n+ 'count': query_data.get(user_id),\n+ 'term': user_id,\n+ 'user': u_data\n }\n+\n results.append(data)\n \n- return (results, total)\n+ return results, total\n", "issue": "Improve performance of _get_creator_counts util function\n`kitsune.community.utils._get_creator_counts` until function is DB heavy and takes a lot of time to execute. Evaluate its usefulness and provide a way to optimize the query and/or cache the results. 
\r\n\r\nThis issue is related to the degraded performance SUMO experienced on Fri March 30th ([NR Error](https://rpm.newrelic.com/accounts/1299394/applications/45097089/downtime/34422892))\n", "before_files": [{"content": "import hashlib\n\nfrom datetime import datetime, date, timedelta\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.db.models import Count, F\n\nfrom kitsune.products.models import Product\nfrom kitsune.questions.models import Answer\nfrom kitsune.users.models import User\nfrom kitsune.users.templatetags.jinja_helpers import profile_avatar\nfrom kitsune.wiki.models import Revision\n\n\ndef top_contributors_questions(start=None, end=None, locale=None, product=None,\n count=10, page=1, use_cache=True):\n \"\"\"Get the top Support Forum contributors.\"\"\"\n if use_cache:\n cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product, count, page)\n cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest()\n cache_key = 'top_contributors_questions_{}'.format(cache_key)\n cached = cache.get(cache_key, None)\n if cached:\n return cached\n\n answers = (Answer.objects\n .exclude(is_spam=True)\n .exclude(question__is_spam=True)\n # Adding answer to your own question, isn't a contribution.\n .exclude(creator_id=F('question__creator_id')))\n\n if start is None:\n # By default we go back 90 days.\n start = date.today() - timedelta(days=90)\n answers = answers.filter(created__gte=start)\n if end:\n # If no end is specified, we don't need to filter by it.\n answers = answers.filter(created__lt=end)\n if locale:\n answers = answers.filter(question__locale=locale)\n if product:\n if isinstance(product, Product):\n product = product.slug\n answers = answers.filter(question__product__slug=product)\n\n users = (User.objects\n .filter(answers__in=answers)\n .annotate(query_count=Count('answers'))\n .order_by('-query_count'))\n counts = _get_creator_counts(users, count, page)\n\n if use_cache:\n cache.set(cache_key, counts, 60*15) # 15 minutes\n return counts\n\n\ndef top_contributors_kb(start=None, end=None, product=None, count=10, page=1, use_cache=True):\n \"\"\"Get the top KB editors (locale='en-US').\"\"\"\n return top_contributors_l10n(\n start, end, settings.WIKI_DEFAULT_LANGUAGE, product, count, use_cache)\n\n\ndef top_contributors_l10n(start=None, end=None, locale=None, product=None,\n count=10, page=1, use_cache=True):\n \"\"\"Get the top l10n contributors for the KB.\"\"\"\n if use_cache:\n cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product, count, page)\n cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest()\n cache_key = u'top_contributors_l10n_{}'.format(cache_key)\n cached = cache.get(cache_key, None)\n if cached:\n return cached\n\n # Get the user ids and contribution count of the top contributors.\n revisions = Revision.objects.all()\n if locale is None:\n # If there is no locale specified, exclude en-US only. 
The rest are\n # l10n.\n revisions = revisions.exclude(document__locale=settings.WIKI_DEFAULT_LANGUAGE)\n if start is None:\n # By default we go back 90 days.\n start = date.today() - timedelta(days=90)\n revisions = revisions.filter(created__gte=start)\n if end:\n # If no end is specified, we don't need to filter by it.\n revisions = revisions.filter(created__lt=end)\n if locale:\n revisions = revisions.filter(document__locale=locale)\n if product:\n if isinstance(product, Product):\n product = product.slug\n revisions = revisions.filter(document__products__slug=product)\n\n users = (User.objects\n .filter(created_revisions__in=revisions)\n .annotate(query_count=Count('created_revisions'))\n .order_by('-query_count'))\n counts = _get_creator_counts(users, count, page)\n\n if use_cache:\n cache.set(cache_key, counts, 60*15) # 15 minutes\n return counts\n\n\ndef top_contributors_aoa(start=None, end=None, locale=None, count=10, page=1, use_cache=True):\n \"\"\"Get the top Army of Awesome contributors.\"\"\"\n # AoA is deprecated, return 0 until we remove all related code.\n return ([], 0)\n\n\ndef _get_creator_counts(query, count, page):\n total = query.count()\n results = []\n now = datetime.now()\n for user in query[((page - 1) * count):(page * count)]:\n last_contribution_date = user.profile.last_contribution_date\n days_since_last_activity = None\n if last_contribution_date:\n days_since_last_activity = now - last_contribution_date\n\n data = {\n 'count': user.query_count,\n 'term': user.id,\n 'user': {\n 'id': user.id,\n 'username': user.username,\n 'display_name': user.profile.display_name,\n 'avatar': profile_avatar(user, size=120),\n 'twitter_usernames': user.profile.twitter_usernames,\n 'last_contribution_date': last_contribution_date,\n 'days_since_last_activity': days_since_last_activity,\n }\n }\n results.append(data)\n\n return (results, total)\n", "path": "kitsune/community/utils.py"}], "after_files": [{"content": "import hashlib\n\nfrom datetime import datetime, date, timedelta\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.db.models import Count, F\n\nfrom kitsune.products.models import Product\nfrom kitsune.questions.models import Answer\nfrom kitsune.users.models import User, UserMappingType\nfrom kitsune.wiki.models import Revision\n\n\ndef top_contributors_questions(start=None, end=None, locale=None, product=None,\n count=10, page=1, use_cache=True):\n \"\"\"Get the top Support Forum contributors.\"\"\"\n if use_cache:\n cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product, count, page)\n cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest()\n cache_key = 'top_contributors_questions_{}'.format(cache_key)\n cached = cache.get(cache_key, None)\n if cached:\n return cached\n\n answers = (Answer.objects\n .exclude(is_spam=True)\n .exclude(question__is_spam=True)\n # Adding answer to your own question, isn't a contribution.\n .exclude(creator_id=F('question__creator_id')))\n\n if start is None:\n # By default we go back 90 days.\n start = date.today() - timedelta(days=90)\n answers = answers.filter(created__gte=start)\n if end:\n # If no end is specified, we don't need to filter by it.\n answers = answers.filter(created__lt=end)\n if locale:\n answers = answers.filter(question__locale=locale)\n if product:\n if isinstance(product, Product):\n product = product.slug\n answers = answers.filter(question__product__slug=product)\n\n users = (User.objects\n .filter(answers__in=answers)\n 
.annotate(query_count=Count('answers'))\n .order_by('-query_count'))\n counts = _get_creator_counts(users, count, page)\n\n if use_cache:\n cache.set(cache_key, counts, 60*15) # 15 minutes\n return counts\n\n\ndef top_contributors_kb(start=None, end=None, product=None, count=10, page=1, use_cache=True):\n \"\"\"Get the top KB editors (locale='en-US').\"\"\"\n return top_contributors_l10n(\n start, end, settings.WIKI_DEFAULT_LANGUAGE, product, count, use_cache)\n\n\ndef top_contributors_l10n(start=None, end=None, locale=None, product=None,\n count=10, page=1, use_cache=True):\n \"\"\"Get the top l10n contributors for the KB.\"\"\"\n if use_cache:\n cache_key = u'{}_{}_{}_{}_{}_{}'.format(start, end, locale, product, count, page)\n cache_key = hashlib.sha1(cache_key.encode('utf-8')).hexdigest()\n cache_key = u'top_contributors_l10n_{}'.format(cache_key)\n cached = cache.get(cache_key, None)\n if cached:\n return cached\n\n # Get the user ids and contribution count of the top contributors.\n revisions = Revision.objects.all()\n if locale is None:\n # If there is no locale specified, exclude en-US only. The rest are\n # l10n.\n revisions = revisions.exclude(document__locale=settings.WIKI_DEFAULT_LANGUAGE)\n if start is None:\n # By default we go back 90 days.\n start = date.today() - timedelta(days=90)\n revisions = revisions.filter(created__gte=start)\n if end:\n # If no end is specified, we don't need to filter by it.\n revisions = revisions.filter(created__lt=end)\n if locale:\n revisions = revisions.filter(document__locale=locale)\n if product:\n if isinstance(product, Product):\n product = product.slug\n revisions = revisions.filter(document__products__slug=product)\n\n users = (User.objects\n .filter(created_revisions__in=revisions)\n .annotate(query_count=Count('created_revisions'))\n .order_by('-query_count'))\n counts = _get_creator_counts(users, count, page)\n\n if use_cache:\n cache.set(cache_key, counts, 60*15) # 15 minutes\n return counts\n\n\ndef top_contributors_aoa(start=None, end=None, locale=None, count=10, page=1, use_cache=True):\n \"\"\"Get the top Army of Awesome contributors.\"\"\"\n # AoA is deprecated, return 0 until we remove all related code.\n return ([], 0)\n\n\ndef _get_creator_counts(query, count, page):\n total = query.count()\n\n start = (page - 1) * count\n end = page * count\n query_data = query.values('id', 'query_count')[start:end]\n\n query_data = {obj['id']: obj['query_count'] for obj in query_data}\n\n users_data = (UserMappingType.search().filter(id__in=query_data.keys())\n .values_dict('id', 'username', 'display_name',\n 'avatar', 'twitter_usernames',\n 'last_contribution_date')[:count])\n\n users_data = UserMappingType.reshape(users_data)\n\n results = []\n now = datetime.now()\n\n for u_data in users_data:\n user_id = u_data.get('id')\n last_contribution_date = u_data.get('last_contribution_date', None)\n\n u_data['days_since_last_activity'] = ((now - last_contribution_date).days\n if last_contribution_date else None)\n\n data = {\n 'count': query_data.get(user_id),\n 'term': user_id,\n 'user': u_data\n }\n\n results.append(data)\n\n return results, total\n", "path": "kitsune/community/utils.py"}]} | 1,881 | 584 |
gh_patches_debug_55589 | rasdani/github-patches | git_diff | wagtail__wagtail-8800 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
patternlibrary_override_tags breaks all non-development installations
#8665 added a `patternlibrary_override_tags` tag library to wagtail.admin, which depends on the django-pattern-library package listed in our testing_extras dependencies. However, this tag library will be loaded on all environments that have `wagtail.admin` in their INSTALLED_APPS, so any Wagtail installation that doesn't include testing_extras is currently broken.
As a non-frontend dev, I don't know what this tag library is for, and there's no documentation for me to find out. Deleting it and uninstalling django-pattern-library doesn't break any tests, so it seems to me that it doesn't really belong in the testing dependencies. (If it really is a testing dependency, I'd suggest that moving patternlibrary_override_tags.py into one of the test apps in wagtail/test/ would let it do whatever it's meant to do without breaking non-testing environments.)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/admin/templatetags/patternlibrary_override_tags.py`
Content:
```
1 from pattern_library.monkey_utils import override_tag
2
3 from wagtail.admin.templatetags.wagtailadmin_tags import register
4
5 override_tag(register, name="test_page_is_public")
6
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/admin/templatetags/patternlibrary_override_tags.py b/wagtail/admin/templatetags/patternlibrary_override_tags.py
deleted file mode 100644
--- a/wagtail/admin/templatetags/patternlibrary_override_tags.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from pattern_library.monkey_utils import override_tag
-
-from wagtail.admin.templatetags.wagtailadmin_tags import register
-
-override_tag(register, name="test_page_is_public")
| {"golden_diff": "diff --git a/wagtail/admin/templatetags/patternlibrary_override_tags.py b/wagtail/admin/templatetags/patternlibrary_override_tags.py\ndeleted file mode 100644\n--- a/wagtail/admin/templatetags/patternlibrary_override_tags.py\n+++ /dev/null\n@@ -1,5 +0,0 @@\n-from pattern_library.monkey_utils import override_tag\n-\n-from wagtail.admin.templatetags.wagtailadmin_tags import register\n-\n-override_tag(register, name=\"test_page_is_public\")\n", "issue": "patternlibrary_override_tags breaks all non-development installations\n#8665 added a `patternlibrary_override_tags` tag library to wagtail.admin, which depends on the django-pattern-library package listed in our testing_extras dependencies. However, this tag library will be loaded on all environments that have `wagtail.admin` in their INSTALLED_APPS, so any Wagtail installation that doesn't include testing_extras is currently broken.\r\n\r\nAs a non-frontend dev, I don't know what this tag library is for, and there's no documentation for me to find out. Deleting it and uninstalling django-pattern-library doesn't break any tests, so it seems to me that it doesn't really belong in the testing dependencies. (If it really is a testing dependency, I'd suggest that moving patternlibrary_override_tags.py into one of the test apps in wagtail/test/ would let it do whatever it's meant to do without breaking non-testing environments.)\n", "before_files": [{"content": "from pattern_library.monkey_utils import override_tag\n\nfrom wagtail.admin.templatetags.wagtailadmin_tags import register\n\noverride_tag(register, name=\"test_page_is_public\")\n", "path": "wagtail/admin/templatetags/patternlibrary_override_tags.py"}], "after_files": [{"content": null, "path": "wagtail/admin/templatetags/patternlibrary_override_tags.py"}]} | 507 | 121 |
gh_patches_debug_17682 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-3868 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mypy invalid syntax error with python 2
### Which version of dd-trace-py are you using?
v1.2.0
### Which version of pip are you using?
22.1.2
### Which version of the libraries are you using?
```
ddtrace==1.2.0
mypy[python2]==0.961
```
### How can we reproduce your problem?
1. Clone https://gist.github.com/majorgreys/720201c4bdd33131e4c231207ef716ce
2. Run `docker run --rm -it (docker build .)`
### What is the result that you get?
```
usr/local/lib/python3.9/site-packages/ddtrace/contrib/redis/asyncio_patch.py:12: error: invalid syntax
Found 1 error in 1 file (errors prevented further checking)
```
### What is the result that you expected?
```
Success: no issues found in 1 source file
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/redis/patch.py`
Content:
```
1 import redis
2
3 from ddtrace import config
4 from ddtrace.vendor import wrapt
5
6 from ...internal.utils.formats import stringify_cache_args
7 from ...pin import Pin
8 from ..trace_utils import unwrap
9 from .util import _trace_redis_cmd
10 from .util import _trace_redis_execute_pipeline
11
12
13 config._add("redis", dict(_default_service="redis"))
14
15
16 def patch():
17 """Patch the instrumented methods
18
19 This duplicated doesn't look nice. The nicer alternative is to use an ObjectProxy on top
20 of Redis and StrictRedis. However, it means that any "import redis.Redis" won't be instrumented.
21 """
22 if getattr(redis, "_datadog_patch", False):
23 return
24 setattr(redis, "_datadog_patch", True)
25
26 _w = wrapt.wrap_function_wrapper
27
28 if redis.VERSION < (3, 0, 0):
29 _w("redis", "StrictRedis.execute_command", traced_execute_command)
30 _w("redis", "StrictRedis.pipeline", traced_pipeline)
31 _w("redis", "Redis.pipeline", traced_pipeline)
32 _w("redis.client", "BasePipeline.execute", traced_execute_pipeline)
33 _w("redis.client", "BasePipeline.immediate_execute_command", traced_execute_command)
34 else:
35 _w("redis", "Redis.execute_command", traced_execute_command)
36 _w("redis", "Redis.pipeline", traced_pipeline)
37 _w("redis.client", "Pipeline.execute", traced_execute_pipeline)
38 _w("redis.client", "Pipeline.immediate_execute_command", traced_execute_command)
39 if redis.VERSION >= (4, 2, 0):
40 from .asyncio_patch import traced_async_execute_command
41 from .asyncio_patch import traced_async_execute_pipeline
42
43 _w("redis.asyncio.client", "Redis.execute_command", traced_async_execute_command)
44 _w("redis.asyncio.client", "Redis.pipeline", traced_pipeline)
45 _w("redis.asyncio.client", "Pipeline.execute", traced_async_execute_pipeline)
46 _w("redis.asyncio.client", "Pipeline.immediate_execute_command", traced_async_execute_command)
47 Pin(service=None).onto(redis.asyncio.Redis)
48 Pin(service=None).onto(redis.StrictRedis)
49
50
51 def unpatch():
52 if getattr(redis, "_datadog_patch", False):
53 setattr(redis, "_datadog_patch", False)
54
55 if redis.VERSION < (3, 0, 0):
56 unwrap(redis.StrictRedis, "execute_command")
57 unwrap(redis.StrictRedis, "pipeline")
58 unwrap(redis.Redis, "pipeline")
59 unwrap(redis.client.BasePipeline, "execute")
60 unwrap(redis.client.BasePipeline, "immediate_execute_command")
61 else:
62 unwrap(redis.Redis, "execute_command")
63 unwrap(redis.Redis, "pipeline")
64 unwrap(redis.client.Pipeline, "execute")
65 unwrap(redis.client.Pipeline, "immediate_execute_command")
66 if redis.VERSION >= (4, 2, 0):
67 unwrap(redis.asyncio.client.Redis, "execute_command")
68 unwrap(redis.asyncio.client.Redis, "pipeline")
69 unwrap(redis.asyncio.client.Pipeline, "execute")
70 unwrap(redis.asyncio.client.Pipeline, "immediate_execute_command")
71
72
73 #
74 # tracing functions
75 #
76 def traced_execute_command(func, instance, args, kwargs):
77 pin = Pin.get_from(instance)
78 if not pin or not pin.enabled():
79 return func(*args, **kwargs)
80
81 with _trace_redis_cmd(pin, config.redis, instance, args):
82 return func(*args, **kwargs)
83
84
85 def traced_pipeline(func, instance, args, kwargs):
86 pipeline = func(*args, **kwargs)
87 pin = Pin.get_from(instance)
88 if pin:
89 pin.onto(pipeline)
90 return pipeline
91
92
93 def traced_execute_pipeline(func, instance, args, kwargs):
94 pin = Pin.get_from(instance)
95 if not pin or not pin.enabled():
96 return func(*args, **kwargs)
97
98 cmds = [stringify_cache_args(c) for c, _ in instance.command_stack]
99 resource = "\n".join(cmds)
100 with _trace_redis_execute_pipeline(pin, config.redis, resource, instance):
101 return func(*args, **kwargs)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ddtrace/contrib/redis/patch.py b/ddtrace/contrib/redis/patch.py
--- a/ddtrace/contrib/redis/patch.py
+++ b/ddtrace/contrib/redis/patch.py
@@ -1,4 +1,5 @@
import redis
+from six import PY3
from ddtrace import config
from ddtrace.vendor import wrapt
@@ -36,7 +37,8 @@
_w("redis", "Redis.pipeline", traced_pipeline)
_w("redis.client", "Pipeline.execute", traced_execute_pipeline)
_w("redis.client", "Pipeline.immediate_execute_command", traced_execute_command)
- if redis.VERSION >= (4, 2, 0):
+ # Avoid mypy invalid syntax errors when parsing Python 2 files
+ if PY3 and redis.VERSION >= (4, 2, 0):
from .asyncio_patch import traced_async_execute_command
from .asyncio_patch import traced_async_execute_pipeline
| {"golden_diff": "diff --git a/ddtrace/contrib/redis/patch.py b/ddtrace/contrib/redis/patch.py\n--- a/ddtrace/contrib/redis/patch.py\n+++ b/ddtrace/contrib/redis/patch.py\n@@ -1,4 +1,5 @@\n import redis\n+from six import PY3\n \n from ddtrace import config\n from ddtrace.vendor import wrapt\n@@ -36,7 +37,8 @@\n _w(\"redis\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n _w(\"redis.client\", \"Pipeline.immediate_execute_command\", traced_execute_command)\n- if redis.VERSION >= (4, 2, 0):\n+ # Avoid mypy invalid syntax errors when parsing Python 2 files\n+ if PY3 and redis.VERSION >= (4, 2, 0):\n from .asyncio_patch import traced_async_execute_command\n from .asyncio_patch import traced_async_execute_pipeline\n", "issue": "mypy invalid syntax error with python 2\n### Which version of dd-trace-py are you using?\r\n\r\nv1.2.0\r\n\r\n### Which version of pip are you using?\r\n\r\n22.1.2\r\n\r\n### Which version of the libraries are you using?\r\n\r\n```\r\nddtrace==1.2.0\r\nmypy[python2]==0.961\r\n```\r\n\r\n### How can we reproduce your problem?\r\n\r\n1. Clone https://gist.github.com/majorgreys/720201c4bdd33131e4c231207ef716ce\r\n2. Run `docker run --rm -it (docker build .)`\r\n\r\n### What is the result that you get?\r\n\r\n```\r\nusr/local/lib/python3.9/site-packages/ddtrace/contrib/redis/asyncio_patch.py:12: error: invalid syntax\r\nFound 1 error in 1 file (errors prevented further checking)\r\n```\r\n\r\n### What is the result that you expected?\r\n\r\n```\r\nSuccess: no issues found in 1 source file\r\n```\r\n\n", "before_files": [{"content": "import redis\n\nfrom ddtrace import config\nfrom ddtrace.vendor import wrapt\n\nfrom ...internal.utils.formats import stringify_cache_args\nfrom ...pin import Pin\nfrom ..trace_utils import unwrap\nfrom .util import _trace_redis_cmd\nfrom .util import _trace_redis_execute_pipeline\n\n\nconfig._add(\"redis\", dict(_default_service=\"redis\"))\n\n\ndef patch():\n \"\"\"Patch the instrumented methods\n\n This duplicated doesn't look nice. The nicer alternative is to use an ObjectProxy on top\n of Redis and StrictRedis. 
However, it means that any \"import redis.Redis\" won't be instrumented.\n \"\"\"\n if getattr(redis, \"_datadog_patch\", False):\n return\n setattr(redis, \"_datadog_patch\", True)\n\n _w = wrapt.wrap_function_wrapper\n\n if redis.VERSION < (3, 0, 0):\n _w(\"redis\", \"StrictRedis.execute_command\", traced_execute_command)\n _w(\"redis\", \"StrictRedis.pipeline\", traced_pipeline)\n _w(\"redis\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.client\", \"BasePipeline.execute\", traced_execute_pipeline)\n _w(\"redis.client\", \"BasePipeline.immediate_execute_command\", traced_execute_command)\n else:\n _w(\"redis\", \"Redis.execute_command\", traced_execute_command)\n _w(\"redis\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n _w(\"redis.client\", \"Pipeline.immediate_execute_command\", traced_execute_command)\n if redis.VERSION >= (4, 2, 0):\n from .asyncio_patch import traced_async_execute_command\n from .asyncio_patch import traced_async_execute_pipeline\n\n _w(\"redis.asyncio.client\", \"Redis.execute_command\", traced_async_execute_command)\n _w(\"redis.asyncio.client\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.asyncio.client\", \"Pipeline.execute\", traced_async_execute_pipeline)\n _w(\"redis.asyncio.client\", \"Pipeline.immediate_execute_command\", traced_async_execute_command)\n Pin(service=None).onto(redis.asyncio.Redis)\n Pin(service=None).onto(redis.StrictRedis)\n\n\ndef unpatch():\n if getattr(redis, \"_datadog_patch\", False):\n setattr(redis, \"_datadog_patch\", False)\n\n if redis.VERSION < (3, 0, 0):\n unwrap(redis.StrictRedis, \"execute_command\")\n unwrap(redis.StrictRedis, \"pipeline\")\n unwrap(redis.Redis, \"pipeline\")\n unwrap(redis.client.BasePipeline, \"execute\")\n unwrap(redis.client.BasePipeline, \"immediate_execute_command\")\n else:\n unwrap(redis.Redis, \"execute_command\")\n unwrap(redis.Redis, \"pipeline\")\n unwrap(redis.client.Pipeline, \"execute\")\n unwrap(redis.client.Pipeline, \"immediate_execute_command\")\n if redis.VERSION >= (4, 2, 0):\n unwrap(redis.asyncio.client.Redis, \"execute_command\")\n unwrap(redis.asyncio.client.Redis, \"pipeline\")\n unwrap(redis.asyncio.client.Pipeline, \"execute\")\n unwrap(redis.asyncio.client.Pipeline, \"immediate_execute_command\")\n\n\n#\n# tracing functions\n#\ndef traced_execute_command(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n with _trace_redis_cmd(pin, config.redis, instance, args):\n return func(*args, **kwargs)\n\n\ndef traced_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\ndef traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n cmds = [stringify_cache_args(c) for c, _ in instance.command_stack]\n resource = \"\\n\".join(cmds)\n with _trace_redis_execute_pipeline(pin, config.redis, resource, instance):\n return func(*args, **kwargs)\n", "path": "ddtrace/contrib/redis/patch.py"}], "after_files": [{"content": "import redis\nfrom six import PY3\n\nfrom ddtrace import config\nfrom ddtrace.vendor import wrapt\n\nfrom ...internal.utils.formats import stringify_cache_args\nfrom ...pin import Pin\nfrom ..trace_utils import unwrap\nfrom .util import _trace_redis_cmd\nfrom .util import _trace_redis_execute_pipeline\n\n\nconfig._add(\"redis\", 
dict(_default_service=\"redis\"))\n\n\ndef patch():\n \"\"\"Patch the instrumented methods\n\n This duplicated doesn't look nice. The nicer alternative is to use an ObjectProxy on top\n of Redis and StrictRedis. However, it means that any \"import redis.Redis\" won't be instrumented.\n \"\"\"\n if getattr(redis, \"_datadog_patch\", False):\n return\n setattr(redis, \"_datadog_patch\", True)\n\n _w = wrapt.wrap_function_wrapper\n\n if redis.VERSION < (3, 0, 0):\n _w(\"redis\", \"StrictRedis.execute_command\", traced_execute_command)\n _w(\"redis\", \"StrictRedis.pipeline\", traced_pipeline)\n _w(\"redis\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.client\", \"BasePipeline.execute\", traced_execute_pipeline)\n _w(\"redis.client\", \"BasePipeline.immediate_execute_command\", traced_execute_command)\n else:\n _w(\"redis\", \"Redis.execute_command\", traced_execute_command)\n _w(\"redis\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n _w(\"redis.client\", \"Pipeline.immediate_execute_command\", traced_execute_command)\n # Avoid mypy invalid syntax errors when parsing Python 2 files\n if PY3 and redis.VERSION >= (4, 2, 0):\n from .asyncio_patch import traced_async_execute_command\n from .asyncio_patch import traced_async_execute_pipeline\n\n _w(\"redis.asyncio.client\", \"Redis.execute_command\", traced_async_execute_command)\n _w(\"redis.asyncio.client\", \"Redis.pipeline\", traced_pipeline)\n _w(\"redis.asyncio.client\", \"Pipeline.execute\", traced_async_execute_pipeline)\n _w(\"redis.asyncio.client\", \"Pipeline.immediate_execute_command\", traced_async_execute_command)\n Pin(service=None).onto(redis.asyncio.Redis)\n Pin(service=None).onto(redis.StrictRedis)\n\n\ndef unpatch():\n if getattr(redis, \"_datadog_patch\", False):\n setattr(redis, \"_datadog_patch\", False)\n\n if redis.VERSION < (3, 0, 0):\n unwrap(redis.StrictRedis, \"execute_command\")\n unwrap(redis.StrictRedis, \"pipeline\")\n unwrap(redis.Redis, \"pipeline\")\n unwrap(redis.client.BasePipeline, \"execute\")\n unwrap(redis.client.BasePipeline, \"immediate_execute_command\")\n else:\n unwrap(redis.Redis, \"execute_command\")\n unwrap(redis.Redis, \"pipeline\")\n unwrap(redis.client.Pipeline, \"execute\")\n unwrap(redis.client.Pipeline, \"immediate_execute_command\")\n if redis.VERSION >= (4, 2, 0):\n unwrap(redis.asyncio.client.Redis, \"execute_command\")\n unwrap(redis.asyncio.client.Redis, \"pipeline\")\n unwrap(redis.asyncio.client.Pipeline, \"execute\")\n unwrap(redis.asyncio.client.Pipeline, \"immediate_execute_command\")\n\n\n#\n# tracing functions\n#\ndef traced_execute_command(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n with _trace_redis_cmd(pin, config.redis, instance, args):\n return func(*args, **kwargs)\n\n\ndef traced_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\ndef traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n cmds = [stringify_cache_args(c) for c, _ in instance.command_stack]\n resource = \"\\n\".join(cmds)\n with _trace_redis_execute_pipeline(pin, config.redis, resource, instance):\n return func(*args, **kwargs)\n", "path": "ddtrace/contrib/redis/patch.py"}]} | 1,578 | 213 |
gh_patches_debug_14674 | rasdani/github-patches | git_diff | beeware__toga-1626 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Android: `paths.app` doesn't work in event handlers
**Describe the bug**
On Android, `paths.app` returns `/` when called outside of the `startup` method, e.g. in an event handler. This is because the `__main__` module has finished importing, and it no longer exists in `sys.modules` under that name.
This doesn't happen on Windows, because it runs the entire main loop within the top level of the `__main__` module. The other desktop platforms are probably the same.
But on Android, the main loop is already running and we have to integrate into it, so importing the `__main__` module does not block. From inspection of the code, it looks like iOS is similar, so it might have the same problem.
The fallback to `/` makes no sense on Android anyway, so it should be removed.
Reported by @kakos-k9 on Discord.
**Test app**
```python
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
class Hello(toga.App):
def startup(self):
button = toga.Button("Print", on_press=self.on_press)
self.on_press(button)
self.main_window = toga.MainWindow()
self.main_window.content = toga.Box(
style=Pack(direction=COLUMN),
children=[button]
)
self.main_window.show()
def on_press(self, button):
import traceback
for name in ["app", "data", "toga"]:
print(f"{name}={getattr(self.paths, name)}")
def main():
return Hello()
```
**Environment:**
- Operating System: Android
- Python version: 3.8
- Software versions:
- Toga: 9a292f8e26
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/android/toga_android/paths.py`
Content:
```
1 import sys
2 from pathlib import Path
3
4 import toga
5 from toga import App
6
7
8 class Paths:
9 # Allow instantiating Path object via the factory
10 Path = Path
11
12 @property
13 def __context(self):
14 return App.app._impl.native.getApplicationContext()
15
16 @property
17 def app(self):
18 try:
19 return Path(sys.modules["__main__"].__file__).parent
20 except KeyError:
21 # If we're running in test conditions,
22 # there is no __main__ module.
23 return Path.cwd()
24 except AttributeError:
25 # If we're running at an interactive prompt,
26 # the __main__ module isn't file-based.
27 return Path.cwd()
28
29 @property
30 def data(self):
31 return Path(self.__context.getFilesDir().getPath())
32
33 @property
34 def cache(self):
35 return Path(self.__context.getCacheDir().getPath())
36
37 @property
38 def logs(self):
39 return self.data
40
41 @property
42 def toga(self):
43 """Return a path to a Toga resources
44 """
45 return Path(toga.__file__).parent
46
47
48 paths = Paths()
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/android/toga_android/paths.py b/src/android/toga_android/paths.py
--- a/src/android/toga_android/paths.py
+++ b/src/android/toga_android/paths.py
@@ -13,18 +13,13 @@
def __context(self):
return App.app._impl.native.getApplicationContext()
+ def __init__(self):
+ # On Android, __main__ only exists during app startup, so cache its location now.
+ self._app = Path(sys.modules["__main__"].__file__).parent
+
@property
def app(self):
- try:
- return Path(sys.modules["__main__"].__file__).parent
- except KeyError:
- # If we're running in test conditions,
- # there is no __main__ module.
- return Path.cwd()
- except AttributeError:
- # If we're running at an interactive prompt,
- # the __main__ module isn't file-based.
- return Path.cwd()
+ return self._app
@property
def data(self):
| {"golden_diff": "diff --git a/src/android/toga_android/paths.py b/src/android/toga_android/paths.py\n--- a/src/android/toga_android/paths.py\n+++ b/src/android/toga_android/paths.py\n@@ -13,18 +13,13 @@\n def __context(self):\n return App.app._impl.native.getApplicationContext()\n \n+ def __init__(self):\n+ # On Android, __main__ only exists during app startup, so cache its location now.\n+ self._app = Path(sys.modules[\"__main__\"].__file__).parent\n+\n @property\n def app(self):\n- try:\n- return Path(sys.modules[\"__main__\"].__file__).parent\n- except KeyError:\n- # If we're running in test conditions,\n- # there is no __main__ module.\n- return Path.cwd()\n- except AttributeError:\n- # If we're running at an interactive prompt,\n- # the __main__ module isn't file-based.\n- return Path.cwd()\n+ return self._app\n \n @property\n def data(self):\n", "issue": "Android: `paths.app` doesn't work in event handlers\n**Describe the bug**\r\n\r\nOn Android, `paths.app` returns `/` when called outside of the `startup` method, e.g. in an event handler. This is because the `__main__` module has finished importing, and it no longer exists in `sys.modules` under that name.\r\n\r\nThis doesn't happen on Windows, because it runs the entire main loop within the top level of the `__main__` module. The other desktop platforms are probably the same.\r\n\r\nBut on Android, the main loop is already running and we have to integrate into it, so importing the `__main__` module does not block. From inspection of the code, it looks like iOS is similar, so it might have the same problem.\r\n\r\nThe fallback to `/` makes no sense on Android anyway, so it should be removed.\r\n\r\nReported by @kakos-k9 on Discord.\r\n\r\n**Test app**\r\n```python\r\nimport toga\r\nfrom toga.style import Pack\r\nfrom toga.style.pack import COLUMN, ROW\r\n\r\nclass Hello(toga.App):\r\n\r\n def startup(self):\r\n button = toga.Button(\"Print\", on_press=self.on_press)\r\n self.on_press(button)\r\n\r\n self.main_window = toga.MainWindow()\r\n self.main_window.content = toga.Box(\r\n style=Pack(direction=COLUMN),\r\n children=[button]\r\n )\r\n self.main_window.show()\r\n\r\n def on_press(self, button):\r\n import traceback\r\n for name in [\"app\", \"data\", \"toga\"]:\r\n print(f\"{name}={getattr(self.paths, name)}\")\r\n\r\n\r\ndef main():\r\n return Hello()\r\n```\r\n\r\n**Environment:**\r\n - Operating System: Android\r\n - Python version: 3.8\r\n - Software versions:\r\n - Toga: 9a292f8e26\r\n\n", "before_files": [{"content": "import sys\nfrom pathlib import Path\n\nimport toga\nfrom toga import App\n\n\nclass Paths:\n # Allow instantiating Path object via the factory\n Path = Path\n\n @property\n def __context(self):\n return App.app._impl.native.getApplicationContext()\n\n @property\n def app(self):\n try:\n return Path(sys.modules[\"__main__\"].__file__).parent\n except KeyError:\n # If we're running in test conditions,\n # there is no __main__ module.\n return Path.cwd()\n except AttributeError:\n # If we're running at an interactive prompt,\n # the __main__ module isn't file-based.\n return Path.cwd()\n\n @property\n def data(self):\n return Path(self.__context.getFilesDir().getPath())\n\n @property\n def cache(self):\n return Path(self.__context.getCacheDir().getPath())\n\n @property\n def logs(self):\n return self.data\n\n @property\n def toga(self):\n \"\"\"Return a path to a Toga resources\n \"\"\"\n return Path(toga.__file__).parent\n\n\npaths = Paths()\n", "path": "src/android/toga_android/paths.py"}], "after_files": 
[{"content": "import sys\nfrom pathlib import Path\n\nimport toga\nfrom toga import App\n\n\nclass Paths:\n # Allow instantiating Path object via the factory\n Path = Path\n\n @property\n def __context(self):\n return App.app._impl.native.getApplicationContext()\n\n def __init__(self):\n # On Android, __main__ only exists during app startup, so cache its location now.\n self._app = Path(sys.modules[\"__main__\"].__file__).parent\n\n @property\n def app(self):\n return self._app\n\n @property\n def data(self):\n return Path(self.__context.getFilesDir().getPath())\n\n @property\n def cache(self):\n return Path(self.__context.getCacheDir().getPath())\n\n @property\n def logs(self):\n return self.data\n\n @property\n def toga(self):\n \"\"\"Return a path to a Toga resources\n \"\"\"\n return Path(toga.__file__).parent\n\n\npaths = Paths()\n", "path": "src/android/toga_android/paths.py"}]} | 986 | 238 |
gh_patches_debug_43832 | rasdani/github-patches | git_diff | weecology__retriever-401 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reserved keyword 'order' not being properly replaced
`clean_column_name()` in `table.py` should replace the column name `order` with `sporder`. It does not appear to be doing this in #286.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/table.py`
Content:
```
1 from retriever.lib.cleanup import *
2 import csv
3 import StringIO
4
5
6 class Table:
7 """Information about a database table."""
8 def __init__(self, name, **kwargs):
9 self.name = name
10 self.pk = True
11 self.contains_pk = False
12 self.delimiter = None
13 self.header_rows = 1
14 self.column_names_row = 1
15 self.fixed_width = False
16 self.cleanup = Cleanup()
17 self.record_id = 0
18 self.columns = []
19 self.replace_columns = []
20 self.escape_single_quotes=True
21 self.escape_double_quotes=True
22 for key, item in kwargs.items():
23 setattr(self, key, item[0] if isinstance(item, tuple) else item)
24
25 def auto_get_columns(self, header):
26 """Gets the column names from the header row
27
28 Identifies the column names from the header row.
29 Replaces database keywords with alternatives.
30 Replaces special characters and spaces.
31
32 """
33 if self.fixed_width:
34 column_names = self.extract_values(header)
35 else:
36 # Get column names from header row
37 values = self.split_on_delimiter(header)
38 column_names = [name.strip() for name in values]
39
40 columns = map(lambda x: self.clean_column_name(x), column_names)
41 column_values = {x:[] for x in columns if x}
42
43 return [[x, None] for x in columns if x], column_values
44
45 def clean_column_name(self, column_name):
46 '''Makes sure a column name is formatted correctly by removing reserved
47 words, symbols, numbers, etc.'''
48 column_name = column_name.lower()
49 replace_columns = {old.lower(): new.lower()
50 for old, new in self.replace_columns}
51 column_name = replace_columns.get(column_name, column_name)
52 replace = [
53 ("%", "percent"),
54 ("&", "and"),
55 ("\xb0", "degrees"),
56 ("group", "grp"),
57 ("order", "sporder"),
58 ("check", "checked"),
59 ("references", "refs"),
60 ("long", "lon"),
61 ("date", "record_date"),
62 ("?", ""),
63 ]
64 replace += [(x, '') for x in (")", "\n", "\r", '"', "'")]
65 replace += [(x, '_') for x in (" ", "(", "/", ".", "-")]
66 column_name = reduce(lambda x, y: x.replace(*y), replace, column_name)
67
68 while "__" in column_name:
69 column_name = column_name.replace("__", "_")
70 column_name = column_name.lstrip("0123456789_").rstrip("_")
71
72 return column_name
73
74 def split_on_delimiter(self, line):
75 dialect = csv.excel
76 dialect.escapechar = "\\"
77 r = csv.reader([line], dialect=dialect, delimiter=self.delimiter)
78 return r.next()
79
80 def combine_on_delimiter(self, line_as_list):
81 """Combine a list of values into a line of csv data"""
82 dialect = csv.excel
83 dialect.escapechar = "\\"
84 writer_file = StringIO.StringIO()
85 writer = csv.writer(writer_file, dialect=dialect, delimiter=self.delimiter)
86 writer.writerow(line_as_list)
87 return writer_file.getvalue()
88
89 def values_from_line(self, line):
90 linevalues = []
91 if (self.pk and self.contains_pk == False):
92 column = 0
93 else:
94 column = -1
95
96 for value in self.extract_values(line):
97 column += 1
98 try:
99 this_column = self.columns[column][1][0]
100
101 # If data type is "skip" ignore the value
102 if this_column == "skip":
103 pass
104 elif this_column == "combine":
105 # If "combine" append value to end of previous column
106 linevalues[-1] += " " + value
107 else:
108 # Otherwise, add new value
109 linevalues.append(value)
110 except:
111 # too many values for columns; ignore
112 pass
113
114 return linevalues
115
116 def extract_values(self, line):
117 """Given a line of data, this function returns a list of the individual
118 data values."""
119 if self.fixed_width:
120 pos = 0
121 values = []
122 for width in self.fixed_width:
123 values.append(line[pos:pos+width].strip())
124 pos += width
125 return values
126 else:
127 return self.split_on_delimiter(line)
128
129 def get_insert_columns(self, join=True):
130 """Gets a set of column names for insert statements."""
131 columns = ""
132 for item in self.columns:
133 thistype = item[1][0]
134 if ((thistype != "skip") and (thistype !="combine") and
135 (self.contains_pk == True or thistype[0:3] != "pk-")):
136 columns += item[0] + ", "
137 columns = columns.rstrip(', ')
138 if join:
139 return columns
140 else:
141 return columns.lstrip("(").rstrip(")").split(", ")
142
143 def get_column_datatypes(self):
144 """Gets a set of column names for insert statements."""
145 columns = []
146 for item in self.get_insert_columns(False):
147 for column in self.columns:
148 if item == column[0]:
149 columns.append(column[1][0])
150 return columns
151
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/table.py b/lib/table.py
--- a/lib/table.py
+++ b/lib/table.py
@@ -1,7 +1,8 @@
-from retriever.lib.cleanup import *
import csv
import StringIO
+from retriever.lib.cleanup import *
+
class Table:
"""Information about a database table."""
@@ -19,6 +20,7 @@
self.replace_columns = []
self.escape_single_quotes=True
self.escape_double_quotes=True
+ self.cleaned_columns = False
for key, item in kwargs.items():
setattr(self, key, item[0] if isinstance(item, tuple) else item)
@@ -39,36 +41,53 @@
columns = map(lambda x: self.clean_column_name(x), column_names)
column_values = {x:[] for x in columns if x}
-
+ self.cleaned_columns = True
return [[x, None] for x in columns if x], column_values
def clean_column_name(self, column_name):
- '''Makes sure a column name is formatted correctly by removing reserved
- words, symbols, numbers, etc.'''
- column_name = column_name.lower()
+ """Clean column names using the expected sql guidelines
+
+ remove leading whitespaces, replace sql key words, etc..
+ """
+ column_name = column_name.lower().strip()
replace_columns = {old.lower(): new.lower()
for old, new in self.replace_columns}
- column_name = replace_columns.get(column_name, column_name)
+
+ column_name = replace_columns.get(column_name, column_name).strip()
replace = [
- ("%", "percent"),
- ("&", "and"),
- ("\xb0", "degrees"),
- ("group", "grp"),
- ("order", "sporder"),
- ("check", "checked"),
- ("references", "refs"),
- ("long", "lon"),
- ("date", "record_date"),
- ("?", ""),
- ]
+ ("%", "percent"),
+ ("&", "and"),
+ ("\xb0", "degrees"),
+ ("?", ""),
+ ]
replace += [(x, '') for x in (")", "\n", "\r", '"', "'")]
- replace += [(x, '_') for x in (" ", "(", "/", ".", "-")]
+ replace += [(x, '_') for x in (" ", "(", "/", ".", "-", "*", ":")]
column_name = reduce(lambda x, y: x.replace(*y), replace, column_name)
-
while "__" in column_name:
column_name = column_name.replace("__", "_")
column_name = column_name.lstrip("0123456789_").rstrip("_")
-
+ replace_dict = {
+ "group": "grp",
+ "order": "ordered",
+ "check": "checked",
+ "references": "refs",
+ "long": "lon",
+ "column": "columns",
+ "cursor": "cursors",
+ "delete": "deleted",
+ "insert": "inserted",
+ "join": "joins",
+ "select": "selects",
+ "table": "tables",
+ "update": "updates",
+ "date": "record_date"
+ }
+ for x in (")", "\n", "\r", '"', "'"):
+ replace_dict[x] = ''
+ for x in (" ", "(", "/", ".", "-"):
+ replace_dict[x] = '_'
+ if column_name in replace_dict:
+ column_name = replace_dict[column_name]
return column_name
def split_on_delimiter(self, line):
@@ -129,6 +148,11 @@
def get_insert_columns(self, join=True):
"""Gets a set of column names for insert statements."""
columns = ""
+ if not self.cleaned_columns:
+ column_names = list(self.columns)
+ self.columns[:] = []
+ self.columns = [(self.clean_column_name(name[0]), name[1]) for name in column_names]
+ self.cleaned_columns = True
for item in self.columns:
thistype = item[1][0]
if ((thistype != "skip") and (thistype !="combine") and
@@ -148,4 +172,3 @@
if item == column[0]:
columns.append(column[1][0])
return columns
-
| {"golden_diff": "diff --git a/lib/table.py b/lib/table.py\n--- a/lib/table.py\n+++ b/lib/table.py\n@@ -1,7 +1,8 @@\n-from retriever.lib.cleanup import *\n import csv\n import StringIO\n \n+from retriever.lib.cleanup import *\n+\n \n class Table:\n \"\"\"Information about a database table.\"\"\"\n@@ -19,6 +20,7 @@\n self.replace_columns = []\n self.escape_single_quotes=True\n self.escape_double_quotes=True\n+ self.cleaned_columns = False\n for key, item in kwargs.items():\n setattr(self, key, item[0] if isinstance(item, tuple) else item)\n \n@@ -39,36 +41,53 @@\n \n columns = map(lambda x: self.clean_column_name(x), column_names)\n column_values = {x:[] for x in columns if x}\n-\n+ self.cleaned_columns = True\n return [[x, None] for x in columns if x], column_values\n \n def clean_column_name(self, column_name):\n- '''Makes sure a column name is formatted correctly by removing reserved\n- words, symbols, numbers, etc.'''\n- column_name = column_name.lower()\n+ \"\"\"Clean column names using the expected sql guidelines\n+\n+ remove leading whitespaces, replace sql key words, etc..\n+ \"\"\"\n+ column_name = column_name.lower().strip()\n replace_columns = {old.lower(): new.lower()\n for old, new in self.replace_columns}\n- column_name = replace_columns.get(column_name, column_name)\n+\n+ column_name = replace_columns.get(column_name, column_name).strip()\n replace = [\n- (\"%\", \"percent\"),\n- (\"&\", \"and\"),\n- (\"\\xb0\", \"degrees\"),\n- (\"group\", \"grp\"),\n- (\"order\", \"sporder\"),\n- (\"check\", \"checked\"),\n- (\"references\", \"refs\"),\n- (\"long\", \"lon\"),\n- (\"date\", \"record_date\"),\n- (\"?\", \"\"),\n- ]\n+ (\"%\", \"percent\"),\n+ (\"&\", \"and\"),\n+ (\"\\xb0\", \"degrees\"),\n+ (\"?\", \"\"),\n+ ]\n replace += [(x, '') for x in (\")\", \"\\n\", \"\\r\", '\"', \"'\")]\n- replace += [(x, '_') for x in (\" \", \"(\", \"/\", \".\", \"-\")]\n+ replace += [(x, '_') for x in (\" \", \"(\", \"/\", \".\", \"-\", \"*\", \":\")]\n column_name = reduce(lambda x, y: x.replace(*y), replace, column_name)\n-\n while \"__\" in column_name:\n column_name = column_name.replace(\"__\", \"_\")\n column_name = column_name.lstrip(\"0123456789_\").rstrip(\"_\")\n-\n+ replace_dict = {\n+ \"group\": \"grp\",\n+ \"order\": \"ordered\",\n+ \"check\": \"checked\",\n+ \"references\": \"refs\",\n+ \"long\": \"lon\",\n+ \"column\": \"columns\",\n+ \"cursor\": \"cursors\",\n+ \"delete\": \"deleted\",\n+ \"insert\": \"inserted\",\n+ \"join\": \"joins\",\n+ \"select\": \"selects\",\n+ \"table\": \"tables\",\n+ \"update\": \"updates\",\n+ \"date\": \"record_date\"\n+ }\n+ for x in (\")\", \"\\n\", \"\\r\", '\"', \"'\"):\n+ replace_dict[x] = ''\n+ for x in (\" \", \"(\", \"/\", \".\", \"-\"):\n+ replace_dict[x] = '_'\n+ if column_name in replace_dict:\n+ column_name = replace_dict[column_name]\n return column_name\n \n def split_on_delimiter(self, line):\n@@ -129,6 +148,11 @@\n def get_insert_columns(self, join=True):\n \"\"\"Gets a set of column names for insert statements.\"\"\"\n columns = \"\"\n+ if not self.cleaned_columns:\n+ column_names = list(self.columns)\n+ self.columns[:] = []\n+ self.columns = [(self.clean_column_name(name[0]), name[1]) for name in column_names]\n+ self.cleaned_columns = True\n for item in self.columns:\n thistype = item[1][0]\n if ((thistype != \"skip\") and (thistype !=\"combine\") and\n@@ -148,4 +172,3 @@\n if item == column[0]:\n columns.append(column[1][0])\n return columns\n-\n", "issue": "Reserved keyword 'order' not being properly 
replaced\n`clean_column_name()` in `table.py` should replace the column name `order` with `sporder`. It does not appear to be doing this in #286.\n\n", "before_files": [{"content": "from retriever.lib.cleanup import *\nimport csv\nimport StringIO\n\n\nclass Table:\n \"\"\"Information about a database table.\"\"\"\n def __init__(self, name, **kwargs):\n self.name = name\n self.pk = True\n self.contains_pk = False\n self.delimiter = None\n self.header_rows = 1\n self.column_names_row = 1\n self.fixed_width = False\n self.cleanup = Cleanup()\n self.record_id = 0\n self.columns = []\n self.replace_columns = []\n self.escape_single_quotes=True\n self.escape_double_quotes=True\n for key, item in kwargs.items():\n setattr(self, key, item[0] if isinstance(item, tuple) else item)\n\n def auto_get_columns(self, header):\n \"\"\"Gets the column names from the header row\n\n Identifies the column names from the header row.\n Replaces database keywords with alternatives.\n Replaces special characters and spaces.\n\n \"\"\"\n if self.fixed_width:\n column_names = self.extract_values(header)\n else:\n # Get column names from header row\n values = self.split_on_delimiter(header)\n column_names = [name.strip() for name in values]\n\n columns = map(lambda x: self.clean_column_name(x), column_names)\n column_values = {x:[] for x in columns if x}\n\n return [[x, None] for x in columns if x], column_values\n\n def clean_column_name(self, column_name):\n '''Makes sure a column name is formatted correctly by removing reserved\n words, symbols, numbers, etc.'''\n column_name = column_name.lower()\n replace_columns = {old.lower(): new.lower()\n for old, new in self.replace_columns}\n column_name = replace_columns.get(column_name, column_name)\n replace = [\n (\"%\", \"percent\"),\n (\"&\", \"and\"),\n (\"\\xb0\", \"degrees\"),\n (\"group\", \"grp\"),\n (\"order\", \"sporder\"),\n (\"check\", \"checked\"),\n (\"references\", \"refs\"),\n (\"long\", \"lon\"),\n (\"date\", \"record_date\"),\n (\"?\", \"\"),\n ]\n replace += [(x, '') for x in (\")\", \"\\n\", \"\\r\", '\"', \"'\")]\n replace += [(x, '_') for x in (\" \", \"(\", \"/\", \".\", \"-\")]\n column_name = reduce(lambda x, y: x.replace(*y), replace, column_name)\n\n while \"__\" in column_name:\n column_name = column_name.replace(\"__\", \"_\")\n column_name = column_name.lstrip(\"0123456789_\").rstrip(\"_\")\n\n return column_name\n\n def split_on_delimiter(self, line):\n dialect = csv.excel\n dialect.escapechar = \"\\\\\"\n r = csv.reader([line], dialect=dialect, delimiter=self.delimiter)\n return r.next()\n\n def combine_on_delimiter(self, line_as_list):\n \"\"\"Combine a list of values into a line of csv data\"\"\"\n dialect = csv.excel\n dialect.escapechar = \"\\\\\"\n writer_file = StringIO.StringIO()\n writer = csv.writer(writer_file, dialect=dialect, delimiter=self.delimiter)\n writer.writerow(line_as_list)\n return writer_file.getvalue()\n\n def values_from_line(self, line):\n linevalues = []\n if (self.pk and self.contains_pk == False):\n column = 0\n else:\n column = -1\n\n for value in self.extract_values(line):\n column += 1\n try:\n this_column = self.columns[column][1][0]\n\n # If data type is \"skip\" ignore the value\n if this_column == \"skip\":\n pass\n elif this_column == \"combine\":\n # If \"combine\" append value to end of previous column\n linevalues[-1] += \" \" + value\n else:\n # Otherwise, add new value\n linevalues.append(value)\n except:\n # too many values for columns; ignore\n pass\n\n return linevalues\n\n def 
extract_values(self, line):\n \"\"\"Given a line of data, this function returns a list of the individual\n data values.\"\"\"\n if self.fixed_width:\n pos = 0\n values = []\n for width in self.fixed_width:\n values.append(line[pos:pos+width].strip())\n pos += width\n return values\n else:\n return self.split_on_delimiter(line)\n\n def get_insert_columns(self, join=True):\n \"\"\"Gets a set of column names for insert statements.\"\"\"\n columns = \"\"\n for item in self.columns:\n thistype = item[1][0]\n if ((thistype != \"skip\") and (thistype !=\"combine\") and\n (self.contains_pk == True or thistype[0:3] != \"pk-\")):\n columns += item[0] + \", \"\n columns = columns.rstrip(', ')\n if join:\n return columns\n else:\n return columns.lstrip(\"(\").rstrip(\")\").split(\", \")\n\n def get_column_datatypes(self):\n \"\"\"Gets a set of column names for insert statements.\"\"\"\n columns = []\n for item in self.get_insert_columns(False):\n for column in self.columns:\n if item == column[0]:\n columns.append(column[1][0])\n return columns\n\n", "path": "lib/table.py"}], "after_files": [{"content": "import csv\nimport StringIO\n\nfrom retriever.lib.cleanup import *\n\n\nclass Table:\n \"\"\"Information about a database table.\"\"\"\n def __init__(self, name, **kwargs):\n self.name = name\n self.pk = True\n self.contains_pk = False\n self.delimiter = None\n self.header_rows = 1\n self.column_names_row = 1\n self.fixed_width = False\n self.cleanup = Cleanup()\n self.record_id = 0\n self.columns = []\n self.replace_columns = []\n self.escape_single_quotes=True\n self.escape_double_quotes=True\n self.cleaned_columns = False\n for key, item in kwargs.items():\n setattr(self, key, item[0] if isinstance(item, tuple) else item)\n\n def auto_get_columns(self, header):\n \"\"\"Gets the column names from the header row\n\n Identifies the column names from the header row.\n Replaces database keywords with alternatives.\n Replaces special characters and spaces.\n\n \"\"\"\n if self.fixed_width:\n column_names = self.extract_values(header)\n else:\n # Get column names from header row\n values = self.split_on_delimiter(header)\n column_names = [name.strip() for name in values]\n\n columns = map(lambda x: self.clean_column_name(x), column_names)\n column_values = {x:[] for x in columns if x}\n self.cleaned_columns = True\n return [[x, None] for x in columns if x], column_values\n\n def clean_column_name(self, column_name):\n \"\"\"Clean column names using the expected sql guidelines\n\n remove leading whitespaces, replace sql key words, etc..\n \"\"\"\n column_name = column_name.lower().strip()\n replace_columns = {old.lower(): new.lower()\n for old, new in self.replace_columns}\n\n column_name = replace_columns.get(column_name, column_name).strip()\n replace = [\n (\"%\", \"percent\"),\n (\"&\", \"and\"),\n (\"\\xb0\", \"degrees\"),\n (\"?\", \"\"),\n ]\n replace += [(x, '') for x in (\")\", \"\\n\", \"\\r\", '\"', \"'\")]\n replace += [(x, '_') for x in (\" \", \"(\", \"/\", \".\", \"-\", \"*\", \":\")]\n column_name = reduce(lambda x, y: x.replace(*y), replace, column_name)\n while \"__\" in column_name:\n column_name = column_name.replace(\"__\", \"_\")\n column_name = column_name.lstrip(\"0123456789_\").rstrip(\"_\")\n replace_dict = {\n \"group\": \"grp\",\n \"order\": \"ordered\",\n \"check\": \"checked\",\n \"references\": \"refs\",\n \"long\": \"lon\",\n \"column\": \"columns\",\n \"cursor\": \"cursors\",\n \"delete\": \"deleted\",\n \"insert\": \"inserted\",\n \"join\": \"joins\",\n \"select\": 
\"selects\",\n \"table\": \"tables\",\n \"update\": \"updates\",\n \"date\": \"record_date\"\n }\n for x in (\")\", \"\\n\", \"\\r\", '\"', \"'\"):\n replace_dict[x] = ''\n for x in (\" \", \"(\", \"/\", \".\", \"-\"):\n replace_dict[x] = '_'\n if column_name in replace_dict:\n column_name = replace_dict[column_name]\n return column_name\n\n def split_on_delimiter(self, line):\n dialect = csv.excel\n dialect.escapechar = \"\\\\\"\n r = csv.reader([line], dialect=dialect, delimiter=self.delimiter)\n return r.next()\n\n def combine_on_delimiter(self, line_as_list):\n \"\"\"Combine a list of values into a line of csv data\"\"\"\n dialect = csv.excel\n dialect.escapechar = \"\\\\\"\n writer_file = StringIO.StringIO()\n writer = csv.writer(writer_file, dialect=dialect, delimiter=self.delimiter)\n writer.writerow(line_as_list)\n return writer_file.getvalue()\n\n def values_from_line(self, line):\n linevalues = []\n if (self.pk and self.contains_pk == False):\n column = 0\n else:\n column = -1\n\n for value in self.extract_values(line):\n column += 1\n try:\n this_column = self.columns[column][1][0]\n\n # If data type is \"skip\" ignore the value\n if this_column == \"skip\":\n pass\n elif this_column == \"combine\":\n # If \"combine\" append value to end of previous column\n linevalues[-1] += \" \" + value\n else:\n # Otherwise, add new value\n linevalues.append(value)\n except:\n # too many values for columns; ignore\n pass\n\n return linevalues\n\n def extract_values(self, line):\n \"\"\"Given a line of data, this function returns a list of the individual\n data values.\"\"\"\n if self.fixed_width:\n pos = 0\n values = []\n for width in self.fixed_width:\n values.append(line[pos:pos+width].strip())\n pos += width\n return values\n else:\n return self.split_on_delimiter(line)\n\n def get_insert_columns(self, join=True):\n \"\"\"Gets a set of column names for insert statements.\"\"\"\n columns = \"\"\n if not self.cleaned_columns:\n column_names = list(self.columns)\n self.columns[:] = []\n self.columns = [(self.clean_column_name(name[0]), name[1]) for name in column_names]\n self.cleaned_columns = True\n for item in self.columns:\n thistype = item[1][0]\n if ((thistype != \"skip\") and (thistype !=\"combine\") and\n (self.contains_pk == True or thistype[0:3] != \"pk-\")):\n columns += item[0] + \", \"\n columns = columns.rstrip(', ')\n if join:\n return columns\n else:\n return columns.lstrip(\"(\").rstrip(\")\").split(\", \")\n\n def get_column_datatypes(self):\n \"\"\"Gets a set of column names for insert statements.\"\"\"\n columns = []\n for item in self.get_insert_columns(False):\n for column in self.columns:\n if item == column[0]:\n columns.append(column[1][0])\n return columns\n", "path": "lib/table.py"}]} | 1,800 | 974 |
gh_patches_debug_13219 | rasdani/github-patches | git_diff | fidals__shopelectro-552 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
models.py:122-123: Move Product.get_siblings method to...
The puzzle `388-509fa51e` from #388 has to be resolved:
https://github.com/fidals/shopelectro/blob/5d0c5e97622abce3a922eda62c1f38810d7da5f8/shopelectro/models.py#L122-L123
The puzzle was created by duker33 on 12-Aug-18.
Estimate: 30 minutes,
If you have any technical questions, don't ask me, submit new tickets instead. The task will be "done" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/models.py`
Content:
```
1 import random
2 import string
3 from itertools import chain, groupby
4 from operator import attrgetter
5 from typing import Dict, List, Optional, Tuple
6 from uuid import uuid4
7
8 from django.conf import settings
9 from django.db import models
10 from django.urls import reverse
11 from django.utils.text import slugify
12 from django.utils.translation import ugettext_lazy as _
13 from mptt.querysets import TreeQuerySet
14 from unidecode import unidecode
15
16 from catalog.models import (
17 AbstractCategory,
18 AbstractProduct,
19 CategoryManager,
20 ProductActiveManager,
21 ProductManager,
22 )
23 from ecommerce.models import Order as ecOrder
24 from pages.models import CustomPage, ModelPage, Page, SyncPageMixin, PageManager
25
26
27 def randomize_slug(slug: str) -> str:
28 slug_hash = ''.join(
29 random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)
30 )
31 return f'{slug}_{slug_hash}'
32
33
34 class SECategoryQuerySet(TreeQuerySet):
35 def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':
36 categories_with_pictures = (
37 self
38 .filter(products__page__images__isnull=False)
39 .distinct()
40 )
41
42 return categories_with_pictures.get_ancestors(include_self=True)
43
44
45 class SECategoryManager(CategoryManager.from_queryset(SECategoryQuerySet)):
46 pass
47
48
49 class Category(AbstractCategory, SyncPageMixin):
50
51 objects = SECategoryManager()
52 uuid = models.UUIDField(default=uuid4, editable=False)
53
54 @classmethod
55 def get_default_parent(cls):
56 return CustomPage.objects.filter(slug='catalog').first()
57
58 @property
59 def image(self):
60 products = self.products.all()
61 return products[0].image if products else None
62
63 def get_absolute_url(self):
64 return reverse('category', args=(self.page.slug,))
65
66
67 class Product(AbstractProduct, SyncPageMixin):
68
69 # That's why we are needed to explicitly add objects manager here
70 # because of Django special managers behaviour.
71 # Se se#480 for details.
72 objects = ProductManager()
73 actives = ProductActiveManager()
74
75 category = models.ForeignKey(
76 Category,
77 on_delete=models.CASCADE,
78 null=True,
79 related_name='products',
80 verbose_name=_('category'),
81 )
82
83 tags = models.ManyToManyField(
84 'Tag',
85 related_name='products',
86 blank=True,
87 verbose_name=_('tags'),
88 )
89
90 vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))
91 uuid = models.UUIDField(default=uuid4, editable=False)
92 purchase_price = models.FloatField(
93 default=0, verbose_name=_('purchase_price'))
94 wholesale_small = models.FloatField(
95 default=0, verbose_name=_('wholesale_small'))
96 wholesale_medium = models.FloatField(
97 default=0, verbose_name=_('wholesale_medium'))
98 wholesale_large = models.FloatField(
99 default=0, verbose_name=_('wholesale_large'))
100
101 def get_absolute_url(self):
102 return reverse('product', args=(self.vendor_code,))
103
104 @property
105 def average_rate(self):
106 """Return rounded to first decimal averaged rating."""
107 rating = self.product_feedbacks.aggregate(
108 avg=models.Avg('rating')).get('avg', 0)
109 return round(rating, 1)
110
111 @property
112 def feedback_count(self):
113 return self.product_feedbacks.count()
114
115 @property
116 def feedback(self):
117 return self.product_feedbacks.all().order_by('-date')
118
119 def get_params(self):
120 return Tag.objects.filter_by_products([self]).get_group_tags_pairs()
121
122 # @todo #388:30m Move Product.get_siblings method to refarm-site
123 # And reuse it on STB.
124 def get_siblings(self, offset):
125 return (
126 self.__class__.actives
127 .filter(category=self.category)
128 .prefetch_related('category')
129 .select_related('page')[:offset]
130 )
131
132 def get_brand_name(self) -> str:
133 brand: Optional['Tag'] = Tag.objects.get_brands([self]).get(self)
134 return brand.name if brand else ''
135
136
137 class ProductFeedback(models.Model):
138 product = models.ForeignKey(
139 Product, on_delete=models.CASCADE, null=True,
140 related_name='product_feedbacks'
141 )
142
143 date = models.DateTimeField(
144 auto_now=True, db_index=True, verbose_name=_('date'))
145 name = models.CharField(
146 max_length=255, db_index=True, verbose_name=_('name'))
147 rating = models.PositiveSmallIntegerField(
148 default=1, db_index=True, verbose_name=_('rating'))
149 dignities = models.TextField(
150 default='', blank=True, verbose_name=_('dignities'))
151 limitations = models.TextField(
152 default='', blank=True, verbose_name=_('limitations'))
153 general = models.TextField(
154 default='', blank=True, verbose_name=_('limitations'))
155
156
157 def _default_payment():
158 """Default payment option is first element of first tuple in options."""
159 assert settings.PAYMENT_OPTIONS[0][0], 'No payment options!'
160 return settings.PAYMENT_OPTIONS[0][0]
161
162
163 class Order(ecOrder):
164 address = models.TextField(blank=True, default='')
165 payment_type = models.CharField(
166 max_length=255,
167 choices=settings.PAYMENT_OPTIONS,
168 default=_default_payment()
169 )
170 comment = models.TextField(blank=True, default='')
171
172 @property
173 def payment_type_name(self):
174 """Return name for an order's payment option."""
175 return next(
176 name for option, name in settings.PAYMENT_OPTIONS
177 if self.payment_type == option
178 )
179
180 def set_positions(self, cart):
181 """Save cart's state into Order instance."""
182 self.save()
183 for id_, position in cart:
184 self.positions.create(
185 order=self,
186 product_id=id_,
187 vendor_code=position['vendor_code'],
188 name=position['name'],
189 price=position['price'],
190 quantity=position['quantity']
191 )
192 return self
193
194
195 class CategoryPage(ModelPage):
196 """Create proxy model for Admin."""
197
198 class Meta(ModelPage.Meta): # Ignore PycodestyleBear (E303)
199 proxy = True
200
201 objects = ModelPage.create_model_page_managers(Category)
202
203
204 class ProductPage(ModelPage):
205 """Create proxy model for Admin."""
206
207 class Meta(ModelPage.Meta): # Ignore PycodestyleBear (E303)
208 proxy = True
209
210 objects = ModelPage.create_model_page_managers(Product)
211
212
213 class TagGroup(models.Model):
214
215 uuid = models.UUIDField(default=uuid4, editable=False) # Ignore CPDBear
216 name = models.CharField(
217 max_length=100, db_index=True, verbose_name=_('name'))
218 position = models.PositiveSmallIntegerField(
219 default=0, blank=True, db_index=True, verbose_name=_('position'),
220 )
221
222 def __str__(self):
223 return self.name
224
225
226 class TagQuerySet(models.QuerySet):
227
228 def filter_by_products(self, products: List[Product]):
229 ordering = settings.TAGS_ORDER
230 distinct = [order.lstrip('-') for order in ordering]
231
232 return (
233 self
234 .filter(products__in=products)
235 .order_by(*ordering)
236 .distinct(*distinct, 'id')
237 )
238
239 def get_group_tags_pairs(self) -> List[Tuple[TagGroup, List['Tag']]]:
240 grouped_tags = groupby(self.prefetch_related('group'), key=attrgetter('group'))
241 return [
242 (group, list(tags_))
243 for group, tags_ in grouped_tags
244 ]
245
246 def get_brands(self, products: List[Product]) -> Dict[Product, 'Tag']:
247 brand_tags = (
248 self.filter(group__name=settings.BRAND_TAG_GROUP_NAME)
249 .prefetch_related('products')
250 .select_related('group')
251 )
252
253 return {
254 product: brand
255 for brand in brand_tags for product in products
256 if product in brand.products.all()
257 }
258
259
260 class TagManager(models.Manager.from_queryset(TagQuerySet)):
261
262 def get_queryset(self):
263 return (
264 super().get_queryset()
265 .order_by(*settings.TAGS_ORDER)
266 )
267
268 def get_group_tags_pairs(self):
269 return self.get_queryset().get_group_tags_pairs()
270
271 def filter_by_products(self, products):
272 return self.get_queryset().filter_by_products(products)
273
274 def get_brands(self, products):
275 """Get a batch of products' brands."""
276 return self.get_queryset().get_brands(products)
277
278
279 class Tag(models.Model):
280
281 # Uncomment it after moving to refarm with rf#162
282 # class Meta:
283 # unique_together = ('name', 'group')
284
285 objects = TagManager()
286
287 uuid = models.UUIDField(default=uuid4, editable=False)
288 name = models.CharField(
289 max_length=100, db_index=True, verbose_name=_('name'))
290 position = models.PositiveSmallIntegerField(
291 default=0, blank=True, db_index=True, verbose_name=_('position'),
292 )
293
294 # Set it as unique with rf#162
295 slug = models.SlugField(default='')
296
297 group = models.ForeignKey(
298 TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',
299 )
300
301 def __str__(self):
302 return self.name
303
304 def save(self, *args, **kwargs):
305 if not self.slug:
306 # same slugify code used in PageMixin object
307 self.slug = slugify(
308 unidecode(self.name.replace('.', '-').replace('+', '-'))
309 )
310 doubled_tag_qs = self.__class__.objects.filter(slug=self.slug)
311 if doubled_tag_qs:
312 self.slug = randomize_slug(self.slug)
313 super(Tag, self).save(*args, **kwargs)
314
315 @staticmethod
316 def parse_url_tags(tags: str) -> list:
317 groups = tags.split(settings.TAGS_URL_DELIMITER)
318 return set(chain.from_iterable(
319 group.split(settings.TAG_GROUPS_URL_DELIMITER) for group in groups
320 ))
321
322
323 def serialize_tags(
324 tags: TagQuerySet,
325 field_name: str,
326 type_delimiter: str,
327 group_delimiter: str,
328 ) -> str:
329 group_tags_map = tags.get_group_tags_pairs()
330
331 _, tags_by_group = zip(*group_tags_map)
332
333 return group_delimiter.join(
334 type_delimiter.join(getattr(tag, field_name) for tag in tags_list)
335 for tags_list in tags_by_group
336 )
337
338
339 def serialize_tags_to_url(tags: TagQuerySet) -> str:
340 return serialize_tags(
341 tags=tags,
342 field_name='slug',
343 type_delimiter=settings.TAGS_URL_DELIMITER,
344 group_delimiter=settings.TAG_GROUPS_URL_DELIMITER
345 )
346
347
348 def serialize_tags_to_title(tags: TagQuerySet) -> str:
349 return serialize_tags(
350 tags=tags,
351 field_name='name',
352 type_delimiter=settings.TAGS_TITLE_DELIMITER,
353 group_delimiter=settings.TAG_GROUPS_TITLE_DELIMITER
354 )
355
356
357 class ExcludedModelTPageManager(PageManager):
358
359 def get_queryset(self):
360 return super().get_queryset().exclude(type=Page.MODEL_TYPE)
361
362
363 class ExcludedModelTPage(Page):
364
365 class Meta(Page.Meta): # Ignore PycodestyleBear (E303)
366 proxy = True
367
368 objects = ExcludedModelTPageManager()
369
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/models.py b/shopelectro/models.py
--- a/shopelectro/models.py
+++ b/shopelectro/models.py
@@ -119,16 +119,6 @@
def get_params(self):
return Tag.objects.filter_by_products([self]).get_group_tags_pairs()
- # @todo #388:30m Move Product.get_siblings method to refarm-site
- # And reuse it on STB.
- def get_siblings(self, offset):
- return (
- self.__class__.actives
- .filter(category=self.category)
- .prefetch_related('category')
- .select_related('page')[:offset]
- )
-
def get_brand_name(self) -> str:
brand: Optional['Tag'] = Tag.objects.get_brands([self]).get(self)
return brand.name if brand else ''
| {"golden_diff": "diff --git a/shopelectro/models.py b/shopelectro/models.py\n--- a/shopelectro/models.py\n+++ b/shopelectro/models.py\n@@ -119,16 +119,6 @@\n def get_params(self):\n return Tag.objects.filter_by_products([self]).get_group_tags_pairs()\n \n- # @todo #388:30m Move Product.get_siblings method to refarm-site\n- # And reuse it on STB.\n- def get_siblings(self, offset):\n- return (\n- self.__class__.actives\n- .filter(category=self.category)\n- .prefetch_related('category')\n- .select_related('page')[:offset]\n- )\n-\n def get_brand_name(self) -> str:\n brand: Optional['Tag'] = Tag.objects.get_brands([self]).get(self)\n return brand.name if brand else ''\n", "issue": "models.py:122-123: Move Product.get_siblings method to...\nThe puzzle `388-509fa51e` from #388 has to be resolved:\n\nhttps://github.com/fidals/shopelectro/blob/5d0c5e97622abce3a922eda62c1f38810d7da5f8/shopelectro/models.py#L122-L123\n\nThe puzzle was created by duker33 on 12-Aug-18. \n\nEstimate: 30 minutes, \n\nIf you have any technical questions, don't ask me, submit new tickets instead. The task will be \"done\" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html).\n", "before_files": [{"content": "import random\nimport string\nfrom itertools import chain, groupby\nfrom operator import attrgetter\nfrom typing import Dict, List, Optional, Tuple\nfrom uuid import uuid4\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.text import slugify\nfrom django.utils.translation import ugettext_lazy as _\nfrom mptt.querysets import TreeQuerySet\nfrom unidecode import unidecode\n\nfrom catalog.models import (\n AbstractCategory,\n AbstractProduct,\n CategoryManager,\n ProductActiveManager,\n ProductManager,\n)\nfrom ecommerce.models import Order as ecOrder\nfrom pages.models import CustomPage, ModelPage, Page, SyncPageMixin, PageManager\n\n\ndef randomize_slug(slug: str) -> str:\n slug_hash = ''.join(\n random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)\n )\n return f'{slug}_{slug_hash}'\n\n\nclass SECategoryQuerySet(TreeQuerySet):\n def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':\n categories_with_pictures = (\n self\n .filter(products__page__images__isnull=False)\n .distinct()\n )\n\n return categories_with_pictures.get_ancestors(include_self=True)\n\n\nclass SECategoryManager(CategoryManager.from_queryset(SECategoryQuerySet)):\n pass\n\n\nclass Category(AbstractCategory, SyncPageMixin):\n\n objects = SECategoryManager()\n uuid = models.UUIDField(default=uuid4, editable=False)\n\n @classmethod\n def get_default_parent(cls):\n return CustomPage.objects.filter(slug='catalog').first()\n\n @property\n def image(self):\n products = self.products.all()\n return products[0].image if products else None\n\n def get_absolute_url(self):\n return reverse('category', args=(self.page.slug,))\n\n\nclass Product(AbstractProduct, SyncPageMixin):\n\n # That's why we are needed to explicitly add objects manager here\n # because of Django special managers behaviour.\n # Se se#480 for details.\n objects = ProductManager()\n actives = ProductActiveManager()\n\n category = models.ForeignKey(\n Category,\n on_delete=models.CASCADE,\n null=True,\n related_name='products',\n verbose_name=_('category'),\n )\n\n tags = models.ManyToManyField(\n 'Tag',\n 
related_name='products',\n blank=True,\n verbose_name=_('tags'),\n )\n\n vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))\n uuid = models.UUIDField(default=uuid4, editable=False)\n purchase_price = models.FloatField(\n default=0, verbose_name=_('purchase_price'))\n wholesale_small = models.FloatField(\n default=0, verbose_name=_('wholesale_small'))\n wholesale_medium = models.FloatField(\n default=0, verbose_name=_('wholesale_medium'))\n wholesale_large = models.FloatField(\n default=0, verbose_name=_('wholesale_large'))\n\n def get_absolute_url(self):\n return reverse('product', args=(self.vendor_code,))\n\n @property\n def average_rate(self):\n \"\"\"Return rounded to first decimal averaged rating.\"\"\"\n rating = self.product_feedbacks.aggregate(\n avg=models.Avg('rating')).get('avg', 0)\n return round(rating, 1)\n\n @property\n def feedback_count(self):\n return self.product_feedbacks.count()\n\n @property\n def feedback(self):\n return self.product_feedbacks.all().order_by('-date')\n\n def get_params(self):\n return Tag.objects.filter_by_products([self]).get_group_tags_pairs()\n\n # @todo #388:30m Move Product.get_siblings method to refarm-site\n # And reuse it on STB.\n def get_siblings(self, offset):\n return (\n self.__class__.actives\n .filter(category=self.category)\n .prefetch_related('category')\n .select_related('page')[:offset]\n )\n\n def get_brand_name(self) -> str:\n brand: Optional['Tag'] = Tag.objects.get_brands([self]).get(self)\n return brand.name if brand else ''\n\n\nclass ProductFeedback(models.Model):\n product = models.ForeignKey(\n Product, on_delete=models.CASCADE, null=True,\n related_name='product_feedbacks'\n )\n\n date = models.DateTimeField(\n auto_now=True, db_index=True, verbose_name=_('date'))\n name = models.CharField(\n max_length=255, db_index=True, verbose_name=_('name'))\n rating = models.PositiveSmallIntegerField(\n default=1, db_index=True, verbose_name=_('rating'))\n dignities = models.TextField(\n default='', blank=True, verbose_name=_('dignities'))\n limitations = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n general = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n\n\ndef _default_payment():\n \"\"\"Default payment option is first element of first tuple in options.\"\"\"\n assert settings.PAYMENT_OPTIONS[0][0], 'No payment options!'\n return settings.PAYMENT_OPTIONS[0][0]\n\n\nclass Order(ecOrder):\n address = models.TextField(blank=True, default='')\n payment_type = models.CharField(\n max_length=255,\n choices=settings.PAYMENT_OPTIONS,\n default=_default_payment()\n )\n comment = models.TextField(blank=True, default='')\n\n @property\n def payment_type_name(self):\n \"\"\"Return name for an order's payment option.\"\"\"\n return next(\n name for option, name in settings.PAYMENT_OPTIONS\n if self.payment_type == option\n )\n\n def set_positions(self, cart):\n \"\"\"Save cart's state into Order instance.\"\"\"\n self.save()\n for id_, position in cart:\n self.positions.create(\n order=self,\n product_id=id_,\n vendor_code=position['vendor_code'],\n name=position['name'],\n price=position['price'],\n quantity=position['quantity']\n )\n return self\n\n\nclass CategoryPage(ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n objects = ModelPage.create_model_page_managers(Category)\n\n\nclass ProductPage(ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class 
Meta(ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n objects = ModelPage.create_model_page_managers(Product)\n\n\nclass TagGroup(models.Model):\n\n uuid = models.UUIDField(default=uuid4, editable=False) # Ignore CPDBear\n name = models.CharField(\n max_length=100, db_index=True, verbose_name=_('name'))\n position = models.PositiveSmallIntegerField(\n default=0, blank=True, db_index=True, verbose_name=_('position'),\n )\n\n def __str__(self):\n return self.name\n\n\nclass TagQuerySet(models.QuerySet):\n\n def filter_by_products(self, products: List[Product]):\n ordering = settings.TAGS_ORDER\n distinct = [order.lstrip('-') for order in ordering]\n\n return (\n self\n .filter(products__in=products)\n .order_by(*ordering)\n .distinct(*distinct, 'id')\n )\n\n def get_group_tags_pairs(self) -> List[Tuple[TagGroup, List['Tag']]]:\n grouped_tags = groupby(self.prefetch_related('group'), key=attrgetter('group'))\n return [\n (group, list(tags_))\n for group, tags_ in grouped_tags\n ]\n\n def get_brands(self, products: List[Product]) -> Dict[Product, 'Tag']:\n brand_tags = (\n self.filter(group__name=settings.BRAND_TAG_GROUP_NAME)\n .prefetch_related('products')\n .select_related('group')\n )\n\n return {\n product: brand\n for brand in brand_tags for product in products\n if product in brand.products.all()\n }\n\n\nclass TagManager(models.Manager.from_queryset(TagQuerySet)):\n\n def get_queryset(self):\n return (\n super().get_queryset()\n .order_by(*settings.TAGS_ORDER)\n )\n\n def get_group_tags_pairs(self):\n return self.get_queryset().get_group_tags_pairs()\n\n def filter_by_products(self, products):\n return self.get_queryset().filter_by_products(products)\n\n def get_brands(self, products):\n \"\"\"Get a batch of products' brands.\"\"\"\n return self.get_queryset().get_brands(products)\n\n\nclass Tag(models.Model):\n\n # Uncomment it after moving to refarm with rf#162\n # class Meta:\n # unique_together = ('name', 'group')\n\n objects = TagManager()\n\n uuid = models.UUIDField(default=uuid4, editable=False)\n name = models.CharField(\n max_length=100, db_index=True, verbose_name=_('name'))\n position = models.PositiveSmallIntegerField(\n default=0, blank=True, db_index=True, verbose_name=_('position'),\n )\n\n # Set it as unique with rf#162\n slug = models.SlugField(default='')\n\n group = models.ForeignKey(\n TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',\n )\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n if not self.slug:\n # same slugify code used in PageMixin object\n self.slug = slugify(\n unidecode(self.name.replace('.', '-').replace('+', '-'))\n )\n doubled_tag_qs = self.__class__.objects.filter(slug=self.slug)\n if doubled_tag_qs:\n self.slug = randomize_slug(self.slug)\n super(Tag, self).save(*args, **kwargs)\n\n @staticmethod\n def parse_url_tags(tags: str) -> list:\n groups = tags.split(settings.TAGS_URL_DELIMITER)\n return set(chain.from_iterable(\n group.split(settings.TAG_GROUPS_URL_DELIMITER) for group in groups\n ))\n\n\ndef serialize_tags(\n tags: TagQuerySet,\n field_name: str,\n type_delimiter: str,\n group_delimiter: str,\n) -> str:\n group_tags_map = tags.get_group_tags_pairs()\n\n _, tags_by_group = zip(*group_tags_map)\n\n return group_delimiter.join(\n type_delimiter.join(getattr(tag, field_name) for tag in tags_list)\n for tags_list in tags_by_group\n )\n\n\ndef serialize_tags_to_url(tags: TagQuerySet) -> str:\n return serialize_tags(\n tags=tags,\n field_name='slug',\n 
type_delimiter=settings.TAGS_URL_DELIMITER,\n group_delimiter=settings.TAG_GROUPS_URL_DELIMITER\n )\n\n\ndef serialize_tags_to_title(tags: TagQuerySet) -> str:\n return serialize_tags(\n tags=tags,\n field_name='name',\n type_delimiter=settings.TAGS_TITLE_DELIMITER,\n group_delimiter=settings.TAG_GROUPS_TITLE_DELIMITER\n )\n\n\nclass ExcludedModelTPageManager(PageManager):\n\n def get_queryset(self):\n return super().get_queryset().exclude(type=Page.MODEL_TYPE)\n\n\nclass ExcludedModelTPage(Page):\n\n class Meta(Page.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n objects = ExcludedModelTPageManager()\n", "path": "shopelectro/models.py"}], "after_files": [{"content": "import random\nimport string\nfrom itertools import chain, groupby\nfrom operator import attrgetter\nfrom typing import Dict, List, Optional, Tuple\nfrom uuid import uuid4\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.text import slugify\nfrom django.utils.translation import ugettext_lazy as _\nfrom mptt.querysets import TreeQuerySet\nfrom unidecode import unidecode\n\nfrom catalog.models import (\n AbstractCategory,\n AbstractProduct,\n CategoryManager,\n ProductActiveManager,\n ProductManager,\n)\nfrom ecommerce.models import Order as ecOrder\nfrom pages.models import CustomPage, ModelPage, Page, SyncPageMixin, PageManager\n\n\ndef randomize_slug(slug: str) -> str:\n slug_hash = ''.join(\n random.choices(string.ascii_lowercase, k=settings.SLUG_HASH_SIZE)\n )\n return f'{slug}_{slug_hash}'\n\n\nclass SECategoryQuerySet(TreeQuerySet):\n def get_categories_tree_with_pictures(self) -> 'SECategoryQuerySet':\n categories_with_pictures = (\n self\n .filter(products__page__images__isnull=False)\n .distinct()\n )\n\n return categories_with_pictures.get_ancestors(include_self=True)\n\n\nclass SECategoryManager(CategoryManager.from_queryset(SECategoryQuerySet)):\n pass\n\n\nclass Category(AbstractCategory, SyncPageMixin):\n\n objects = SECategoryManager()\n uuid = models.UUIDField(default=uuid4, editable=False)\n\n @classmethod\n def get_default_parent(cls):\n return CustomPage.objects.filter(slug='catalog').first()\n\n @property\n def image(self):\n products = self.products.all()\n return products[0].image if products else None\n\n def get_absolute_url(self):\n return reverse('category', args=(self.page.slug,))\n\n\nclass Product(AbstractProduct, SyncPageMixin):\n\n # That's why we are needed to explicitly add objects manager here\n # because of Django special managers behaviour.\n # Se se#480 for details.\n objects = ProductManager()\n actives = ProductActiveManager()\n\n category = models.ForeignKey(\n Category,\n on_delete=models.CASCADE,\n null=True,\n related_name='products',\n verbose_name=_('category'),\n )\n\n tags = models.ManyToManyField(\n 'Tag',\n related_name='products',\n blank=True,\n verbose_name=_('tags'),\n )\n\n vendor_code = models.SmallIntegerField(verbose_name=_('vendor_code'))\n uuid = models.UUIDField(default=uuid4, editable=False)\n purchase_price = models.FloatField(\n default=0, verbose_name=_('purchase_price'))\n wholesale_small = models.FloatField(\n default=0, verbose_name=_('wholesale_small'))\n wholesale_medium = models.FloatField(\n default=0, verbose_name=_('wholesale_medium'))\n wholesale_large = models.FloatField(\n default=0, verbose_name=_('wholesale_large'))\n\n def get_absolute_url(self):\n return reverse('product', args=(self.vendor_code,))\n\n @property\n def average_rate(self):\n \"\"\"Return rounded to first 
decimal averaged rating.\"\"\"\n rating = self.product_feedbacks.aggregate(\n avg=models.Avg('rating')).get('avg', 0)\n return round(rating, 1)\n\n @property\n def feedback_count(self):\n return self.product_feedbacks.count()\n\n @property\n def feedback(self):\n return self.product_feedbacks.all().order_by('-date')\n\n def get_params(self):\n return Tag.objects.filter_by_products([self]).get_group_tags_pairs()\n\n def get_brand_name(self) -> str:\n brand: Optional['Tag'] = Tag.objects.get_brands([self]).get(self)\n return brand.name if brand else ''\n\n\nclass ProductFeedback(models.Model):\n product = models.ForeignKey(\n Product, on_delete=models.CASCADE, null=True,\n related_name='product_feedbacks'\n )\n\n date = models.DateTimeField(\n auto_now=True, db_index=True, verbose_name=_('date'))\n name = models.CharField(\n max_length=255, db_index=True, verbose_name=_('name'))\n rating = models.PositiveSmallIntegerField(\n default=1, db_index=True, verbose_name=_('rating'))\n dignities = models.TextField(\n default='', blank=True, verbose_name=_('dignities'))\n limitations = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n general = models.TextField(\n default='', blank=True, verbose_name=_('limitations'))\n\n\ndef _default_payment():\n \"\"\"Default payment option is first element of first tuple in options.\"\"\"\n assert settings.PAYMENT_OPTIONS[0][0], 'No payment options!'\n return settings.PAYMENT_OPTIONS[0][0]\n\n\nclass Order(ecOrder):\n address = models.TextField(blank=True, default='')\n payment_type = models.CharField(\n max_length=255,\n choices=settings.PAYMENT_OPTIONS,\n default=_default_payment()\n )\n comment = models.TextField(blank=True, default='')\n\n @property\n def payment_type_name(self):\n \"\"\"Return name for an order's payment option.\"\"\"\n return next(\n name for option, name in settings.PAYMENT_OPTIONS\n if self.payment_type == option\n )\n\n def set_positions(self, cart):\n \"\"\"Save cart's state into Order instance.\"\"\"\n self.save()\n for id_, position in cart:\n self.positions.create(\n order=self,\n product_id=id_,\n vendor_code=position['vendor_code'],\n name=position['name'],\n price=position['price'],\n quantity=position['quantity']\n )\n return self\n\n\nclass CategoryPage(ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n objects = ModelPage.create_model_page_managers(Category)\n\n\nclass ProductPage(ModelPage):\n \"\"\"Create proxy model for Admin.\"\"\"\n\n class Meta(ModelPage.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n objects = ModelPage.create_model_page_managers(Product)\n\n\nclass TagGroup(models.Model):\n\n uuid = models.UUIDField(default=uuid4, editable=False) # Ignore CPDBear\n name = models.CharField(\n max_length=100, db_index=True, verbose_name=_('name'))\n position = models.PositiveSmallIntegerField(\n default=0, blank=True, db_index=True, verbose_name=_('position'),\n )\n\n def __str__(self):\n return self.name\n\n\nclass TagQuerySet(models.QuerySet):\n\n def filter_by_products(self, products: List[Product]):\n ordering = settings.TAGS_ORDER\n distinct = [order.lstrip('-') for order in ordering]\n\n return (\n self\n .filter(products__in=products)\n .order_by(*ordering)\n .distinct(*distinct, 'id')\n )\n\n def get_group_tags_pairs(self) -> List[Tuple[TagGroup, List['Tag']]]:\n grouped_tags = groupby(self.prefetch_related('group'), key=attrgetter('group'))\n return [\n (group, list(tags_))\n for group, 
tags_ in grouped_tags\n ]\n\n def get_brands(self, products: List[Product]) -> Dict[Product, 'Tag']:\n brand_tags = (\n self.filter(group__name=settings.BRAND_TAG_GROUP_NAME)\n .prefetch_related('products')\n .select_related('group')\n )\n\n return {\n product: brand\n for brand in brand_tags for product in products\n if product in brand.products.all()\n }\n\n\nclass TagManager(models.Manager.from_queryset(TagQuerySet)):\n\n def get_queryset(self):\n return (\n super().get_queryset()\n .order_by(*settings.TAGS_ORDER)\n )\n\n def get_group_tags_pairs(self):\n return self.get_queryset().get_group_tags_pairs()\n\n def filter_by_products(self, products):\n return self.get_queryset().filter_by_products(products)\n\n def get_brands(self, products):\n \"\"\"Get a batch of products' brands.\"\"\"\n return self.get_queryset().get_brands(products)\n\n\nclass Tag(models.Model):\n\n # Uncomment it after moving to refarm with rf#162\n # class Meta:\n # unique_together = ('name', 'group')\n\n objects = TagManager()\n\n uuid = models.UUIDField(default=uuid4, editable=False)\n name = models.CharField(\n max_length=100, db_index=True, verbose_name=_('name'))\n position = models.PositiveSmallIntegerField(\n default=0, blank=True, db_index=True, verbose_name=_('position'),\n )\n\n # Set it as unique with rf#162\n slug = models.SlugField(default='')\n\n group = models.ForeignKey(\n TagGroup, on_delete=models.CASCADE, null=True, related_name='tags',\n )\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n if not self.slug:\n # same slugify code used in PageMixin object\n self.slug = slugify(\n unidecode(self.name.replace('.', '-').replace('+', '-'))\n )\n doubled_tag_qs = self.__class__.objects.filter(slug=self.slug)\n if doubled_tag_qs:\n self.slug = randomize_slug(self.slug)\n super(Tag, self).save(*args, **kwargs)\n\n @staticmethod\n def parse_url_tags(tags: str) -> list:\n groups = tags.split(settings.TAGS_URL_DELIMITER)\n return set(chain.from_iterable(\n group.split(settings.TAG_GROUPS_URL_DELIMITER) for group in groups\n ))\n\n\ndef serialize_tags(\n tags: TagQuerySet,\n field_name: str,\n type_delimiter: str,\n group_delimiter: str,\n) -> str:\n group_tags_map = tags.get_group_tags_pairs()\n\n _, tags_by_group = zip(*group_tags_map)\n\n return group_delimiter.join(\n type_delimiter.join(getattr(tag, field_name) for tag in tags_list)\n for tags_list in tags_by_group\n )\n\n\ndef serialize_tags_to_url(tags: TagQuerySet) -> str:\n return serialize_tags(\n tags=tags,\n field_name='slug',\n type_delimiter=settings.TAGS_URL_DELIMITER,\n group_delimiter=settings.TAG_GROUPS_URL_DELIMITER\n )\n\n\ndef serialize_tags_to_title(tags: TagQuerySet) -> str:\n return serialize_tags(\n tags=tags,\n field_name='name',\n type_delimiter=settings.TAGS_TITLE_DELIMITER,\n group_delimiter=settings.TAG_GROUPS_TITLE_DELIMITER\n )\n\n\nclass ExcludedModelTPageManager(PageManager):\n\n def get_queryset(self):\n return super().get_queryset().exclude(type=Page.MODEL_TYPE)\n\n\nclass ExcludedModelTPage(Page):\n\n class Meta(Page.Meta): # Ignore PycodestyleBear (E303)\n proxy = True\n\n objects = ExcludedModelTPageManager()\n", "path": "shopelectro/models.py"}]} | 3,943 | 200 |
gh_patches_debug_5869 | rasdani/github-patches | git_diff | vega__altair-423 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Logo for Website
It would be nice to have a logo for the website. I'm personally horribly unskilled at such things...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `doc/conf.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # altair documentation build configuration file, created by
5 # sphinx-quickstart on Wed Sep 7 12:52:48 2016.
6 #
7 # This file is execfile()d with the current directory set to its
8 # containing dir.
9 #
10 # Note that not all possible configuration values are present in this
11 # autogenerated file.
12 #
13 # All configuration values have a default; values that are commented out
14 # serve to show the default.
15
16 import sys
17 import os
18
19 # If extensions (or modules to document with autodoc) are in another directory,
20 # add these directories to sys.path here. If the directory is relative to the
21 # documentation root, use os.path.abspath to make it absolute, like shown here.
22 sys.path.insert(0, os.path.abspath('sphinxext'))
23
24 # -- General configuration ------------------------------------------------
25
26 # If your documentation needs a minimal Sphinx version, state it here.
27 #needs_sphinx = '1.0'
28
29 # Add any Sphinx extension module names here, as strings. They can be
30 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
31 # ones.
32 extensions = [
33 'sphinx.ext.autodoc',
34 'sphinx.ext.autosummary',
35 'sphinx.ext.doctest',
36 'sphinx.ext.coverage',
37 'sphinx.ext.githubpages',
38 'numpydoc.numpydoc',
39 'altair_ext.altairplot',
40 'altair_ext.altairgallery',
41 'altair_ext.altair_autodoc',
42 ]
43
44 altair_plot_links = {'editor': True, 'source': False, 'export': False}
45
46 autodoc_default_flags = ['members']
47 autodoc_member_order = 'groupwise'
48
49 # Add any paths that contain templates here, relative to this directory.
50 templates_path = ['_templates']
51
52 # The suffix(es) of source filenames.
53 # You can specify multiple suffix as a list of string:
54 # source_suffix = ['.rst', '.md']
55 source_suffix = '.rst'
56
57 # The encoding of source files.
58 #source_encoding = 'utf-8-sig'
59
60 # The master toctree document.
61 master_doc = 'index'
62
63 # General information about the project.
64 project = 'Altair'
65 copyright = '2016, Brian Granger and Jake VanderPlas'
66 author = 'Brian Granger and Jake VanderPlas'
67
68 # The version info for the project you're documenting, acts as replacement for
69 # |version| and |release|, also used in various other places throughout the
70 # built documents.
71 #
72 # The short X.Y version.
73 version = '1.3'
74 # The full version, including alpha/beta/rc tags.
75 release = '1.3.0.dev0'
76
77 # The language for content autogenerated by Sphinx. Refer to documentation
78 # for a list of supported languages.
79 #
80 # This is also used if you do content translation via gettext catalogs.
81 # Usually you set "language" from the command line for these cases.
82 language = None
83
84 # There are two options for replacing |today|: either, you set today to some
85 # non-false value, then it is used:
86 #today = ''
87 # Else, today_fmt is used as the format for a strftime call.
88 #today_fmt = '%B %d, %Y'
89
90 # List of patterns, relative to source directory, that match files and
91 # directories to ignore when looking for source files.
92 # This patterns also effect to html_static_path and html_extra_path
93 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
94
95 # The reST default role (used for this markup: `text`) to use for all
96 # documents.
97 #default_role = None
98
99 # If true, '()' will be appended to :func: etc. cross-reference text.
100 #add_function_parentheses = True
101
102 # If true, the current module name will be prepended to all description
103 # unit titles (such as .. function::).
104 #add_module_names = True
105
106 # If true, sectionauthor and moduleauthor directives will be shown in the
107 # output. They are ignored by default.
108 #show_authors = False
109
110 # The name of the Pygments (syntax highlighting) style to use.
111 pygments_style = 'sphinx'
112
113 # A list of ignored prefixes for module index sorting.
114 #modindex_common_prefix = []
115
116 # If true, keep warnings as "system message" paragraphs in the built documents.
117 #keep_warnings = False
118
119 # If true, `todo` and `todoList` produce output, else they produce nothing.
120 todo_include_todos = False
121
122
123 # -- Options for HTML output ----------------------------------------------
124
125 # The theme to use for HTML and HTML Help pages. See the documentation for
126 # a list of builtin themes.
127 html_theme = 'sphinx_rtd_theme'
128
129 # Theme options are theme-specific and customize the look and feel of a theme
130 # further. For a list of options available for each theme, see the
131 # documentation.
132 #html_theme_options = {}
133
134 # Add any paths that contain custom themes here, relative to this directory.
135 #html_theme_path = []
136
137 # The name for this set of Sphinx documents.
138 # "<project> v<release> documentation" by default.
139 #html_title = 'altair v1.0.0'
140
141 # A shorter title for the navigation bar. Default is the same as html_title.
142 html_short_title = 'Altair'
143
144 # The name of an image file (relative to this directory) to place at the top
145 # of the sidebar.
146 #html_logo = None
147
148 # The name of an image file (relative to this directory) to use as a favicon of
149 # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
150 # pixels large.
151 html_favicon = '_static/favicon.ico'
152
153 # Add any paths that contain custom static files (such as style sheets) here,
154 # relative to this directory. They are copied after the builtin static files,
155 # so a file named "default.css" will overwrite the builtin "default.css".
156 html_static_path = ['_static', '_images']
157
158 # adapted from: http://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html
159 # and
160 # https://github.com/rtfd/sphinx_rtd_theme/issues/117
161 def setup(app):
162 app.add_stylesheet('theme_overrides.css')
163
164 # Add any extra paths that contain custom files (such as robots.txt or
165 # .htaccess) here, relative to this directory. These files are copied
166 # directly to the root of the documentation.
167 #html_extra_path = []
168
169 # If not None, a 'Last updated on:' timestamp is inserted at every page
170 # bottom, using the given strftime format.
171 # The empty string is equivalent to '%b %d, %Y'.
172 #html_last_updated_fmt = None
173
174 # If true, SmartyPants will be used to convert quotes and dashes to
175 # typographically correct entities.
176 #html_use_smartypants = True
177
178 # Custom sidebar templates, maps document names to template names.
179 #html_sidebars = {}
180
181 # Additional templates that should be rendered to pages, maps page names to
182 # template names.
183 #html_additional_pages = {}
184
185 # If false, no module index is generated.
186 #html_domain_indices = True
187
188 # If false, no index is generated.
189 #html_use_index = True
190
191 # If true, the index is split into individual pages for each letter.
192 #html_split_index = False
193
194 # If true, links to the reST sources are added to the pages.
195 #html_show_sourcelink = True
196
197 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
198 #html_show_sphinx = True
199
200 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
201 #html_show_copyright = True
202
203 # If true, an OpenSearch description file will be output, and all pages will
204 # contain a <link> tag referring to it. The value of this option must be the
205 # base URL from which the finished HTML is served.
206 #html_use_opensearch = ''
207
208 # This is the file name suffix for HTML files (e.g. ".xhtml").
209 #html_file_suffix = None
210
211 # Language to be used for generating the HTML full-text search index.
212 # Sphinx supports the following languages:
213 # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
214 # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
215 #html_search_language = 'en'
216
217 # A dictionary with options for the search language support, empty by default.
218 # 'ja' uses this config value.
219 # 'zh' user can custom change `jieba` dictionary path.
220 #html_search_options = {'type': 'default'}
221
222 # The name of a javascript file (relative to the configuration directory) that
223 # implements a search results scorer. If empty, the default will be used.
224 #html_search_scorer = 'scorer.js'
225
226 # Output file base name for HTML help builder.
227 htmlhelp_basename = 'altairdoc'
228
229 # -- Options for LaTeX output ---------------------------------------------
230
231 latex_elements = {
232 # The paper size ('letterpaper' or 'a4paper').
233 #'papersize': 'letterpaper',
234
235 # The font size ('10pt', '11pt' or '12pt').
236 #'pointsize': '10pt',
237
238 # Additional stuff for the LaTeX preamble.
239 #'preamble': '',
240
241 # Latex figure (float) alignment
242 #'figure_align': 'htbp',
243 }
244
245 # Grouping the document tree into LaTeX files. List of tuples
246 # (source start file, target name, title,
247 # author, documentclass [howto, manual, or own class]).
248 latex_documents = [
249 (master_doc, 'altair.tex', 'altair Documentation',
250 'Brian Granger and Jake VanderPlas', 'manual'),
251 ]
252
253 # The name of an image file (relative to this directory) to place at the top of
254 # the title page.
255 #latex_logo = None
256
257 # For "manual" documents, if this is true, then toplevel headings are parts,
258 # not chapters.
259 #latex_use_parts = False
260
261 # If true, show page references after internal links.
262 #latex_show_pagerefs = False
263
264 # If true, show URL addresses after external links.
265 #latex_show_urls = False
266
267 # Documents to append as an appendix to all manuals.
268 #latex_appendices = []
269
270 # If false, no module index is generated.
271 #latex_domain_indices = True
272
273
274 # -- Options for manual page output ---------------------------------------
275
276 # One entry per manual page. List of tuples
277 # (source start file, name, description, authors, manual section).
278 man_pages = [
279 (master_doc, 'altair', 'altair Documentation',
280 [author], 1)
281 ]
282
283 # If true, show URL addresses after external links.
284 #man_show_urls = False
285
286
287 # -- Options for Texinfo output -------------------------------------------
288
289 # Grouping the document tree into Texinfo files. List of tuples
290 # (source start file, target name, title, author,
291 # dir menu entry, description, category)
292 texinfo_documents = [
293 (master_doc, 'altair', 'altair Documentation',
294 author, 'altair', 'One line description of project.',
295 'Miscellaneous'),
296 ]
297
298 # Documents to append as an appendix to all manuals.
299 #texinfo_appendices = []
300
301 # If false, no module index is generated.
302 #texinfo_domain_indices = True
303
304 # How to display URL addresses: 'footnote', 'no', or 'inline'.
305 #texinfo_show_urls = 'footnote'
306
307 # If true, do not generate a @detailmenu in the "Top" node's menu.
308 #texinfo_no_detailmenu = False
309
310 # Hide extra class members
311 numpydoc_show_class_members = False
312
313 # For the altairplot extension
314 # altairplot_links = {'editor': True, 'source': True, 'export': True}
315 # altairplot_d3_js_url = "https://d3js.org/d3.v3.min.js"
316 altairplot_vega_js_url = "https://vega.github.io/vega/releases/v2.6.5/vega.js"
317 altairplot_vegalite_js_url = "https://cdnjs.cloudflare.com/ajax/libs/vega-lite/1.2.1/vega-lite.min.js"
318 altairplot_vegaembed_js_url = "https://cdnjs.cloudflare.com/ajax/libs/vega-embed/2.2.0/vega-embed.min.js"
319
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -143,7 +143,7 @@
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
-#html_logo = None
+html_logo = '_images/altair-logo-light.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
| {"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -143,7 +143,7 @@\n \n # The name of an image file (relative to this directory) to place at the top\n # of the sidebar.\n-#html_logo = None\n+html_logo = '_images/altair-logo-light.png'\n \n # The name of an image file (relative to this directory) to use as a favicon of\n # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n", "issue": "Logo for Website\nIt would be nice to have a logo for the website. I'm personally horribly unskilled at such things...\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# altair documentation build configuration file, created by\n# sphinx-quickstart on Wed Sep 7 12:52:48 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('sphinxext'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.coverage',\n 'sphinx.ext.githubpages',\n 'numpydoc.numpydoc',\n 'altair_ext.altairplot',\n 'altair_ext.altairgallery',\n 'altair_ext.altair_autodoc',\n]\n\naltair_plot_links = {'editor': True, 'source': False, 'export': False}\n\nautodoc_default_flags = ['members']\nautodoc_member_order = 'groupwise'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Altair'\ncopyright = '2016, Brian Granger and Jake VanderPlas'\nauthor = 'Brian Granger and Jake VanderPlas'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '1.3'\n# The full version, including alpha/beta/rc tags.\nrelease = '1.3.0.dev0'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents.\n# \"<project> v<release> documentation\" by default.\n#html_title = 'altair v1.0.0'\n\n# A shorter title for the navigation bar. Default is the same as html_title.\nhtml_short_title = 'Altair'\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = '_static/favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static', '_images']\n\n# adapted from: http://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html\n# and\n# https://github.com/rtfd/sphinx_rtd_theme/issues/117\ndef setup(app):\n app.add_stylesheet('theme_overrides.css')\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. 
These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not None, a 'Last updated on:' timestamp is inserted at every page\n# bottom, using the given strftime format.\n# The empty string is equivalent to '%b %d, %Y'.\n#html_last_updated_fmt = None\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# 'ja' uses this config value.\n# 'zh' user can custom change `jieba` dictionary path.\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'altairdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'altair.tex', 'altair Documentation',\n 'Brian Granger and Jake VanderPlas', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'altair', 'altair Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'altair', 'altair Documentation',\n author, 'altair', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n# Hide extra class members\nnumpydoc_show_class_members = False\n\n# For the altairplot extension\n# altairplot_links = {'editor': True, 'source': True, 'export': True}\n# altairplot_d3_js_url = \"https://d3js.org/d3.v3.min.js\"\naltairplot_vega_js_url = \"https://vega.github.io/vega/releases/v2.6.5/vega.js\"\naltairplot_vegalite_js_url = \"https://cdnjs.cloudflare.com/ajax/libs/vega-lite/1.2.1/vega-lite.min.js\"\naltairplot_vegaembed_js_url = \"https://cdnjs.cloudflare.com/ajax/libs/vega-embed/2.2.0/vega-embed.min.js\"\n", "path": "doc/conf.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# altair documentation build configuration file, created by\n# sphinx-quickstart on Wed Sep 7 12:52:48 2016.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('sphinxext'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.coverage',\n 'sphinx.ext.githubpages',\n 'numpydoc.numpydoc',\n 'altair_ext.altairplot',\n 'altair_ext.altairgallery',\n 'altair_ext.altair_autodoc',\n]\n\naltair_plot_links = {'editor': True, 'source': False, 'export': False}\n\nautodoc_default_flags = ['members']\nautodoc_member_order = 'groupwise'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Altair'\ncopyright = '2016, Brian Granger and Jake VanderPlas'\nauthor = 'Brian Granger and Jake VanderPlas'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '1.3'\n# The full version, including alpha/beta/rc tags.\nrelease = '1.3.0.dev0'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents.\n# \"<project> v<release> documentation\" by default.\n#html_title = 'altair v1.0.0'\n\n# A shorter title for the navigation bar. Default is the same as html_title.\nhtml_short_title = 'Altair'\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = '_images/altair-logo-light.png'\n\n# The name of an image file (relative to this directory) to use as a favicon of\n# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = '_static/favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static', '_images']\n\n# adapted from: http://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html\n# and\n# https://github.com/rtfd/sphinx_rtd_theme/issues/117\ndef setup(app):\n app.add_stylesheet('theme_overrides.css')\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not None, a 'Last updated on:' timestamp is inserted at every page\n# bottom, using the given strftime format.\n# The empty string is equivalent to '%b %d, %Y'.\n#html_last_updated_fmt = None\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'\n#html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# 'ja' uses this config value.\n# 'zh' user can custom change `jieba` dictionary path.\n#html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. 
If empty, the default will be used.\n#html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'altairdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n\n# Latex figure (float) alignment\n#'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'altair.tex', 'altair Documentation',\n 'Brian Granger and Jake VanderPlas', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'altair', 'altair Documentation',\n [author], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'altair', 'altair Documentation',\n author, 'altair', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n# Hide extra class members\nnumpydoc_show_class_members = False\n\n# For the altairplot extension\n# altairplot_links = {'editor': True, 'source': True, 'export': True}\n# altairplot_d3_js_url = \"https://d3js.org/d3.v3.min.js\"\naltairplot_vega_js_url = \"https://vega.github.io/vega/releases/v2.6.5/vega.js\"\naltairplot_vegalite_js_url = \"https://cdnjs.cloudflare.com/ajax/libs/vega-lite/1.2.1/vega-lite.min.js\"\naltairplot_vegaembed_js_url = \"https://cdnjs.cloudflare.com/ajax/libs/vega-embed/2.2.0/vega-embed.min.js\"\n", "path": "doc/conf.py"}]} | 3,875 | 135 |
gh_patches_debug_37715 | rasdani/github-patches | git_diff | opendatacube__datacube-core-440 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Time range strings behave differently between datacube and xarray
When specifying a time range in a datacube:
`dc.load(..., time=('2003-01', '2003-03'))`
is interpreted to be
_2003-01-01 00:00:00 to 2003-03-01 00:00:00_
i.e. values round down
vs
In xarray:
`data.sel(time=slice('2003-01', '2003-03'))`
is interpreted to be:
_2003-01-01 00:00:00 to 2003-03-31 23:59:59.999_
i.e. values round out
Ideally they would behave the same.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datacube/api/query.py`
Content:
```
1 #
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """
15 Storage Query and Access API module
16 """
17
18 from __future__ import absolute_import, division, print_function
19
20 import logging
21 import datetime
22 import collections
23 import warnings
24
25 from dateutil import tz
26 from pandas import to_datetime as pandas_to_datetime
27 import numpy as np
28
29 from ..compat import string_types, integer_types
30 from ..model import Range
31 from ..utils import geometry, datetime_to_seconds_since_1970
32
33 _LOG = logging.getLogger(__name__)
34
35
36 GroupBy = collections.namedtuple('GroupBy', ['dimension', 'group_by_func', 'units', 'sort_key'])
37
38 FLOAT_TOLERANCE = 0.0000001 # TODO: For DB query, use some sort of 'contains' query, rather than range overlap.
39 SPATIAL_KEYS = ('latitude', 'lat', 'y', 'longitude', 'lon', 'long', 'x')
40 CRS_KEYS = ('crs', 'coordinate_reference_system')
41 OTHER_KEYS = ('measurements', 'group_by', 'output_crs', 'resolution', 'set_nan', 'product', 'geopolygon', 'like',
42 'source_filter')
43
44
45 class Query(object):
46 def __init__(self, index=None, product=None, geopolygon=None, like=None, **search_terms):
47 """Parses search terms in preparation for querying the Data Cube Index.
48
49 Create a :class:`Query` object by passing it a set of search terms as keyword arguments.
50
51 >>> query = Query(product='ls5_nbar_albers', time=('2001-01-01', '2002-01-01'))
52
53 Use by accessing :attr:`search_terms`:
54
55 >>> query.search_terms['time'] # doctest: +NORMALIZE_WHITESPACE
56 Range(begin=datetime.datetime(2001, 1, 1, 0, 0, tzinfo=<UTC>), \
57 end=datetime.datetime(2002, 1, 1, 0, 0, tzinfo=<UTC>))
58
59 By passing in an ``index``, the search parameters will be validated as existing on the ``product``.
60
61 Used by :meth:`datacube.Datacube.find_datasets` and :meth:`datacube.Datacube.load`.
62
63 :param datacube.index.Index index: An optional `index` object, if checking of field names is desired.
64 :param str product: name of product
65 :param geopolygon: spatial bounds of the search
66 :type geopolygon: geometry.Geometry or None
67 :param xarray.Dataset like: spatio-temporal bounds of `like` are used for the search
68 :param search_terms:
69 * `measurements` - list of measurements to retrieve
70 * `latitude`, `lat`, `y`, `longitude`, `lon`, `long`, `x` - tuples (min, max) bounding spatial dimensions
71 * `crs` - spatial coordinate reference system to interpret the spatial bounds
72 * `group_by` - observation grouping method. One of `time`, `solar_day`. Default is `time`
73 """
74 self.product = product
75 self.geopolygon = query_geopolygon(geopolygon=geopolygon, **search_terms)
76 if 'source_filter' in search_terms and search_terms['source_filter'] is not None:
77 self.source_filter = Query(**search_terms['source_filter'])
78 else:
79 self.source_filter = None
80
81 remaining_keys = set(search_terms.keys()) - set(SPATIAL_KEYS + CRS_KEYS + OTHER_KEYS)
82 if index:
83 unknown_keys = remaining_keys - set(index.datasets.get_field_names())
84 # TODO: What about keys source filters, and what if the keys don't match up with this product...
85 if unknown_keys:
86 raise LookupError('Unknown arguments: ', unknown_keys)
87
88 self.search = {}
89 for key in remaining_keys:
90 self.search.update(_values_to_search(**{key: search_terms[key]}))
91
92 if like:
93 assert self.geopolygon is None, "'like' with other spatial bounding parameters is not supported"
94 self.geopolygon = getattr(like, 'extent', self.geopolygon)
95
96 if 'time' not in self.search:
97 time_coord = like.coords.get('time')
98 if time_coord is not None:
99 self.search['time'] = _time_to_search_dims(
100 (pandas_to_datetime(time_coord.values[0]).to_pydatetime(),
101 pandas_to_datetime(time_coord.values[-1]).to_pydatetime()
102 + datetime.timedelta(milliseconds=1)) # TODO: inclusive time searches
103 )
104
105 @property
106 def search_terms(self):
107 """
108 Access the search terms as a dictionary.
109
110 :type: dict
111 """
112 kwargs = {}
113 kwargs.update(self.search)
114 if self.geopolygon:
115 geo_bb = self.geopolygon.to_crs(geometry.CRS('EPSG:4326')).boundingbox
116 if geo_bb.bottom != geo_bb.top:
117 kwargs['lat'] = Range(geo_bb.bottom, geo_bb.top)
118 else:
119 kwargs['lat'] = geo_bb.bottom
120 if geo_bb.left != geo_bb.right:
121 kwargs['lon'] = Range(geo_bb.left, geo_bb.right)
122 else:
123 kwargs['lon'] = geo_bb.left
124 if self.product:
125 kwargs['product'] = self.product
126 if self.source_filter:
127 kwargs['source_filter'] = self.source_filter.search_terms
128 return kwargs
129
130 def __repr__(self):
131 return self.__str__()
132
133 def __str__(self):
134 return """Datacube Query:
135 type = {type}
136 search = {search}
137 geopolygon = {geopolygon}
138 """.format(type=self.product,
139 search=self.search,
140 geopolygon=self.geopolygon)
141
142
143 def query_geopolygon(geopolygon=None, **kwargs):
144 spatial_dims = {dim: v for dim, v in kwargs.items() if dim in SPATIAL_KEYS}
145 crs = {v for k, v in kwargs.items() if k in CRS_KEYS}
146 if len(crs) == 1:
147 spatial_dims['crs'] = crs.pop()
148 elif len(crs) > 1:
149 raise ValueError('Spatial dimensions must be in the same coordinate reference system: {}'.format(crs))
150
151 if geopolygon is not None and len(spatial_dims) > 0:
152 raise ValueError('Cannot specify "geopolygon" and one of %s at the same time' % (SPATIAL_KEYS + CRS_KEYS,))
153
154 if geopolygon is None:
155 return _range_to_geopolygon(**spatial_dims)
156
157 return geopolygon
158
159
160 def query_group_by(group_by='time', **kwargs):
161 time_grouper = GroupBy(dimension='time',
162 group_by_func=lambda ds: ds.center_time,
163 units='seconds since 1970-01-01 00:00:00',
164 sort_key=lambda ds: ds.center_time)
165
166 solar_day_grouper = GroupBy(dimension='time',
167 group_by_func=solar_day,
168 units='seconds since 1970-01-01 00:00:00',
169 sort_key=lambda ds: ds.center_time)
170
171 group_by_map = {
172 None: time_grouper,
173 'time': time_grouper,
174 'solar_day': solar_day_grouper
175 }
176
177 try:
178 return group_by_map[group_by]
179 except KeyError:
180 raise LookupError('No group by function for', group_by)
181
182
183 def _range_to_geopolygon(**kwargs):
184 input_crs = None
185 input_coords = {'left': None, 'bottom': None, 'right': None, 'top': None}
186 for key, value in kwargs.items():
187 if value is None:
188 continue
189 key = key.lower()
190 if key in ['latitude', 'lat', 'y']:
191 input_coords['top'], input_coords['bottom'] = _value_to_range(value)
192 if key in ['longitude', 'lon', 'long', 'x']:
193 input_coords['left'], input_coords['right'] = _value_to_range(value)
194 if key in ['crs', 'coordinate_reference_system']:
195 input_crs = geometry.CRS(value)
196 input_crs = input_crs or geometry.CRS('EPSG:4326')
197 if any(v is not None for v in input_coords.values()):
198 if input_coords['left'] == input_coords['right']:
199 if input_coords['top'] == input_coords['bottom']:
200 return geometry.point(input_coords['left'], input_coords['top'], crs=input_crs)
201 else:
202 points = [(input_coords['left'], input_coords['bottom']),
203 (input_coords['left'], input_coords['top'])]
204 return geometry.line(points, crs=input_crs)
205 else:
206 if input_coords['top'] == input_coords['bottom']:
207 points = [(input_coords['left'], input_coords['top']),
208 (input_coords['right'], input_coords['top'])]
209 return geometry.line(points, crs=input_crs)
210 else:
211 points = [
212 (input_coords['left'], input_coords['top']),
213 (input_coords['right'], input_coords['top']),
214 (input_coords['right'], input_coords['bottom']),
215 (input_coords['left'], input_coords['bottom']),
216 (input_coords['left'], input_coords['top'])
217 ]
218 return geometry.polygon(points, crs=input_crs)
219 return None
220
221
222 def _value_to_range(value):
223 if isinstance(value, string_types + integer_types + (float,)):
224 value = float(value)
225 return value, value
226 else:
227 return float(value[0]), float(value[-1])
228
229
230 def _values_to_search(**kwargs):
231 search = {}
232 for key, value in kwargs.items():
233 if key.lower() in ('time', 't'):
234 search['time'] = _time_to_search_dims(value)
235 elif key not in ['latitude', 'lat', 'y'] + ['longitude', 'lon', 'x']:
236 if isinstance(value, collections.Sequence) and len(value) == 2:
237 search[key] = Range(*value)
238 else:
239 search[key] = value
240 return search
241
242
243 def _datetime_to_timestamp(dt):
244 if not isinstance(dt, datetime.datetime) and not isinstance(dt, datetime.date):
245 dt = _to_datetime(dt)
246 return datetime_to_seconds_since_1970(dt)
247
248
249 def _to_datetime(t):
250 if isinstance(t, integer_types + (float,)):
251 t = datetime.datetime.fromtimestamp(t, tz=tz.tzutc())
252
253 if isinstance(t, tuple):
254 t = datetime.datetime(*t, tzinfo=tz.tzutc())
255 elif isinstance(t, string_types):
256 try:
257 t = datetime.datetime.strptime(t, "%Y-%m-%dT%H:%M:%S.%fZ")
258 except ValueError:
259 pass
260 elif isinstance(t, datetime.datetime):
261 if t.tzinfo is None:
262 t = t.replace(tzinfo=tz.tzutc())
263 return t
264
265 return pandas_to_datetime(t, utc=True, infer_datetime_format=True).to_pydatetime()
266
267
268 def _time_to_search_dims(time_range):
269 if hasattr(time_range, '__iter__') and len(time_range) == 2:
270 time_range = Range(_to_datetime(time_range[0]), _to_datetime(time_range[1]))
271 if time_range[0] == time_range[1]:
272 return time_range[0]
273 return time_range
274 else:
275 return _to_datetime(time_range)
276
277
278 def _convert_to_solar_time(utc, longitude):
279 seconds_per_degree = 240
280 offset_seconds = int(longitude * seconds_per_degree)
281 offset = datetime.timedelta(seconds=offset_seconds)
282 return utc + offset
283
284
285 def solar_day(dataset):
286 utc = dataset.center_time
287 bb = dataset.extent.to_crs(geometry.CRS('WGS84')).boundingbox
288 assert bb.left < bb.right # TODO: Handle dateline?
289 longitude = (bb.left + bb.right) * 0.5
290 solar_time = _convert_to_solar_time(utc, longitude)
291 return np.datetime64(solar_time.date(), 'D')
292
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/datacube/api/query.py b/datacube/api/query.py
--- a/datacube/api/query.py
+++ b/datacube/api/query.py
@@ -21,9 +21,13 @@
import datetime
import collections
import warnings
+import calendar
+import re
+import pandas
from dateutil import tz
from pandas import to_datetime as pandas_to_datetime
+from pypeg2 import word, attr, List, maybe_some, parse as peg_parse
import numpy as np
from ..compat import string_types, integer_types
@@ -54,7 +58,7 @@
>>> query.search_terms['time'] # doctest: +NORMALIZE_WHITESPACE
Range(begin=datetime.datetime(2001, 1, 1, 0, 0, tzinfo=<UTC>), \
- end=datetime.datetime(2002, 1, 1, 0, 0, tzinfo=<UTC>))
+ end=datetime.datetime(2002, 1, 1, 23, 59, 59, 999999, tzinfo=tzutc()))
By passing in an ``index``, the search parameters will be validated as existing on the ``product``.
@@ -264,15 +268,34 @@
return pandas_to_datetime(t, utc=True, infer_datetime_format=True).to_pydatetime()
-
def _time_to_search_dims(time_range):
- if hasattr(time_range, '__iter__') and len(time_range) == 2:
- time_range = Range(_to_datetime(time_range[0]), _to_datetime(time_range[1]))
- if time_range[0] == time_range[1]:
- return time_range[0]
- return time_range
- else:
- return _to_datetime(time_range)
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore", UserWarning)
+ if hasattr(time_range, '__iter__') and len(time_range) == 2:
+ if all(isinstance(n, datetime.datetime) for n in time_range):
+ timelist = list(time_range)
+ timelist[0], timelist[1] = timelist[0].isoformat(), timelist[0].isoformat()
+ time_range = tuple(timelist)
+ time_range = Range(_to_datetime(time_range[0]),
+ _to_datetime(pandas.Period(time_range[1]).end_time.to_pydatetime()))
+ if time_range[0] == time_range[1]:
+ return time_range[0]
+ return time_range
+
+ elif isinstance(time_range, str):
+ start_time, end_time = Range(_to_datetime(time_range),
+ _to_datetime(pandas.Period(time_range).end_time.to_pydatetime()))
+ if start_time == end_time:
+ return start_time
+ time_range = Range(start_time, end_time)
+ return time_range
+ else:
+ timelist = list(time_range)
+ timelist[0], timelist[1] = timelist[0].isoformat(), timelist[0].isoformat()
+ time_range = tuple(timelist)
+ time_range = Range(_to_datetime(time_range[0]),
+ _to_datetime(pandas.Period(time_range[1]).end_time.to_pydatetime()))
+ return time_range
def _convert_to_solar_time(utc, longitude):
| {"golden_diff": "diff --git a/datacube/api/query.py b/datacube/api/query.py\n--- a/datacube/api/query.py\n+++ b/datacube/api/query.py\n@@ -21,9 +21,13 @@\n import datetime\n import collections\n import warnings\n+import calendar\n+import re\n+import pandas\n \n from dateutil import tz\n from pandas import to_datetime as pandas_to_datetime\n+from pypeg2 import word, attr, List, maybe_some, parse as peg_parse\n import numpy as np\n \n from ..compat import string_types, integer_types\n@@ -54,7 +58,7 @@\n \n >>> query.search_terms['time'] # doctest: +NORMALIZE_WHITESPACE\n Range(begin=datetime.datetime(2001, 1, 1, 0, 0, tzinfo=<UTC>), \\\n- end=datetime.datetime(2002, 1, 1, 0, 0, tzinfo=<UTC>))\n+ end=datetime.datetime(2002, 1, 1, 23, 59, 59, 999999, tzinfo=tzutc()))\n \n By passing in an ``index``, the search parameters will be validated as existing on the ``product``.\n \n@@ -264,15 +268,34 @@\n \n return pandas_to_datetime(t, utc=True, infer_datetime_format=True).to_pydatetime()\n \n-\n def _time_to_search_dims(time_range):\n- if hasattr(time_range, '__iter__') and len(time_range) == 2:\n- time_range = Range(_to_datetime(time_range[0]), _to_datetime(time_range[1]))\n- if time_range[0] == time_range[1]:\n- return time_range[0]\n- return time_range\n- else:\n- return _to_datetime(time_range)\n+ with warnings.catch_warnings():\n+ warnings.simplefilter(\"ignore\", UserWarning)\n+ if hasattr(time_range, '__iter__') and len(time_range) == 2:\n+ if all(isinstance(n, datetime.datetime) for n in time_range):\n+ timelist = list(time_range)\n+ timelist[0], timelist[1] = timelist[0].isoformat(), timelist[0].isoformat()\n+ time_range = tuple(timelist)\n+ time_range = Range(_to_datetime(time_range[0]),\n+ _to_datetime(pandas.Period(time_range[1]).end_time.to_pydatetime()))\n+ if time_range[0] == time_range[1]:\n+ return time_range[0]\n+ return time_range\n+\n+ elif isinstance(time_range, str):\n+ start_time, end_time = Range(_to_datetime(time_range),\n+ _to_datetime(pandas.Period(time_range).end_time.to_pydatetime()))\n+ if start_time == end_time:\n+ return start_time\n+ time_range = Range(start_time, end_time)\n+ return time_range\n+ else:\n+ timelist = list(time_range)\n+ timelist[0], timelist[1] = timelist[0].isoformat(), timelist[0].isoformat()\n+ time_range = tuple(timelist)\n+ time_range = Range(_to_datetime(time_range[0]),\n+ _to_datetime(pandas.Period(time_range[1]).end_time.to_pydatetime()))\n+ return time_range\n \n \n def _convert_to_solar_time(utc, longitude):\n", "issue": "Time range strings behave differently between datacube and xarray\nWhen specifying a time range in a datacube:\r\n`dc.load(..., time=('2003-01', '2003-03'))`\r\nis interpreted to be \r\n_2003-01-01 00:00:00 to 2003-03-01 00:00:00_\r\ni.e. values rounds down\r\n\r\nvs\r\n\r\nIn xarray:\r\n`data.sel(time=slice('2003-01', '2003-02'))`\r\nis interpreted to be:\r\n_2003-01-01 00:00:00 to 2003-03-31 23:59:59.999_\r\ni.e. 
values round out\r\n\r\nIdeally they would behave the same.\r\n\n", "before_files": [{"content": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStorage Query and Access API module\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport logging\nimport datetime\nimport collections\nimport warnings\n\nfrom dateutil import tz\nfrom pandas import to_datetime as pandas_to_datetime\nimport numpy as np\n\nfrom ..compat import string_types, integer_types\nfrom ..model import Range\nfrom ..utils import geometry, datetime_to_seconds_since_1970\n\n_LOG = logging.getLogger(__name__)\n\n\nGroupBy = collections.namedtuple('GroupBy', ['dimension', 'group_by_func', 'units', 'sort_key'])\n\nFLOAT_TOLERANCE = 0.0000001 # TODO: For DB query, use some sort of 'contains' query, rather than range overlap.\nSPATIAL_KEYS = ('latitude', 'lat', 'y', 'longitude', 'lon', 'long', 'x')\nCRS_KEYS = ('crs', 'coordinate_reference_system')\nOTHER_KEYS = ('measurements', 'group_by', 'output_crs', 'resolution', 'set_nan', 'product', 'geopolygon', 'like',\n 'source_filter')\n\n\nclass Query(object):\n def __init__(self, index=None, product=None, geopolygon=None, like=None, **search_terms):\n \"\"\"Parses search terms in preparation for querying the Data Cube Index.\n\n Create a :class:`Query` object by passing it a set of search terms as keyword arguments.\n\n >>> query = Query(product='ls5_nbar_albers', time=('2001-01-01', '2002-01-01'))\n\n Use by accessing :attr:`search_terms`:\n\n >>> query.search_terms['time'] # doctest: +NORMALIZE_WHITESPACE\n Range(begin=datetime.datetime(2001, 1, 1, 0, 0, tzinfo=<UTC>), \\\n end=datetime.datetime(2002, 1, 1, 0, 0, tzinfo=<UTC>))\n\n By passing in an ``index``, the search parameters will be validated as existing on the ``product``.\n\n Used by :meth:`datacube.Datacube.find_datasets` and :meth:`datacube.Datacube.load`.\n\n :param datacube.index.Index index: An optional `index` object, if checking of field names is desired.\n :param str product: name of product\n :param geopolygon: spatial bounds of the search\n :type geopolygon: geometry.Geometry or None\n :param xarray.Dataset like: spatio-temporal bounds of `like` are used for the search\n :param search_terms:\n * `measurements` - list of measurements to retrieve\n * `latitude`, `lat`, `y`, `longitude`, `lon`, `long`, `x` - tuples (min, max) bounding spatial dimensions\n * `crs` - spatial coordinate reference system to interpret the spatial bounds\n * `group_by` - observation grouping method. One of `time`, `solar_day`. 
Default is `time`\n \"\"\"\n self.product = product\n self.geopolygon = query_geopolygon(geopolygon=geopolygon, **search_terms)\n if 'source_filter' in search_terms and search_terms['source_filter'] is not None:\n self.source_filter = Query(**search_terms['source_filter'])\n else:\n self.source_filter = None\n\n remaining_keys = set(search_terms.keys()) - set(SPATIAL_KEYS + CRS_KEYS + OTHER_KEYS)\n if index:\n unknown_keys = remaining_keys - set(index.datasets.get_field_names())\n # TODO: What about keys source filters, and what if the keys don't match up with this product...\n if unknown_keys:\n raise LookupError('Unknown arguments: ', unknown_keys)\n\n self.search = {}\n for key in remaining_keys:\n self.search.update(_values_to_search(**{key: search_terms[key]}))\n\n if like:\n assert self.geopolygon is None, \"'like' with other spatial bounding parameters is not supported\"\n self.geopolygon = getattr(like, 'extent', self.geopolygon)\n\n if 'time' not in self.search:\n time_coord = like.coords.get('time')\n if time_coord is not None:\n self.search['time'] = _time_to_search_dims(\n (pandas_to_datetime(time_coord.values[0]).to_pydatetime(),\n pandas_to_datetime(time_coord.values[-1]).to_pydatetime()\n + datetime.timedelta(milliseconds=1)) # TODO: inclusive time searches\n )\n\n @property\n def search_terms(self):\n \"\"\"\n Access the search terms as a dictionary.\n\n :type: dict\n \"\"\"\n kwargs = {}\n kwargs.update(self.search)\n if self.geopolygon:\n geo_bb = self.geopolygon.to_crs(geometry.CRS('EPSG:4326')).boundingbox\n if geo_bb.bottom != geo_bb.top:\n kwargs['lat'] = Range(geo_bb.bottom, geo_bb.top)\n else:\n kwargs['lat'] = geo_bb.bottom\n if geo_bb.left != geo_bb.right:\n kwargs['lon'] = Range(geo_bb.left, geo_bb.right)\n else:\n kwargs['lon'] = geo_bb.left\n if self.product:\n kwargs['product'] = self.product\n if self.source_filter:\n kwargs['source_filter'] = self.source_filter.search_terms\n return kwargs\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"\"\"Datacube Query:\n type = {type}\n search = {search}\n geopolygon = {geopolygon}\n \"\"\".format(type=self.product,\n search=self.search,\n geopolygon=self.geopolygon)\n\n\ndef query_geopolygon(geopolygon=None, **kwargs):\n spatial_dims = {dim: v for dim, v in kwargs.items() if dim in SPATIAL_KEYS}\n crs = {v for k, v in kwargs.items() if k in CRS_KEYS}\n if len(crs) == 1:\n spatial_dims['crs'] = crs.pop()\n elif len(crs) > 1:\n raise ValueError('Spatial dimensions must be in the same coordinate reference system: {}'.format(crs))\n\n if geopolygon is not None and len(spatial_dims) > 0:\n raise ValueError('Cannot specify \"geopolygon\" and one of %s at the same time' % (SPATIAL_KEYS + CRS_KEYS,))\n\n if geopolygon is None:\n return _range_to_geopolygon(**spatial_dims)\n\n return geopolygon\n\n\ndef query_group_by(group_by='time', **kwargs):\n time_grouper = GroupBy(dimension='time',\n group_by_func=lambda ds: ds.center_time,\n units='seconds since 1970-01-01 00:00:00',\n sort_key=lambda ds: ds.center_time)\n\n solar_day_grouper = GroupBy(dimension='time',\n group_by_func=solar_day,\n units='seconds since 1970-01-01 00:00:00',\n sort_key=lambda ds: ds.center_time)\n\n group_by_map = {\n None: time_grouper,\n 'time': time_grouper,\n 'solar_day': solar_day_grouper\n }\n\n try:\n return group_by_map[group_by]\n except KeyError:\n raise LookupError('No group by function for', group_by)\n\n\ndef _range_to_geopolygon(**kwargs):\n input_crs = None\n input_coords = {'left': None, 'bottom': None, 
'right': None, 'top': None}\n for key, value in kwargs.items():\n if value is None:\n continue\n key = key.lower()\n if key in ['latitude', 'lat', 'y']:\n input_coords['top'], input_coords['bottom'] = _value_to_range(value)\n if key in ['longitude', 'lon', 'long', 'x']:\n input_coords['left'], input_coords['right'] = _value_to_range(value)\n if key in ['crs', 'coordinate_reference_system']:\n input_crs = geometry.CRS(value)\n input_crs = input_crs or geometry.CRS('EPSG:4326')\n if any(v is not None for v in input_coords.values()):\n if input_coords['left'] == input_coords['right']:\n if input_coords['top'] == input_coords['bottom']:\n return geometry.point(input_coords['left'], input_coords['top'], crs=input_crs)\n else:\n points = [(input_coords['left'], input_coords['bottom']),\n (input_coords['left'], input_coords['top'])]\n return geometry.line(points, crs=input_crs)\n else:\n if input_coords['top'] == input_coords['bottom']:\n points = [(input_coords['left'], input_coords['top']),\n (input_coords['right'], input_coords['top'])]\n return geometry.line(points, crs=input_crs)\n else:\n points = [\n (input_coords['left'], input_coords['top']),\n (input_coords['right'], input_coords['top']),\n (input_coords['right'], input_coords['bottom']),\n (input_coords['left'], input_coords['bottom']),\n (input_coords['left'], input_coords['top'])\n ]\n return geometry.polygon(points, crs=input_crs)\n return None\n\n\ndef _value_to_range(value):\n if isinstance(value, string_types + integer_types + (float,)):\n value = float(value)\n return value, value\n else:\n return float(value[0]), float(value[-1])\n\n\ndef _values_to_search(**kwargs):\n search = {}\n for key, value in kwargs.items():\n if key.lower() in ('time', 't'):\n search['time'] = _time_to_search_dims(value)\n elif key not in ['latitude', 'lat', 'y'] + ['longitude', 'lon', 'x']:\n if isinstance(value, collections.Sequence) and len(value) == 2:\n search[key] = Range(*value)\n else:\n search[key] = value\n return search\n\n\ndef _datetime_to_timestamp(dt):\n if not isinstance(dt, datetime.datetime) and not isinstance(dt, datetime.date):\n dt = _to_datetime(dt)\n return datetime_to_seconds_since_1970(dt)\n\n\ndef _to_datetime(t):\n if isinstance(t, integer_types + (float,)):\n t = datetime.datetime.fromtimestamp(t, tz=tz.tzutc())\n\n if isinstance(t, tuple):\n t = datetime.datetime(*t, tzinfo=tz.tzutc())\n elif isinstance(t, string_types):\n try:\n t = datetime.datetime.strptime(t, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n except ValueError:\n pass\n elif isinstance(t, datetime.datetime):\n if t.tzinfo is None:\n t = t.replace(tzinfo=tz.tzutc())\n return t\n\n return pandas_to_datetime(t, utc=True, infer_datetime_format=True).to_pydatetime()\n\n\ndef _time_to_search_dims(time_range):\n if hasattr(time_range, '__iter__') and len(time_range) == 2:\n time_range = Range(_to_datetime(time_range[0]), _to_datetime(time_range[1]))\n if time_range[0] == time_range[1]:\n return time_range[0]\n return time_range\n else:\n return _to_datetime(time_range)\n\n\ndef _convert_to_solar_time(utc, longitude):\n seconds_per_degree = 240\n offset_seconds = int(longitude * seconds_per_degree)\n offset = datetime.timedelta(seconds=offset_seconds)\n return utc + offset\n\n\ndef solar_day(dataset):\n utc = dataset.center_time\n bb = dataset.extent.to_crs(geometry.CRS('WGS84')).boundingbox\n assert bb.left < bb.right # TODO: Handle dateline?\n longitude = (bb.left + bb.right) * 0.5\n solar_time = _convert_to_solar_time(utc, longitude)\n return np.datetime64(solar_time.date(), 
'D')\n", "path": "datacube/api/query.py"}], "after_files": [{"content": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStorage Query and Access API module\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport logging\nimport datetime\nimport collections\nimport warnings\nimport calendar\nimport re\nimport pandas\n\nfrom dateutil import tz\nfrom pandas import to_datetime as pandas_to_datetime\nfrom pypeg2 import word, attr, List, maybe_some, parse as peg_parse\nimport numpy as np\n\nfrom ..compat import string_types, integer_types\nfrom ..model import Range\nfrom ..utils import geometry, datetime_to_seconds_since_1970\n\n_LOG = logging.getLogger(__name__)\n\n\nGroupBy = collections.namedtuple('GroupBy', ['dimension', 'group_by_func', 'units', 'sort_key'])\n\nFLOAT_TOLERANCE = 0.0000001 # TODO: For DB query, use some sort of 'contains' query, rather than range overlap.\nSPATIAL_KEYS = ('latitude', 'lat', 'y', 'longitude', 'lon', 'long', 'x')\nCRS_KEYS = ('crs', 'coordinate_reference_system')\nOTHER_KEYS = ('measurements', 'group_by', 'output_crs', 'resolution', 'set_nan', 'product', 'geopolygon', 'like',\n 'source_filter')\n\n\nclass Query(object):\n def __init__(self, index=None, product=None, geopolygon=None, like=None, **search_terms):\n \"\"\"Parses search terms in preparation for querying the Data Cube Index.\n\n Create a :class:`Query` object by passing it a set of search terms as keyword arguments.\n\n >>> query = Query(product='ls5_nbar_albers', time=('2001-01-01', '2002-01-01'))\n\n Use by accessing :attr:`search_terms`:\n\n >>> query.search_terms['time'] # doctest: +NORMALIZE_WHITESPACE\n Range(begin=datetime.datetime(2001, 1, 1, 0, 0, tzinfo=<UTC>), \\\n end=datetime.datetime(2002, 1, 1, 23, 59, 59, 999999, tzinfo=tzutc()))\n\n By passing in an ``index``, the search parameters will be validated as existing on the ``product``.\n\n Used by :meth:`datacube.Datacube.find_datasets` and :meth:`datacube.Datacube.load`.\n\n :param datacube.index.Index index: An optional `index` object, if checking of field names is desired.\n :param str product: name of product\n :param geopolygon: spatial bounds of the search\n :type geopolygon: geometry.Geometry or None\n :param xarray.Dataset like: spatio-temporal bounds of `like` are used for the search\n :param search_terms:\n * `measurements` - list of measurements to retrieve\n * `latitude`, `lat`, `y`, `longitude`, `lon`, `long`, `x` - tuples (min, max) bounding spatial dimensions\n * `crs` - spatial coordinate reference system to interpret the spatial bounds\n * `group_by` - observation grouping method. One of `time`, `solar_day`. 
Default is `time`\n \"\"\"\n self.product = product\n self.geopolygon = query_geopolygon(geopolygon=geopolygon, **search_terms)\n if 'source_filter' in search_terms and search_terms['source_filter'] is not None:\n self.source_filter = Query(**search_terms['source_filter'])\n else:\n self.source_filter = None\n\n remaining_keys = set(search_terms.keys()) - set(SPATIAL_KEYS + CRS_KEYS + OTHER_KEYS)\n if index:\n unknown_keys = remaining_keys - set(index.datasets.get_field_names())\n # TODO: What about keys source filters, and what if the keys don't match up with this product...\n if unknown_keys:\n raise LookupError('Unknown arguments: ', unknown_keys)\n\n self.search = {}\n for key in remaining_keys:\n self.search.update(_values_to_search(**{key: search_terms[key]}))\n\n if like:\n assert self.geopolygon is None, \"'like' with other spatial bounding parameters is not supported\"\n self.geopolygon = getattr(like, 'extent', self.geopolygon)\n\n if 'time' not in self.search:\n time_coord = like.coords.get('time')\n if time_coord is not None:\n self.search['time'] = _time_to_search_dims(\n (pandas_to_datetime(time_coord.values[0]).to_pydatetime(),\n pandas_to_datetime(time_coord.values[-1]).to_pydatetime()\n + datetime.timedelta(milliseconds=1)) # TODO: inclusive time searches\n )\n\n @property\n def search_terms(self):\n \"\"\"\n Access the search terms as a dictionary.\n\n :type: dict\n \"\"\"\n kwargs = {}\n kwargs.update(self.search)\n if self.geopolygon:\n geo_bb = self.geopolygon.to_crs(geometry.CRS('EPSG:4326')).boundingbox\n if geo_bb.bottom != geo_bb.top:\n kwargs['lat'] = Range(geo_bb.bottom, geo_bb.top)\n else:\n kwargs['lat'] = geo_bb.bottom\n if geo_bb.left != geo_bb.right:\n kwargs['lon'] = Range(geo_bb.left, geo_bb.right)\n else:\n kwargs['lon'] = geo_bb.left\n if self.product:\n kwargs['product'] = self.product\n if self.source_filter:\n kwargs['source_filter'] = self.source_filter.search_terms\n return kwargs\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return \"\"\"Datacube Query:\n type = {type}\n search = {search}\n geopolygon = {geopolygon}\n \"\"\".format(type=self.product,\n search=self.search,\n geopolygon=self.geopolygon)\n\n\ndef query_geopolygon(geopolygon=None, **kwargs):\n spatial_dims = {dim: v for dim, v in kwargs.items() if dim in SPATIAL_KEYS}\n crs = {v for k, v in kwargs.items() if k in CRS_KEYS}\n if len(crs) == 1:\n spatial_dims['crs'] = crs.pop()\n elif len(crs) > 1:\n raise ValueError('Spatial dimensions must be in the same coordinate reference system: {}'.format(crs))\n\n if geopolygon is not None and len(spatial_dims) > 0:\n raise ValueError('Cannot specify \"geopolygon\" and one of %s at the same time' % (SPATIAL_KEYS + CRS_KEYS,))\n\n if geopolygon is None:\n return _range_to_geopolygon(**spatial_dims)\n\n return geopolygon\n\n\ndef query_group_by(group_by='time', **kwargs):\n time_grouper = GroupBy(dimension='time',\n group_by_func=lambda ds: ds.center_time,\n units='seconds since 1970-01-01 00:00:00',\n sort_key=lambda ds: ds.center_time)\n\n solar_day_grouper = GroupBy(dimension='time',\n group_by_func=solar_day,\n units='seconds since 1970-01-01 00:00:00',\n sort_key=lambda ds: ds.center_time)\n\n group_by_map = {\n None: time_grouper,\n 'time': time_grouper,\n 'solar_day': solar_day_grouper\n }\n\n try:\n return group_by_map[group_by]\n except KeyError:\n raise LookupError('No group by function for', group_by)\n\n\ndef _range_to_geopolygon(**kwargs):\n input_crs = None\n input_coords = {'left': None, 'bottom': None, 
'right': None, 'top': None}\n for key, value in kwargs.items():\n if value is None:\n continue\n key = key.lower()\n if key in ['latitude', 'lat', 'y']:\n input_coords['top'], input_coords['bottom'] = _value_to_range(value)\n if key in ['longitude', 'lon', 'long', 'x']:\n input_coords['left'], input_coords['right'] = _value_to_range(value)\n if key in ['crs', 'coordinate_reference_system']:\n input_crs = geometry.CRS(value)\n input_crs = input_crs or geometry.CRS('EPSG:4326')\n if any(v is not None for v in input_coords.values()):\n if input_coords['left'] == input_coords['right']:\n if input_coords['top'] == input_coords['bottom']:\n return geometry.point(input_coords['left'], input_coords['top'], crs=input_crs)\n else:\n points = [(input_coords['left'], input_coords['bottom']),\n (input_coords['left'], input_coords['top'])]\n return geometry.line(points, crs=input_crs)\n else:\n if input_coords['top'] == input_coords['bottom']:\n points = [(input_coords['left'], input_coords['top']),\n (input_coords['right'], input_coords['top'])]\n return geometry.line(points, crs=input_crs)\n else:\n points = [\n (input_coords['left'], input_coords['top']),\n (input_coords['right'], input_coords['top']),\n (input_coords['right'], input_coords['bottom']),\n (input_coords['left'], input_coords['bottom']),\n (input_coords['left'], input_coords['top'])\n ]\n return geometry.polygon(points, crs=input_crs)\n return None\n\n\ndef _value_to_range(value):\n if isinstance(value, string_types + integer_types + (float,)):\n value = float(value)\n return value, value\n else:\n return float(value[0]), float(value[-1])\n\n\ndef _values_to_search(**kwargs):\n search = {}\n for key, value in kwargs.items():\n if key.lower() in ('time', 't'):\n search['time'] = _time_to_search_dims(value)\n elif key not in ['latitude', 'lat', 'y'] + ['longitude', 'lon', 'x']:\n if isinstance(value, collections.Sequence) and len(value) == 2:\n search[key] = Range(*value)\n else:\n search[key] = value\n return search\n\n\ndef _datetime_to_timestamp(dt):\n if not isinstance(dt, datetime.datetime) and not isinstance(dt, datetime.date):\n dt = _to_datetime(dt)\n return datetime_to_seconds_since_1970(dt)\n\n\ndef _to_datetime(t):\n if isinstance(t, integer_types + (float,)):\n t = datetime.datetime.fromtimestamp(t, tz=tz.tzutc())\n\n if isinstance(t, tuple):\n t = datetime.datetime(*t, tzinfo=tz.tzutc())\n elif isinstance(t, string_types):\n try:\n t = datetime.datetime.strptime(t, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n except ValueError:\n pass\n elif isinstance(t, datetime.datetime):\n if t.tzinfo is None:\n t = t.replace(tzinfo=tz.tzutc())\n return t\n\n return pandas_to_datetime(t, utc=True, infer_datetime_format=True).to_pydatetime()\n\ndef _time_to_search_dims(time_range):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", UserWarning)\n if hasattr(time_range, '__iter__') and len(time_range) == 2:\n if all(isinstance(n, datetime.datetime) for n in time_range):\n timelist = list(time_range)\n timelist[0], timelist[1] = timelist[0].isoformat(), timelist[0].isoformat()\n time_range = tuple(timelist)\n time_range = Range(_to_datetime(time_range[0]),\n _to_datetime(pandas.Period(time_range[1]).end_time.to_pydatetime()))\n if time_range[0] == time_range[1]:\n return time_range[0]\n return time_range\n\n elif isinstance(time_range, str):\n start_time, end_time = Range(_to_datetime(time_range),\n _to_datetime(pandas.Period(time_range).end_time.to_pydatetime()))\n if start_time == end_time:\n return start_time\n time_range = 
Range(start_time, end_time)\n return time_range\n else:\n timelist = list(time_range)\n timelist[0], timelist[1] = timelist[0].isoformat(), timelist[0].isoformat()\n time_range = tuple(timelist)\n time_range = Range(_to_datetime(time_range[0]),\n _to_datetime(pandas.Period(time_range[1]).end_time.to_pydatetime()))\n return time_range\n\n\ndef _convert_to_solar_time(utc, longitude):\n seconds_per_degree = 240\n offset_seconds = int(longitude * seconds_per_degree)\n offset = datetime.timedelta(seconds=offset_seconds)\n return utc + offset\n\n\ndef solar_day(dataset):\n utc = dataset.center_time\n bb = dataset.extent.to_crs(geometry.CRS('WGS84')).boundingbox\n assert bb.left < bb.right # TODO: Handle dateline?\n longitude = (bb.left + bb.right) * 0.5\n solar_time = _convert_to_solar_time(utc, longitude)\n return np.datetime64(solar_time.date(), 'D')\n", "path": "datacube/api/query.py"}]} | 4,013 | 752 |
gh_patches_debug_6313 | rasdani/github-patches | git_diff | pypa__pip-5789 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError on tab completion of `pip install -U ...`
**Environment**
* pip version: 18.0
* Python version: 3.7.0
* OS: macOS High Sierra
Using iTerm2 3.1.7 or Terminal.app.
<!-- Feel free to add more information about your environment here -->
**Description**
<!-- A clear and concise description of what the bug is. -->
Attempt `pip install -U projectname` with tab completion.
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
Tab completion works, or at least no error.
**How to Reproduce**
<!-- Describe the steps to reproduce this bug. -->
1. Type `pip install -U` or `pip install --upgrade` then a few letters of a project name (e.g. just `p` below), then press the tab key.
2. An error occurs:
**Output**
```console
$ pip install --upgrade pTraceback (most recent call last):
File "/usr/local/bin/pip3", line 11, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/site-packages/pip/_internal/__init__.py", line 293, in main
autocomplete()
File "/usr/local/lib/python3.7/site-packages/pip/_internal/__init__.py", line 121, in autocomplete
cwords, cword, subcommand.parser.option_list_all,
File "/usr/local/lib/python3.7/site-packages/pip/_internal/__init__.py", line 170, in get_path_completion_type
for x in opt.metavar.split('/')):
AttributeError: 'NoneType' object has no attribute 'split'
```
**Notes**
* When omitting `-U` or `--upgrade`, pressing tab doesn't complete anything but more importantly there's no error.
* No error with pip 18.0 on Python 2.7.15. Pressing tab suggests files/directories in current dir (with and without `-U`).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/cli/autocompletion.py`
Content:
```
1 """Logic that powers autocompletion installed by ``pip completion``.
2 """
3
4 import optparse
5 import os
6 import sys
7
8 from pip._internal.cli.main_parser import create_main_parser
9 from pip._internal.commands import commands_dict, get_summaries
10 from pip._internal.utils.misc import get_installed_distributions
11
12
13 def autocomplete():
14 """Entry Point for completion of main and subcommand options.
15 """
16 # Don't complete if user hasn't sourced bash_completion file.
17 if 'PIP_AUTO_COMPLETE' not in os.environ:
18 return
19 cwords = os.environ['COMP_WORDS'].split()[1:]
20 cword = int(os.environ['COMP_CWORD'])
21 try:
22 current = cwords[cword - 1]
23 except IndexError:
24 current = ''
25
26 subcommands = [cmd for cmd, summary in get_summaries()]
27 options = []
28 # subcommand
29 try:
30 subcommand_name = [w for w in cwords if w in subcommands][0]
31 except IndexError:
32 subcommand_name = None
33
34 parser = create_main_parser()
35 # subcommand options
36 if subcommand_name:
37 # special case: 'help' subcommand has no options
38 if subcommand_name == 'help':
39 sys.exit(1)
40 # special case: list locally installed dists for show and uninstall
41 should_list_installed = (
42 subcommand_name in ['show', 'uninstall'] and
43 not current.startswith('-')
44 )
45 if should_list_installed:
46 installed = []
47 lc = current.lower()
48 for dist in get_installed_distributions(local_only=True):
49 if dist.key.startswith(lc) and dist.key not in cwords[1:]:
50 installed.append(dist.key)
51 # if there are no dists installed, fall back to option completion
52 if installed:
53 for dist in installed:
54 print(dist)
55 sys.exit(1)
56
57 subcommand = commands_dict[subcommand_name]()
58
59 for opt in subcommand.parser.option_list_all:
60 if opt.help != optparse.SUPPRESS_HELP:
61 for opt_str in opt._long_opts + opt._short_opts:
62 options.append((opt_str, opt.nargs))
63
64 # filter out previously specified options from available options
65 prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
66 options = [(x, v) for (x, v) in options if x not in prev_opts]
67 # filter options by current input
68 options = [(k, v) for k, v in options if k.startswith(current)]
69 # get completion type given cwords and available subcommand options
70 completion_type = get_path_completion_type(
71 cwords, cword, subcommand.parser.option_list_all,
72 )
73 # get completion files and directories if ``completion_type`` is
74 # ``<file>``, ``<dir>`` or ``<path>``
75 if completion_type:
76 options = auto_complete_paths(current, completion_type)
77 options = ((opt, 0) for opt in options)
78 for option in options:
79 opt_label = option[0]
80 # append '=' to options which require args
81 if option[1] and option[0][:2] == "--":
82 opt_label += '='
83 print(opt_label)
84 else:
85 # show main parser options only when necessary
86
87 opts = [i.option_list for i in parser.option_groups]
88 opts.append(parser.option_list)
89 opts = (o for it in opts for o in it)
90 if current.startswith('-'):
91 for opt in opts:
92 if opt.help != optparse.SUPPRESS_HELP:
93 subcommands += opt._long_opts + opt._short_opts
94 else:
95 # get completion type given cwords and all available options
96 completion_type = get_path_completion_type(cwords, cword, opts)
97 if completion_type:
98 subcommands = auto_complete_paths(current, completion_type)
99
100 print(' '.join([x for x in subcommands if x.startswith(current)]))
101 sys.exit(1)
102
103
104 def get_path_completion_type(cwords, cword, opts):
105 """Get the type of path completion (``file``, ``dir``, ``path`` or None)
106
107 :param cwords: same as the environmental variable ``COMP_WORDS``
108 :param cword: same as the environmental variable ``COMP_CWORD``
109 :param opts: The available options to check
110 :return: path completion type (``file``, ``dir``, ``path`` or None)
111 """
112 if cword < 2 or not cwords[cword - 2].startswith('-'):
113 return
114 for opt in opts:
115 if opt.help == optparse.SUPPRESS_HELP:
116 continue
117 for o in str(opt).split('/'):
118 if cwords[cword - 2].split('=')[0] == o:
119 if any(x in ('path', 'file', 'dir')
120 for x in opt.metavar.split('/')):
121 return opt.metavar
122
123
124 def auto_complete_paths(current, completion_type):
125 """If ``completion_type`` is ``file`` or ``path``, list all regular files
126 and directories starting with ``current``; otherwise only list directories
127 starting with ``current``.
128
129 :param current: The word to be completed
130 :param completion_type: path completion type(`file`, `path` or `dir`)i
131 :return: A generator of regular files and/or directories
132 """
133 directory, filename = os.path.split(current)
134 current_path = os.path.abspath(directory)
135 # Don't complete paths if they can't be accessed
136 if not os.access(current_path, os.R_OK):
137 return
138 filename = os.path.normcase(filename)
139 # list all files that start with ``filename``
140 file_list = (x for x in os.listdir(current_path)
141 if os.path.normcase(x).startswith(filename))
142 for f in file_list:
143 opt = os.path.join(current_path, f)
144 comp_file = os.path.normcase(os.path.join(directory, f))
145 # complete regular files when there is not ``<dir>`` after option
146 # complete directories when there is ``<file>``, ``<path>`` or
147 # ``<dir>``after option
148 if completion_type != 'dir' and os.path.isfile(opt):
149 yield comp_file
150 elif os.path.isdir(opt):
151 yield os.path.join(comp_file, '')
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/cli/autocompletion.py b/src/pip/_internal/cli/autocompletion.py
--- a/src/pip/_internal/cli/autocompletion.py
+++ b/src/pip/_internal/cli/autocompletion.py
@@ -116,7 +116,8 @@
continue
for o in str(opt).split('/'):
if cwords[cword - 2].split('=')[0] == o:
- if any(x in ('path', 'file', 'dir')
+ if not opt.metavar or any(
+ x in ('path', 'file', 'dir')
for x in opt.metavar.split('/')):
return opt.metavar
| {"golden_diff": "diff --git a/src/pip/_internal/cli/autocompletion.py b/src/pip/_internal/cli/autocompletion.py\n--- a/src/pip/_internal/cli/autocompletion.py\n+++ b/src/pip/_internal/cli/autocompletion.py\n@@ -116,7 +116,8 @@\n continue\n for o in str(opt).split('/'):\n if cwords[cword - 2].split('=')[0] == o:\n- if any(x in ('path', 'file', 'dir')\n+ if not opt.metavar or any(\n+ x in ('path', 'file', 'dir')\n for x in opt.metavar.split('/')):\n return opt.metavar\n", "issue": "AttributeError on tab completion of `pip install -U ...`\n**Environment**\r\n\r\n* pip version: 18.0\r\n* Python version: 3.7.0\r\n* OS: macOS High Sierra\r\n\r\nUsing iTerm2 3.1.7 or Terminal.app.\r\n\r\n<!-- Feel free to add more information about your environment here -->\r\n\r\n**Description**\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nAttempt `pip install -U projectname` with tab completion.\r\n\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\nTab completion works, or at least no error.\r\n\r\n**How to Reproduce**\r\n<!-- Describe the steps to reproduce this bug. -->\r\n\r\n1. Type `pip install -U` or `pip install --upgrade` then a few letters of a project name (eg. just `p` below), then press the tab key.\r\n3. An error occurs:\r\n\r\n**Output**\r\n\r\n```console\r\n$ pip install --upgrade pTraceback (most recent call last):\r\n File \"/usr/local/bin/pip3\", line 11, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python3.7/site-packages/pip/_internal/__init__.py\", line 293, in main\r\n autocomplete()\r\n File \"/usr/local/lib/python3.7/site-packages/pip/_internal/__init__.py\", line 121, in autocomplete\r\n cwords, cword, subcommand.parser.option_list_all,\r\n File \"/usr/local/lib/python3.7/site-packages/pip/_internal/__init__.py\", line 170, in get_path_completion_type\r\n for x in opt.metavar.split('/')):\r\nAttributeError: 'NoneType' object has no attribute 'split'\r\n```\r\n\r\n**Notes**\r\n\r\n* When omitting `-U` or `--upgrade`, pressing tab doesn't complete anything but more importantly there's no error.\r\n\r\n* No error with pip 18.0 on Python 2.7.15. 
Pressing tab suggests files/directories in current dir (with and without `-U`).\n", "before_files": [{"content": "\"\"\"Logic that powers autocompletion installed by ``pip completion``.\n\"\"\"\n\nimport optparse\nimport os\nimport sys\n\nfrom pip._internal.cli.main_parser import create_main_parser\nfrom pip._internal.commands import commands_dict, get_summaries\nfrom pip._internal.utils.misc import get_installed_distributions\n\n\ndef autocomplete():\n \"\"\"Entry Point for completion of main and subcommand options.\n \"\"\"\n # Don't complete if user hasn't sourced bash_completion file.\n if 'PIP_AUTO_COMPLETE' not in os.environ:\n return\n cwords = os.environ['COMP_WORDS'].split()[1:]\n cword = int(os.environ['COMP_CWORD'])\n try:\n current = cwords[cword - 1]\n except IndexError:\n current = ''\n\n subcommands = [cmd for cmd, summary in get_summaries()]\n options = []\n # subcommand\n try:\n subcommand_name = [w for w in cwords if w in subcommands][0]\n except IndexError:\n subcommand_name = None\n\n parser = create_main_parser()\n # subcommand options\n if subcommand_name:\n # special case: 'help' subcommand has no options\n if subcommand_name == 'help':\n sys.exit(1)\n # special case: list locally installed dists for show and uninstall\n should_list_installed = (\n subcommand_name in ['show', 'uninstall'] and\n not current.startswith('-')\n )\n if should_list_installed:\n installed = []\n lc = current.lower()\n for dist in get_installed_distributions(local_only=True):\n if dist.key.startswith(lc) and dist.key not in cwords[1:]:\n installed.append(dist.key)\n # if there are no dists installed, fall back to option completion\n if installed:\n for dist in installed:\n print(dist)\n sys.exit(1)\n\n subcommand = commands_dict[subcommand_name]()\n\n for opt in subcommand.parser.option_list_all:\n if opt.help != optparse.SUPPRESS_HELP:\n for opt_str in opt._long_opts + opt._short_opts:\n options.append((opt_str, opt.nargs))\n\n # filter out previously specified options from available options\n prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]\n options = [(x, v) for (x, v) in options if x not in prev_opts]\n # filter options by current input\n options = [(k, v) for k, v in options if k.startswith(current)]\n # get completion type given cwords and available subcommand options\n completion_type = get_path_completion_type(\n cwords, cword, subcommand.parser.option_list_all,\n )\n # get completion files and directories if ``completion_type`` is\n # ``<file>``, ``<dir>`` or ``<path>``\n if completion_type:\n options = auto_complete_paths(current, completion_type)\n options = ((opt, 0) for opt in options)\n for option in options:\n opt_label = option[0]\n # append '=' to options which require args\n if option[1] and option[0][:2] == \"--\":\n opt_label += '='\n print(opt_label)\n else:\n # show main parser options only when necessary\n\n opts = [i.option_list for i in parser.option_groups]\n opts.append(parser.option_list)\n opts = (o for it in opts for o in it)\n if current.startswith('-'):\n for opt in opts:\n if opt.help != optparse.SUPPRESS_HELP:\n subcommands += opt._long_opts + opt._short_opts\n else:\n # get completion type given cwords and all available options\n completion_type = get_path_completion_type(cwords, cword, opts)\n if completion_type:\n subcommands = auto_complete_paths(current, completion_type)\n\n print(' '.join([x for x in subcommands if x.startswith(current)]))\n sys.exit(1)\n\n\ndef get_path_completion_type(cwords, cword, opts):\n \"\"\"Get the type of path 
completion (``file``, ``dir``, ``path`` or None)\n\n :param cwords: same as the environmental variable ``COMP_WORDS``\n :param cword: same as the environmental variable ``COMP_CWORD``\n :param opts: The available options to check\n :return: path completion type (``file``, ``dir``, ``path`` or None)\n \"\"\"\n if cword < 2 or not cwords[cword - 2].startswith('-'):\n return\n for opt in opts:\n if opt.help == optparse.SUPPRESS_HELP:\n continue\n for o in str(opt).split('/'):\n if cwords[cword - 2].split('=')[0] == o:\n if any(x in ('path', 'file', 'dir')\n for x in opt.metavar.split('/')):\n return opt.metavar\n\n\ndef auto_complete_paths(current, completion_type):\n \"\"\"If ``completion_type`` is ``file`` or ``path``, list all regular files\n and directories starting with ``current``; otherwise only list directories\n starting with ``current``.\n\n :param current: The word to be completed\n :param completion_type: path completion type(`file`, `path` or `dir`)i\n :return: A generator of regular files and/or directories\n \"\"\"\n directory, filename = os.path.split(current)\n current_path = os.path.abspath(directory)\n # Don't complete paths if they can't be accessed\n if not os.access(current_path, os.R_OK):\n return\n filename = os.path.normcase(filename)\n # list all files that start with ``filename``\n file_list = (x for x in os.listdir(current_path)\n if os.path.normcase(x).startswith(filename))\n for f in file_list:\n opt = os.path.join(current_path, f)\n comp_file = os.path.normcase(os.path.join(directory, f))\n # complete regular files when there is not ``<dir>`` after option\n # complete directories when there is ``<file>``, ``<path>`` or\n # ``<dir>``after option\n if completion_type != 'dir' and os.path.isfile(opt):\n yield comp_file\n elif os.path.isdir(opt):\n yield os.path.join(comp_file, '')\n", "path": "src/pip/_internal/cli/autocompletion.py"}], "after_files": [{"content": "\"\"\"Logic that powers autocompletion installed by ``pip completion``.\n\"\"\"\n\nimport optparse\nimport os\nimport sys\n\nfrom pip._internal.cli.main_parser import create_main_parser\nfrom pip._internal.commands import commands_dict, get_summaries\nfrom pip._internal.utils.misc import get_installed_distributions\n\n\ndef autocomplete():\n \"\"\"Entry Point for completion of main and subcommand options.\n \"\"\"\n # Don't complete if user hasn't sourced bash_completion file.\n if 'PIP_AUTO_COMPLETE' not in os.environ:\n return\n cwords = os.environ['COMP_WORDS'].split()[1:]\n cword = int(os.environ['COMP_CWORD'])\n try:\n current = cwords[cword - 1]\n except IndexError:\n current = ''\n\n subcommands = [cmd for cmd, summary in get_summaries()]\n options = []\n # subcommand\n try:\n subcommand_name = [w for w in cwords if w in subcommands][0]\n except IndexError:\n subcommand_name = None\n\n parser = create_main_parser()\n # subcommand options\n if subcommand_name:\n # special case: 'help' subcommand has no options\n if subcommand_name == 'help':\n sys.exit(1)\n # special case: list locally installed dists for show and uninstall\n should_list_installed = (\n subcommand_name in ['show', 'uninstall'] and\n not current.startswith('-')\n )\n if should_list_installed:\n installed = []\n lc = current.lower()\n for dist in get_installed_distributions(local_only=True):\n if dist.key.startswith(lc) and dist.key not in cwords[1:]:\n installed.append(dist.key)\n # if there are no dists installed, fall back to option completion\n if installed:\n for dist in installed:\n print(dist)\n sys.exit(1)\n\n subcommand = 
commands_dict[subcommand_name]()\n\n for opt in subcommand.parser.option_list_all:\n if opt.help != optparse.SUPPRESS_HELP:\n for opt_str in opt._long_opts + opt._short_opts:\n options.append((opt_str, opt.nargs))\n\n # filter out previously specified options from available options\n prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]\n options = [(x, v) for (x, v) in options if x not in prev_opts]\n # filter options by current input\n options = [(k, v) for k, v in options if k.startswith(current)]\n # get completion type given cwords and available subcommand options\n completion_type = get_path_completion_type(\n cwords, cword, subcommand.parser.option_list_all,\n )\n # get completion files and directories if ``completion_type`` is\n # ``<file>``, ``<dir>`` or ``<path>``\n if completion_type:\n options = auto_complete_paths(current, completion_type)\n options = ((opt, 0) for opt in options)\n for option in options:\n opt_label = option[0]\n # append '=' to options which require args\n if option[1] and option[0][:2] == \"--\":\n opt_label += '='\n print(opt_label)\n else:\n # show main parser options only when necessary\n\n opts = [i.option_list for i in parser.option_groups]\n opts.append(parser.option_list)\n opts = (o for it in opts for o in it)\n if current.startswith('-'):\n for opt in opts:\n if opt.help != optparse.SUPPRESS_HELP:\n subcommands += opt._long_opts + opt._short_opts\n else:\n # get completion type given cwords and all available options\n completion_type = get_path_completion_type(cwords, cword, opts)\n if completion_type:\n subcommands = auto_complete_paths(current, completion_type)\n\n print(' '.join([x for x in subcommands if x.startswith(current)]))\n sys.exit(1)\n\n\ndef get_path_completion_type(cwords, cword, opts):\n \"\"\"Get the type of path completion (``file``, ``dir``, ``path`` or None)\n\n :param cwords: same as the environmental variable ``COMP_WORDS``\n :param cword: same as the environmental variable ``COMP_CWORD``\n :param opts: The available options to check\n :return: path completion type (``file``, ``dir``, ``path`` or None)\n \"\"\"\n if cword < 2 or not cwords[cword - 2].startswith('-'):\n return\n for opt in opts:\n if opt.help == optparse.SUPPRESS_HELP:\n continue\n for o in str(opt).split('/'):\n if cwords[cword - 2].split('=')[0] == o:\n if not opt.metavar or any(\n x in ('path', 'file', 'dir')\n for x in opt.metavar.split('/')):\n return opt.metavar\n\n\ndef auto_complete_paths(current, completion_type):\n \"\"\"If ``completion_type`` is ``file`` or ``path``, list all regular files\n and directories starting with ``current``; otherwise only list directories\n starting with ``current``.\n\n :param current: The word to be completed\n :param completion_type: path completion type(`file`, `path` or `dir`)i\n :return: A generator of regular files and/or directories\n \"\"\"\n directory, filename = os.path.split(current)\n current_path = os.path.abspath(directory)\n # Don't complete paths if they can't be accessed\n if not os.access(current_path, os.R_OK):\n return\n filename = os.path.normcase(filename)\n # list all files that start with ``filename``\n file_list = (x for x in os.listdir(current_path)\n if os.path.normcase(x).startswith(filename))\n for f in file_list:\n opt = os.path.join(current_path, f)\n comp_file = os.path.normcase(os.path.join(directory, f))\n # complete regular files when there is not ``<dir>`` after option\n # complete directories when there is ``<file>``, ``<path>`` or\n # ``<dir>``after option\n if completion_type != 
'dir' and os.path.isfile(opt):\n yield comp_file\n elif os.path.isdir(opt):\n yield os.path.join(comp_file, '')\n", "path": "src/pip/_internal/cli/autocompletion.py"}]} | 2,423 | 155 |
gh_patches_debug_2178 | rasdani/github-patches | git_diff | rucio__rucio-1028 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move conveyor transfers to third_party_copy_operation
Motivation
----------
At the moment read is used for the source and write is used for the destination; for both, third_party_copy should be used.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/rucio/vcsversion.py`
Content:
```
1
2 '''
3 This file is automatically generated; Do not edit it. :)
4 '''
5 VERSION_INFO = {
6 'final': True,
7 'version': '1.15.3',
8 'branch_nick': 'patch-0-Rucio_1_15_3_preparation',
9 'revision_id': 'cd14416223d0b81a940312cb180a07778f85f1f8',
10 'revno': 6357
11 }
12
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py
--- a/lib/rucio/vcsversion.py
+++ b/lib/rucio/vcsversion.py
@@ -4,8 +4,8 @@
'''
VERSION_INFO = {
'final': True,
- 'version': '1.15.3',
- 'branch_nick': 'patch-0-Rucio_1_15_3_preparation',
- 'revision_id': 'cd14416223d0b81a940312cb180a07778f85f1f8',
- 'revno': 6357
+ 'version': '1.15.4',
+ 'branch_nick': 'patch-0-Rucio_1_15_4_preparation',
+ 'revision_id': 'c2972be03297dc73a65d35bc9ffab1516efffebb',
+ 'revno': 6402
}
| {"golden_diff": "diff --git a/lib/rucio/vcsversion.py b/lib/rucio/vcsversion.py\n--- a/lib/rucio/vcsversion.py\n+++ b/lib/rucio/vcsversion.py\n@@ -4,8 +4,8 @@\n '''\n VERSION_INFO = {\n 'final': True,\n- 'version': '1.15.3',\n- 'branch_nick': 'patch-0-Rucio_1_15_3_preparation',\n- 'revision_id': 'cd14416223d0b81a940312cb180a07778f85f1f8',\n- 'revno': 6357\n+ 'version': '1.15.4',\n+ 'branch_nick': 'patch-0-Rucio_1_15_4_preparation',\n+ 'revision_id': 'c2972be03297dc73a65d35bc9ffab1516efffebb',\n+ 'revno': 6402\n }\n", "issue": "Move conveyor transfers to third_party_copy_operation\nMotivation\r\n----------\r\nAt the moment read is used for source and write is used for destination, for both third_party_copy should be used.\n", "before_files": [{"content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.15.3',\n 'branch_nick': 'patch-0-Rucio_1_15_3_preparation',\n 'revision_id': 'cd14416223d0b81a940312cb180a07778f85f1f8',\n 'revno': 6357\n}\n", "path": "lib/rucio/vcsversion.py"}], "after_files": [{"content": "\n'''\nThis file is automatically generated; Do not edit it. :)\n'''\nVERSION_INFO = {\n 'final': True,\n 'version': '1.15.4',\n 'branch_nick': 'patch-0-Rucio_1_15_4_preparation',\n 'revision_id': 'c2972be03297dc73a65d35bc9ffab1516efffebb',\n 'revno': 6402\n}\n", "path": "lib/rucio/vcsversion.py"}]} | 430 | 247 |
gh_patches_debug_16727 | rasdani/github-patches | git_diff | learningequality__kolibri-6191 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
recipients column in coach tools is not accurate
### Observed behavior
recipients column always indicates assignment to the entire class, even when that's not true:

### Expected behavior
recipients column should be accurate
### User-facing consequences
confusion
### Errors and logs
none observed
### Steps to reproduce
create a lesson and assign it to a group
### Context
0.13.0 beta 3
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/core/lessons/viewsets.py`
Content:
```
1 import json
2 from functools import partial
3 from itertools import chain
4
5 from django.db import connection
6 from django.db.models import CharField
7 from django_filters.rest_framework import DjangoFilterBackend
8
9 from .serializers import LessonSerializer
10 from kolibri.core.api import ValuesViewset
11 from kolibri.core.auth.api import KolibriAuthPermissions
12 from kolibri.core.auth.api import KolibriAuthPermissionsFilter
13 from kolibri.core.lessons.models import Lesson
14 from kolibri.core.lessons.models import LessonAssignment
15 from kolibri.core.query import ArrayAgg
16 from kolibri.core.query import GroupConcat
17 from kolibri.core.query import process_uuid_aggregate
18
19
20 def _ensure_raw_dict(d):
21 if hasattr(d, "dict"):
22 d = d.dict()
23 return dict(d)
24
25
26 class LessonPermissions(KolibriAuthPermissions):
27 # Overrides the default validator to sanitize the Lesson POST Payload
28 # before validation
29 def validator(self, request, view, datum):
30 model = view.get_serializer_class().Meta.model
31 validated_data = view.get_serializer().to_internal_value(
32 _ensure_raw_dict(datum)
33 )
34 # Cannot have create assignments without creating the Lesson first,
35 # so this doesn't try to validate the Lesson with a non-empty lesson_assignments list
36 validated_data.pop("lesson_assignments")
37 return request.user.can_create(model, validated_data)
38
39
40 def _map_lesson_classroom(item):
41 return {
42 "id": item.pop("collection__id"),
43 "name": item.pop("collection__name"),
44 "parent": item.pop("collection__parent_id"),
45 }
46
47
48 class LessonViewset(ValuesViewset):
49 serializer_class = LessonSerializer
50 filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)
51 filter_fields = ("collection",)
52 permission_classes = (LessonPermissions,)
53 queryset = Lesson.objects.all().order_by("-date_created")
54
55 values = (
56 "id",
57 "title",
58 "description",
59 "resources",
60 "is_active",
61 "collection", # classroom
62 "collection__id",
63 "collection__name",
64 "collection__parent_id",
65 "created_by",
66 "assignment_ids",
67 )
68
69 field_map = {
70 "classroom": _map_lesson_classroom,
71 "resources": lambda x: json.loads(x["resources"]),
72 "assignment_ids": partial(process_uuid_aggregate, key="assignment_ids"),
73 }
74
75 def consolidate(self, items):
76 assignment_ids = []
77 for item in items:
78 assignment_ids.extend(item["assignment_ids"])
79 assignments = LessonAssignment.objects.filter(id__in=assignment_ids)
80 if connection.vendor == "postgresql" and ArrayAgg is not None:
81 assignments = assignments.annotate(
82 learner_ids=ArrayAgg("collection__membership__user__id"),
83 )
84
85 def _process_item(item):
86 item["learner_ids"] = map(
87 lambda x: x.hex, filter(lambda x: x, item["learner_ids"])
88 )
89 return item
90
91 else:
92 assignments = assignments.values("id").annotate(
93 learner_ids=GroupConcat(
94 "collection__membership__user__id", output_field=CharField(),
95 ),
96 )
97
98 def _process_item(item):
99 item["learner_ids"] = (
100 item["learner_ids"].split(",") if item["learner_ids"] else []
101 )
102 return item
103
104 assignments = {
105 a["id"]: _process_item(a)
106 for a in assignments.values(
107 "id", "collection", "collection__kind", "learner_ids", "assigned_by"
108 )
109 }
110 for item in items:
111 item_ids = item.pop("assignment_ids")
112 item["lesson_assignments"] = [assignments[a] for a in item_ids]
113 item["learner_ids"] = list(
114 set(
115 chain.from_iterable(
116 a.pop("learner_ids") for a in item["lesson_assignments"]
117 )
118 )
119 )
120 return items
121
122 def annotate_queryset(self, queryset):
123 if connection.vendor == "postgresql" and ArrayAgg is not None:
124 return queryset.annotate(assignment_ids=ArrayAgg("lesson_assignments__id"),)
125 return queryset.values("id").annotate(
126 assignment_ids=GroupConcat(
127 "lesson_assignments__id", output_field=CharField(),
128 ),
129 )
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/core/lessons/viewsets.py b/kolibri/core/lessons/viewsets.py
--- a/kolibri/core/lessons/viewsets.py
+++ b/kolibri/core/lessons/viewsets.py
@@ -4,6 +4,7 @@
from django.db import connection
from django.db.models import CharField
+from django.db.models import F
from django_filters.rest_framework import DjangoFilterBackend
from .serializers import LessonSerializer
@@ -104,7 +105,11 @@
assignments = {
a["id"]: _process_item(a)
for a in assignments.values(
- "id", "collection", "collection__kind", "learner_ids", "assigned_by"
+ "id",
+ "collection",
+ "learner_ids",
+ "assigned_by",
+ collection_kind=F("collection__kind"),
)
}
for item in items:
| {"golden_diff": "diff --git a/kolibri/core/lessons/viewsets.py b/kolibri/core/lessons/viewsets.py\n--- a/kolibri/core/lessons/viewsets.py\n+++ b/kolibri/core/lessons/viewsets.py\n@@ -4,6 +4,7 @@\n \n from django.db import connection\n from django.db.models import CharField\n+from django.db.models import F\n from django_filters.rest_framework import DjangoFilterBackend\n \n from .serializers import LessonSerializer\n@@ -104,7 +105,11 @@\n assignments = {\n a[\"id\"]: _process_item(a)\n for a in assignments.values(\n- \"id\", \"collection\", \"collection__kind\", \"learner_ids\", \"assigned_by\"\n+ \"id\",\n+ \"collection\",\n+ \"learner_ids\",\n+ \"assigned_by\",\n+ collection_kind=F(\"collection__kind\"),\n )\n }\n for item in items:\n", "issue": "recipients column in coach tools in not accurate\n\r\n### Observed behavior\r\n\r\nrecipients column always indicates assignment to the entire class, even when that's not true:\r\n\r\n\r\n\r\n\r\n### Expected behavior\r\n\r\nrecipients column should be accurate\r\n\r\n### User-facing consequences\r\n\r\nconfusion\r\n\r\n### Errors and logs\r\n\r\nnone observed\r\n\r\n### Steps to reproduce\r\n\r\ncreate a lesson and assign it to a group\r\n\r\n### Context\r\n\r\n0.13.0 beta 3\r\n\n", "before_files": [{"content": "import json\nfrom functools import partial\nfrom itertools import chain\n\nfrom django.db import connection\nfrom django.db.models import CharField\nfrom django_filters.rest_framework import DjangoFilterBackend\n\nfrom .serializers import LessonSerializer\nfrom kolibri.core.api import ValuesViewset\nfrom kolibri.core.auth.api import KolibriAuthPermissions\nfrom kolibri.core.auth.api import KolibriAuthPermissionsFilter\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.lessons.models import LessonAssignment\nfrom kolibri.core.query import ArrayAgg\nfrom kolibri.core.query import GroupConcat\nfrom kolibri.core.query import process_uuid_aggregate\n\n\ndef _ensure_raw_dict(d):\n if hasattr(d, \"dict\"):\n d = d.dict()\n return dict(d)\n\n\nclass LessonPermissions(KolibriAuthPermissions):\n # Overrides the default validator to sanitize the Lesson POST Payload\n # before validation\n def validator(self, request, view, datum):\n model = view.get_serializer_class().Meta.model\n validated_data = view.get_serializer().to_internal_value(\n _ensure_raw_dict(datum)\n )\n # Cannot have create assignments without creating the Lesson first,\n # so this doesn't try to validate the Lesson with a non-empty lesson_assignments list\n validated_data.pop(\"lesson_assignments\")\n return request.user.can_create(model, validated_data)\n\n\ndef _map_lesson_classroom(item):\n return {\n \"id\": item.pop(\"collection__id\"),\n \"name\": item.pop(\"collection__name\"),\n \"parent\": item.pop(\"collection__parent_id\"),\n }\n\n\nclass LessonViewset(ValuesViewset):\n serializer_class = LessonSerializer\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n filter_fields = (\"collection\",)\n permission_classes = (LessonPermissions,)\n queryset = Lesson.objects.all().order_by(\"-date_created\")\n\n values = (\n \"id\",\n \"title\",\n \"description\",\n \"resources\",\n \"is_active\",\n \"collection\", # classroom\n \"collection__id\",\n \"collection__name\",\n \"collection__parent_id\",\n \"created_by\",\n \"assignment_ids\",\n )\n\n field_map = {\n \"classroom\": _map_lesson_classroom,\n \"resources\": lambda x: json.loads(x[\"resources\"]),\n \"assignment_ids\": partial(process_uuid_aggregate, key=\"assignment_ids\"),\n }\n\n def 
consolidate(self, items):\n assignment_ids = []\n for item in items:\n assignment_ids.extend(item[\"assignment_ids\"])\n assignments = LessonAssignment.objects.filter(id__in=assignment_ids)\n if connection.vendor == \"postgresql\" and ArrayAgg is not None:\n assignments = assignments.annotate(\n learner_ids=ArrayAgg(\"collection__membership__user__id\"),\n )\n\n def _process_item(item):\n item[\"learner_ids\"] = map(\n lambda x: x.hex, filter(lambda x: x, item[\"learner_ids\"])\n )\n return item\n\n else:\n assignments = assignments.values(\"id\").annotate(\n learner_ids=GroupConcat(\n \"collection__membership__user__id\", output_field=CharField(),\n ),\n )\n\n def _process_item(item):\n item[\"learner_ids\"] = (\n item[\"learner_ids\"].split(\",\") if item[\"learner_ids\"] else []\n )\n return item\n\n assignments = {\n a[\"id\"]: _process_item(a)\n for a in assignments.values(\n \"id\", \"collection\", \"collection__kind\", \"learner_ids\", \"assigned_by\"\n )\n }\n for item in items:\n item_ids = item.pop(\"assignment_ids\")\n item[\"lesson_assignments\"] = [assignments[a] for a in item_ids]\n item[\"learner_ids\"] = list(\n set(\n chain.from_iterable(\n a.pop(\"learner_ids\") for a in item[\"lesson_assignments\"]\n )\n )\n )\n return items\n\n def annotate_queryset(self, queryset):\n if connection.vendor == \"postgresql\" and ArrayAgg is not None:\n return queryset.annotate(assignment_ids=ArrayAgg(\"lesson_assignments__id\"),)\n return queryset.values(\"id\").annotate(\n assignment_ids=GroupConcat(\n \"lesson_assignments__id\", output_field=CharField(),\n ),\n )\n", "path": "kolibri/core/lessons/viewsets.py"}], "after_files": [{"content": "import json\nfrom functools import partial\nfrom itertools import chain\n\nfrom django.db import connection\nfrom django.db.models import CharField\nfrom django.db.models import F\nfrom django_filters.rest_framework import DjangoFilterBackend\n\nfrom .serializers import LessonSerializer\nfrom kolibri.core.api import ValuesViewset\nfrom kolibri.core.auth.api import KolibriAuthPermissions\nfrom kolibri.core.auth.api import KolibriAuthPermissionsFilter\nfrom kolibri.core.lessons.models import Lesson\nfrom kolibri.core.lessons.models import LessonAssignment\nfrom kolibri.core.query import ArrayAgg\nfrom kolibri.core.query import GroupConcat\nfrom kolibri.core.query import process_uuid_aggregate\n\n\ndef _ensure_raw_dict(d):\n if hasattr(d, \"dict\"):\n d = d.dict()\n return dict(d)\n\n\nclass LessonPermissions(KolibriAuthPermissions):\n # Overrides the default validator to sanitize the Lesson POST Payload\n # before validation\n def validator(self, request, view, datum):\n model = view.get_serializer_class().Meta.model\n validated_data = view.get_serializer().to_internal_value(\n _ensure_raw_dict(datum)\n )\n # Cannot have create assignments without creating the Lesson first,\n # so this doesn't try to validate the Lesson with a non-empty lesson_assignments list\n validated_data.pop(\"lesson_assignments\")\n return request.user.can_create(model, validated_data)\n\n\ndef _map_lesson_classroom(item):\n return {\n \"id\": item.pop(\"collection__id\"),\n \"name\": item.pop(\"collection__name\"),\n \"parent\": item.pop(\"collection__parent_id\"),\n }\n\n\nclass LessonViewset(ValuesViewset):\n serializer_class = LessonSerializer\n filter_backends = (KolibriAuthPermissionsFilter, DjangoFilterBackend)\n filter_fields = (\"collection\",)\n permission_classes = (LessonPermissions,)\n queryset = Lesson.objects.all().order_by(\"-date_created\")\n\n values = (\n 
\"id\",\n \"title\",\n \"description\",\n \"resources\",\n \"is_active\",\n \"collection\", # classroom\n \"collection__id\",\n \"collection__name\",\n \"collection__parent_id\",\n \"created_by\",\n \"assignment_ids\",\n )\n\n field_map = {\n \"classroom\": _map_lesson_classroom,\n \"resources\": lambda x: json.loads(x[\"resources\"]),\n \"assignment_ids\": partial(process_uuid_aggregate, key=\"assignment_ids\"),\n }\n\n def consolidate(self, items):\n assignment_ids = []\n for item in items:\n assignment_ids.extend(item[\"assignment_ids\"])\n assignments = LessonAssignment.objects.filter(id__in=assignment_ids)\n if connection.vendor == \"postgresql\" and ArrayAgg is not None:\n assignments = assignments.annotate(\n learner_ids=ArrayAgg(\"collection__membership__user__id\"),\n )\n\n def _process_item(item):\n item[\"learner_ids\"] = map(\n lambda x: x.hex, filter(lambda x: x, item[\"learner_ids\"])\n )\n return item\n\n else:\n assignments = assignments.values(\"id\").annotate(\n learner_ids=GroupConcat(\n \"collection__membership__user__id\", output_field=CharField(),\n ),\n )\n\n def _process_item(item):\n item[\"learner_ids\"] = (\n item[\"learner_ids\"].split(\",\") if item[\"learner_ids\"] else []\n )\n return item\n\n assignments = {\n a[\"id\"]: _process_item(a)\n for a in assignments.values(\n \"id\",\n \"collection\",\n \"learner_ids\",\n \"assigned_by\",\n collection_kind=F(\"collection__kind\"),\n )\n }\n for item in items:\n item_ids = item.pop(\"assignment_ids\")\n item[\"lesson_assignments\"] = [assignments[a] for a in item_ids]\n item[\"learner_ids\"] = list(\n set(\n chain.from_iterable(\n a.pop(\"learner_ids\") for a in item[\"lesson_assignments\"]\n )\n )\n )\n return items\n\n def annotate_queryset(self, queryset):\n if connection.vendor == \"postgresql\" and ArrayAgg is not None:\n return queryset.annotate(assignment_ids=ArrayAgg(\"lesson_assignments__id\"),)\n return queryset.values(\"id\").annotate(\n assignment_ids=GroupConcat(\n \"lesson_assignments__id\", output_field=CharField(),\n ),\n )\n", "path": "kolibri/core/lessons/viewsets.py"}]} | 1,614 | 198 |
gh_patches_debug_134 | rasdani/github-patches | git_diff | mkdocs__mkdocs-636 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Site description not working
Hi,
I have the following configuration
``` yml
site_name: embed.js
site_url: http://embedjs.com
site_author: Ritesh Kumar
site_description: A jQuery plugin that analyses the string and automatically embeds emojis, media, maps, tweets, code and services.
pages:
- Home: index.md
- Getting Started: gettingStarted.md
- How to use ?: usage.md
- Documentation: documentation.md
- Working Examples: examples.md
- Changelogs: history.md
theme: readthedocs
extra_css: ["custom/custom.css"]
site_favicon: img/favicon.ico
repo_url: https://github.com/ritz078/embed.js
google_analytics: ['UA-60624235-2', 'rkritesh.in/embed.js']
```
But sadly author and description are not appearing in the served file. Other properties are working fine. Any help will be great.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/cli.py`
Content:
```
1 #!/usr/bin/env python
2 # coding: utf-8
3
4 from __future__ import unicode_literals
5 import logging
6 import click
7 import socket
8
9 from mkdocs import __version__
10 from mkdocs import build
11 from mkdocs import gh_deploy
12 from mkdocs import new
13 from mkdocs import serve
14 from mkdocs import utils
15 from mkdocs import exceptions
16 from mkdocs.config import load_config
17
18 log = logging.getLogger(__name__)
19
20
21 class State(object):
22 ''' Maintain logging level.'''
23
24 def __init__(self, log_name='mkdocs', level=logging.INFO):
25 self.logger = logging.getLogger(log_name)
26 self.logger.propagate = False
27 stream = logging.StreamHandler()
28 formatter = logging.Formatter("%(levelname)-7s - %(message)s ")
29 stream.setFormatter(formatter)
30 self.logger.addHandler(stream)
31
32 self.logger.setLevel(level)
33
34
35 pass_state = click.make_pass_decorator(State, ensure=True)
36
37
38 def verbose_option(f):
39 def callback(ctx, param, value):
40 state = ctx.ensure_object(State)
41 if value:
42 state.logger.setLevel(logging.DEBUG)
43 return click.option('-v', '--verbose',
44 is_flag=True,
45 expose_value=False,
46 help='Enable verbose output',
47 callback=callback)(f)
48
49
50 def quiet_option(f):
51 def callback(ctx, param, value):
52 state = ctx.ensure_object(State)
53 if value:
54 state.logger.setLevel(logging.ERROR)
55 return click.option('-q', '--quiet',
56 is_flag=True,
57 expose_value=False,
58 help='Silence warnings',
59 callback=callback)(f)
60
61
62 def common_options(f):
63 f = verbose_option(f)
64 f = quiet_option(f)
65 return f
66
67
68 clean_help = "Remove old files from the site_dir before building"
69 config_file_help = "Provide a specific MkDocs config"
70 dev_addr_help = ("IP address and port to serve documentation locally (default: "
71 "localhost:8000)")
72 strict_help = ("Enable strict mode. This will cause MkDocs to abort the build "
73 "on any warnings.")
74 theme_help = "The theme to use when building your documentation."
75 theme_choices = utils.get_theme_names()
76 site_dir_help = "The directory to output the result of the documentation build."
77 reload_help = "Enable and disable the live reloading in the development server."
78 commit_message_help = ("A commit message to use when commiting to the "
79 "Github Pages remote branch")
80 remote_branch_help = ("The remote branch to commit to for Github Pages. This "
81 "overrides the value specified in config")
82
83
84 @click.group(context_settings={'help_option_names': ['-h', '--help']})
85 @click.version_option(__version__, '-V', '--version')
86 @common_options
87 def cli():
88 """
89 MkDocs - Project documentation with Markdown.
90 """
91 pass
92
93
94 @cli.command(name="serve")
95 @click.option('-f', '--config-file', type=click.File('rb'), help=config_file_help)
96 @click.option('-a', '--dev-addr', help=dev_addr_help, metavar='<IP:PORT>')
97 @click.option('-s', '--strict', is_flag=True, help=strict_help)
98 @click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)
99 @click.option('--livereload/--no-livereload', default=True, help=reload_help)
100 @common_options
101 def serve_command(dev_addr, config_file, strict, theme, livereload):
102 """Run the builtin development server"""
103
104 logging.getLogger('tornado').setLevel(logging.WARNING)
105
106 try:
107 serve.serve(
108 config_file=config_file,
109 dev_addr=dev_addr,
110 strict=strict,
111 theme=theme,
112 livereload=livereload,
113 )
114 except (exceptions.ConfigurationError, socket.error) as e:
115 # Avoid ugly, unhelpful traceback
116 raise SystemExit('\n' + str(e))
117
118
119 @cli.command(name="build")
120 @click.option('-c', '--clean', is_flag=True, help=clean_help)
121 @click.option('-f', '--config-file', type=click.File('rb'), help=config_file_help)
122 @click.option('-s', '--strict', is_flag=True, help=strict_help)
123 @click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)
124 @click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help)
125 @common_options
126 def build_command(clean, config_file, strict, theme, site_dir):
127 """Build the MkDocs documentation"""
128 try:
129 build.build(load_config(
130 config_file=config_file,
131 strict=strict,
132 theme=theme,
133 site_dir=site_dir
134 ), clean_site_dir=clean)
135 except exceptions.ConfigurationError as e:
136 # Avoid ugly, unhelpful traceback
137 raise SystemExit('\n' + str(e))
138
139
140 @cli.command(name="json")
141 @click.option('-c', '--clean', is_flag=True, help=clean_help)
142 @click.option('-f', '--config-file', type=click.File('rb'), help=config_file_help)
143 @click.option('-s', '--strict', is_flag=True, help=strict_help)
144 @click.option('-d', '--site-dir', type=click.Path(), help=site_dir_help)
145 @common_options
146 def json_command(clean, config_file, strict, site_dir):
147 """Build the MkDocs documentation to JSON files
148
149 Rather than building your documentation to HTML pages, this
150 outputs each page in a simple JSON format. This command is
151 useful if you want to index your documentation in an external
152 search engine.
153 """
154
155 log.warning("The json command is deprcated and will be removed in a future "
156 "MkDocs release. For details on updating: "
157 "http://www.mkdocs.org/about/release-notes/")
158
159 try:
160 build.build(load_config(
161 config_file=config_file,
162 strict=strict,
163 site_dir=site_dir
164 ), dump_json=True, clean_site_dir=clean)
165 except exceptions.ConfigurationError as e:
166 # Avoid ugly, unhelpful traceback
167 raise SystemExit('\n' + str(e))
168
169
170 @cli.command(name="gh-deploy")
171 @click.option('-c', '--clean', is_flag=True, help=clean_help)
172 @click.option('-f', '--config-file', type=click.File('rb'), help=config_file_help)
173 @click.option('-m', '--message', help=commit_message_help)
174 @click.option('-b', '--remote-branch', help=remote_branch_help)
175 @click.option('-r', '--remote-name', help=remote_branch_help)
176 @common_options
177 def gh_deploy_command(config_file, clean, message, remote_branch, remote_name):
178 """Deploy your documentation to GitHub Pages"""
179 try:
180 config = load_config(
181 config_file=config_file,
182 remote_branch=remote_branch,
183 remote_name=remote_name
184 )
185 build.build(config, clean_site_dir=clean)
186 gh_deploy.gh_deploy(config, message=message)
187 except exceptions.ConfigurationError as e:
188 # Avoid ugly, unhelpful traceback
189 raise SystemExit('\n' + str(e))
190
191
192 @cli.command(name="new")
193 @click.argument("project_directory")
194 @common_options
195 def new_command(project_directory):
196 """Create a new MkDocs project"""
197 new.new(project_directory)
198
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mkdocs/cli.py b/mkdocs/cli.py
--- a/mkdocs/cli.py
+++ b/mkdocs/cli.py
@@ -88,7 +88,6 @@
"""
MkDocs - Project documentation with Markdown.
"""
- pass
@cli.command(name="serve")
| {"golden_diff": "diff --git a/mkdocs/cli.py b/mkdocs/cli.py\n--- a/mkdocs/cli.py\n+++ b/mkdocs/cli.py\n@@ -88,7 +88,6 @@\n \"\"\"\n MkDocs - Project documentation with Markdown.\n \"\"\"\n- pass\n \n \n @cli.command(name=\"serve\")\n", "issue": "Site description not working\nHi,\n\nI have the following configuration\n\n``` yml\nsite_name: embed.js\nsite_url: http://embedjs.com\nsite_author: Ritesh Kumar\nsite_description: A jQuery plugin that analyses the string and automatically embeds emojis, media, maps, tweets, code and services.\npages:\n- Home: index.md\n- Getting Started: gettingStarted.md\n- How to use ?: usage.md\n- Documentation: documentation.md\n- Working Examples: examples.md\n- Changelogs: history.md\ntheme: readthedocs\nextra_css: [\"custom/custom.css\"]\nsite_favicon: img/favicon.ico\nrepo_url: https://github.com/ritz078/embed.js\ngoogle_analytics: ['UA-60624235-2', 'rkritesh.in/embed.js']\n```\n\nBut sadly author and description are not appearing in the served file. other properties are working fine. Any help will be great.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom __future__ import unicode_literals\nimport logging\nimport click\nimport socket\n\nfrom mkdocs import __version__\nfrom mkdocs import build\nfrom mkdocs import gh_deploy\nfrom mkdocs import new\nfrom mkdocs import serve\nfrom mkdocs import utils\nfrom mkdocs import exceptions\nfrom mkdocs.config import load_config\n\nlog = logging.getLogger(__name__)\n\n\nclass State(object):\n ''' Maintain logging level.'''\n\n def __init__(self, log_name='mkdocs', level=logging.INFO):\n self.logger = logging.getLogger(log_name)\n self.logger.propagate = False\n stream = logging.StreamHandler()\n formatter = logging.Formatter(\"%(levelname)-7s - %(message)s \")\n stream.setFormatter(formatter)\n self.logger.addHandler(stream)\n\n self.logger.setLevel(level)\n\n\npass_state = click.make_pass_decorator(State, ensure=True)\n\n\ndef verbose_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n if value:\n state.logger.setLevel(logging.DEBUG)\n return click.option('-v', '--verbose',\n is_flag=True,\n expose_value=False,\n help='Enable verbose output',\n callback=callback)(f)\n\n\ndef quiet_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n if value:\n state.logger.setLevel(logging.ERROR)\n return click.option('-q', '--quiet',\n is_flag=True,\n expose_value=False,\n help='Silence warnings',\n callback=callback)(f)\n\n\ndef common_options(f):\n f = verbose_option(f)\n f = quiet_option(f)\n return f\n\n\nclean_help = \"Remove old files from the site_dir before building\"\nconfig_file_help = \"Provide a specific MkDocs config\"\ndev_addr_help = (\"IP address and port to serve documentation locally (default: \"\n \"localhost:8000)\")\nstrict_help = (\"Enable strict mode. This will cause MkDocs to abort the build \"\n \"on any warnings.\")\ntheme_help = \"The theme to use when building your documentation.\"\ntheme_choices = utils.get_theme_names()\nsite_dir_help = \"The directory to output the result of the documentation build.\"\nreload_help = \"Enable and disable the live reloading in the development server.\"\ncommit_message_help = (\"A commit message to use when commiting to the \"\n \"Github Pages remote branch\")\nremote_branch_help = (\"The remote branch to commit to for Github Pages. 
This \"\n \"overrides the value specified in config\")\n\n\[email protected](context_settings={'help_option_names': ['-h', '--help']})\[email protected]_option(__version__, '-V', '--version')\n@common_options\ndef cli():\n \"\"\"\n MkDocs - Project documentation with Markdown.\n \"\"\"\n pass\n\n\[email protected](name=\"serve\")\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_file_help)\[email protected]('-a', '--dev-addr', help=dev_addr_help, metavar='<IP:PORT>')\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)\[email protected]('--livereload/--no-livereload', default=True, help=reload_help)\n@common_options\ndef serve_command(dev_addr, config_file, strict, theme, livereload):\n \"\"\"Run the builtin development server\"\"\"\n\n logging.getLogger('tornado').setLevel(logging.WARNING)\n\n try:\n serve.serve(\n config_file=config_file,\n dev_addr=dev_addr,\n strict=strict,\n theme=theme,\n livereload=livereload,\n )\n except (exceptions.ConfigurationError, socket.error) as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"build\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_file_help)\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)\[email protected]('-d', '--site-dir', type=click.Path(), help=site_dir_help)\n@common_options\ndef build_command(clean, config_file, strict, theme, site_dir):\n \"\"\"Build the MkDocs documentation\"\"\"\n try:\n build.build(load_config(\n config_file=config_file,\n strict=strict,\n theme=theme,\n site_dir=site_dir\n ), clean_site_dir=clean)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"json\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_file_help)\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-d', '--site-dir', type=click.Path(), help=site_dir_help)\n@common_options\ndef json_command(clean, config_file, strict, site_dir):\n \"\"\"Build the MkDocs documentation to JSON files\n\n Rather than building your documentation to HTML pages, this\n outputs each page in a simple JSON format. This command is\n useful if you want to index your documentation in an external\n search engine.\n \"\"\"\n\n log.warning(\"The json command is deprcated and will be removed in a future \"\n \"MkDocs release. 
For details on updating: \"\n \"http://www.mkdocs.org/about/release-notes/\")\n\n try:\n build.build(load_config(\n config_file=config_file,\n strict=strict,\n site_dir=site_dir\n ), dump_json=True, clean_site_dir=clean)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"gh-deploy\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_file_help)\[email protected]('-m', '--message', help=commit_message_help)\[email protected]('-b', '--remote-branch', help=remote_branch_help)\[email protected]('-r', '--remote-name', help=remote_branch_help)\n@common_options\ndef gh_deploy_command(config_file, clean, message, remote_branch, remote_name):\n \"\"\"Deploy your documentation to GitHub Pages\"\"\"\n try:\n config = load_config(\n config_file=config_file,\n remote_branch=remote_branch,\n remote_name=remote_name\n )\n build.build(config, clean_site_dir=clean)\n gh_deploy.gh_deploy(config, message=message)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"new\")\[email protected](\"project_directory\")\n@common_options\ndef new_command(project_directory):\n \"\"\"Create a new MkDocs project\"\"\"\n new.new(project_directory)\n", "path": "mkdocs/cli.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom __future__ import unicode_literals\nimport logging\nimport click\nimport socket\n\nfrom mkdocs import __version__\nfrom mkdocs import build\nfrom mkdocs import gh_deploy\nfrom mkdocs import new\nfrom mkdocs import serve\nfrom mkdocs import utils\nfrom mkdocs import exceptions\nfrom mkdocs.config import load_config\n\nlog = logging.getLogger(__name__)\n\n\nclass State(object):\n ''' Maintain logging level.'''\n\n def __init__(self, log_name='mkdocs', level=logging.INFO):\n self.logger = logging.getLogger(log_name)\n self.logger.propagate = False\n stream = logging.StreamHandler()\n formatter = logging.Formatter(\"%(levelname)-7s - %(message)s \")\n stream.setFormatter(formatter)\n self.logger.addHandler(stream)\n\n self.logger.setLevel(level)\n\n\npass_state = click.make_pass_decorator(State, ensure=True)\n\n\ndef verbose_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n if value:\n state.logger.setLevel(logging.DEBUG)\n return click.option('-v', '--verbose',\n is_flag=True,\n expose_value=False,\n help='Enable verbose output',\n callback=callback)(f)\n\n\ndef quiet_option(f):\n def callback(ctx, param, value):\n state = ctx.ensure_object(State)\n if value:\n state.logger.setLevel(logging.ERROR)\n return click.option('-q', '--quiet',\n is_flag=True,\n expose_value=False,\n help='Silence warnings',\n callback=callback)(f)\n\n\ndef common_options(f):\n f = verbose_option(f)\n f = quiet_option(f)\n return f\n\n\nclean_help = \"Remove old files from the site_dir before building\"\nconfig_file_help = \"Provide a specific MkDocs config\"\ndev_addr_help = (\"IP address and port to serve documentation locally (default: \"\n \"localhost:8000)\")\nstrict_help = (\"Enable strict mode. 
This will cause MkDocs to abort the build \"\n \"on any warnings.\")\ntheme_help = \"The theme to use when building your documentation.\"\ntheme_choices = utils.get_theme_names()\nsite_dir_help = \"The directory to output the result of the documentation build.\"\nreload_help = \"Enable and disable the live reloading in the development server.\"\ncommit_message_help = (\"A commit message to use when commiting to the \"\n \"Github Pages remote branch\")\nremote_branch_help = (\"The remote branch to commit to for Github Pages. This \"\n \"overrides the value specified in config\")\n\n\[email protected](context_settings={'help_option_names': ['-h', '--help']})\[email protected]_option(__version__, '-V', '--version')\n@common_options\ndef cli():\n \"\"\"\n MkDocs - Project documentation with Markdown.\n \"\"\"\n\n\[email protected](name=\"serve\")\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_file_help)\[email protected]('-a', '--dev-addr', help=dev_addr_help, metavar='<IP:PORT>')\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)\[email protected]('--livereload/--no-livereload', default=True, help=reload_help)\n@common_options\ndef serve_command(dev_addr, config_file, strict, theme, livereload):\n \"\"\"Run the builtin development server\"\"\"\n\n logging.getLogger('tornado').setLevel(logging.WARNING)\n\n try:\n serve.serve(\n config_file=config_file,\n dev_addr=dev_addr,\n strict=strict,\n theme=theme,\n livereload=livereload,\n )\n except (exceptions.ConfigurationError, socket.error) as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"build\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_file_help)\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-t', '--theme', type=click.Choice(theme_choices), help=theme_help)\[email protected]('-d', '--site-dir', type=click.Path(), help=site_dir_help)\n@common_options\ndef build_command(clean, config_file, strict, theme, site_dir):\n \"\"\"Build the MkDocs documentation\"\"\"\n try:\n build.build(load_config(\n config_file=config_file,\n strict=strict,\n theme=theme,\n site_dir=site_dir\n ), clean_site_dir=clean)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"json\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_file_help)\[email protected]('-s', '--strict', is_flag=True, help=strict_help)\[email protected]('-d', '--site-dir', type=click.Path(), help=site_dir_help)\n@common_options\ndef json_command(clean, config_file, strict, site_dir):\n \"\"\"Build the MkDocs documentation to JSON files\n\n Rather than building your documentation to HTML pages, this\n outputs each page in a simple JSON format. This command is\n useful if you want to index your documentation in an external\n search engine.\n \"\"\"\n\n log.warning(\"The json command is deprcated and will be removed in a future \"\n \"MkDocs release. 
For details on updating: \"\n \"http://www.mkdocs.org/about/release-notes/\")\n\n try:\n build.build(load_config(\n config_file=config_file,\n strict=strict,\n site_dir=site_dir\n ), dump_json=True, clean_site_dir=clean)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"gh-deploy\")\[email protected]('-c', '--clean', is_flag=True, help=clean_help)\[email protected]('-f', '--config-file', type=click.File('rb'), help=config_file_help)\[email protected]('-m', '--message', help=commit_message_help)\[email protected]('-b', '--remote-branch', help=remote_branch_help)\[email protected]('-r', '--remote-name', help=remote_branch_help)\n@common_options\ndef gh_deploy_command(config_file, clean, message, remote_branch, remote_name):\n \"\"\"Deploy your documentation to GitHub Pages\"\"\"\n try:\n config = load_config(\n config_file=config_file,\n remote_branch=remote_branch,\n remote_name=remote_name\n )\n build.build(config, clean_site_dir=clean)\n gh_deploy.gh_deploy(config, message=message)\n except exceptions.ConfigurationError as e:\n # Avoid ugly, unhelpful traceback\n raise SystemExit('\\n' + str(e))\n\n\[email protected](name=\"new\")\[email protected](\"project_directory\")\n@common_options\ndef new_command(project_directory):\n \"\"\"Create a new MkDocs project\"\"\"\n new.new(project_directory)\n", "path": "mkdocs/cli.py"}]} | 2,504 | 69 |
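A side note on the golden diff above, which only drops the trailing `pass` from the `cli()` group callback: in Python a function whose body consists solely of a docstring is already valid, so the statement was redundant and removing it changes no behavior. The sketch below is a minimal standalone illustration of that point, not the full MkDocs CLI:

```python
import click

@click.group(context_settings={'help_option_names': ['-h', '--help']})
def cli():
    """
    A docstring is a complete function body on its own, so no `pass` is needed.
    """

if __name__ == '__main__':
    cli()  # invoking the group with no subcommand prints its help text
```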
gh_patches_debug_4596 | rasdani/github-patches | git_diff | liberapay__liberapay.com-979 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Currency exchange rate between EUR and USD is mixed up
If I give euros to an account using the dollar as its main currency, the currency exchange rate gets mixed up somewhere.
For instance if I give 5€, on the creator's page it only displays 4.05$ instead of 6.18$.
Current exchange rate:
* 5 EUR to USD: 6.18$
* 5 USD to EUR: 4.05€
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `liberapay/models/_mixin_team.py`
Content:
```
1 """Teams are groups of participants.
2 """
3 from __future__ import division, print_function, unicode_literals
4
5 from collections import OrderedDict
6 from statistics import median
7
8 from mangopay.utils import Money
9
10 from liberapay.constants import ZERO, TAKE_THROTTLING_THRESHOLD
11 from liberapay.utils import NS
12
13
14 class MemberLimitReached(Exception): pass
15
16
17 class InactiveParticipantAdded(Exception): pass
18
19
20 class MixinTeam(object):
21
22 def invite(self, invitee, inviter):
23 assert self.kind == 'group'
24 with self.db.get_cursor() as c:
25 n_id = invitee.notify(
26 'team_invite',
27 team=self.username,
28 team_url=self.url(),
29 inviter=inviter.username,
30 )
31 payload = dict(invitee=invitee.id, notification_id=n_id)
32 self.add_event(c, 'invite', payload, inviter.id)
33
34 def add_member(self, member, cursor=None):
35 """Add a member to this team.
36 """
37 if self.nmembers >= 149:
38 raise MemberLimitReached
39 if member.status != 'active':
40 raise InactiveParticipantAdded
41 self.set_take_for(member, ZERO[self.main_currency], self, cursor=cursor)
42
43 def remove_all_members(self, cursor=None):
44 (cursor or self.db).run("""
45 INSERT INTO takes
46 (ctime, member, team, amount, actual_amount, recorder)
47 SELECT ctime, member, %(id)s, NULL, NULL, %(id)s
48 FROM current_takes
49 WHERE team=%(id)s
50 """, dict(id=self.id))
51
52 def member_of(self, team):
53 """Given a Participant object, return a boolean.
54 """
55 assert team.kind == 'group'
56 return self.db.one("""
57 SELECT true
58 FROM current_takes
59 WHERE team=%s AND member=%s
60 """, (team.id, self.id), default=False)
61
62 def get_takes_last_week(self):
63 """Get the users' nominal takes last week. Used in throttling.
64 """
65 assert self.kind == 'group'
66 takes = {t.member: t.amount for t in self.db.all("""
67
68 SELECT DISTINCT ON (member) member, amount, mtime
69 FROM takes
70 WHERE team=%s
71 AND mtime < (
72 SELECT ts_start
73 FROM paydays
74 WHERE ts_end > ts_start
75 ORDER BY ts_start DESC LIMIT 1
76 )
77 ORDER BY member, mtime DESC
78
79 """, (self.id,)) if t.amount}
80 return takes
81
82 def get_take_for(self, member):
83 """Return the nominal take for this member, or None.
84 """
85 return self.db.one(
86 "SELECT amount FROM current_takes WHERE member = %s AND team = %s",
87 (member.id, self.id)
88 )
89
90 def compute_max_this_week(self, member_id, last_week):
91 """2x the member's take last week, or the member's take last week + the
92 leftover, or last week's median take, or 1.00, or infinity if the takes
93 were all zero last week or if throttling is disabled.
94 """
95 if not self.throttle_takes:
96 return
97 sum_last_week = sum(last_week.values())
98 if sum_last_week == 0:
99 return
100 sum_last_week = sum_last_week.convert(self.main_currency)
101 initial_leftover = self.receiving - sum_last_week
102 nonzero_last_week = [a.convert(self.main_currency).amount for a in last_week.values() if a]
103 member_last_week = last_week.get(member_id, ZERO[self.main_currency]).convert(self.main_currency)
104 return max(
105 member_last_week * 2,
106 member_last_week + initial_leftover,
107 Money(median(nonzero_last_week or (0,)), self.main_currency),
108 TAKE_THROTTLING_THRESHOLD[self.main_currency]
109 )
110
111 def set_take_for(self, member, take, recorder, check_max=True, cursor=None):
112 """Sets member's take from the team pool.
113 """
114 assert self.kind == 'group'
115
116 if recorder.id != self.id:
117 cur_take = self.get_take_for(member)
118 if cur_take is None:
119 return None
120
121 if not isinstance(take, (None.__class__, Money)):
122 take = Money(take, self.main_currency) # TODO drop this
123
124 with self.db.get_cursor(cursor) as cursor:
125 # Lock to avoid race conditions
126 cursor.run("LOCK TABLE takes IN EXCLUSIVE MODE")
127 # Throttle the new take, if there is more than one member
128 threshold = TAKE_THROTTLING_THRESHOLD[self.main_currency]
129 if take and check_max and self.nmembers > 1 and take > threshold:
130 last_week = self.get_takes_last_week()
131 max_this_week = self.compute_max_this_week(member.id, last_week)
132 if max_this_week is not None and take > max_this_week:
133 take = max_this_week
134 # Insert the new take
135 cursor.run("""
136
137 INSERT INTO takes
138 (ctime, member, team, amount, actual_amount, recorder)
139 SELECT COALESCE((
140 SELECT ctime
141 FROM takes
142 WHERE member=%(member)s
143 AND team=%(team)s
144 LIMIT 1
145 ), current_timestamp)
146 , %(member)s
147 , %(team)s
148 , %(amount)s
149 , CASE WHEN %(amount)s IS NULL THEN NULL ELSE
150 COALESCE((
151 SELECT actual_amount
152 FROM takes
153 WHERE member=%(member)s
154 AND team=%(team)s
155 ORDER BY mtime DESC
156 LIMIT 1
157 ), zero(%(amount)s::currency_amount))
158 END
159 , %(recorder)s
160
161 """, dict(member=member.id, team=self.id, amount=take,
162 recorder=recorder.id))
163 # Recompute the actual takes and update the cached amounts
164 self.recompute_actual_takes(cursor, member=member)
165 # Update is_funded on member's tips
166 member.update_giving(cursor)
167
168 return take
169
170 def get_current_takes(self, cursor=None):
171 """Return a list of member takes for a team.
172 """
173 assert self.kind == 'group'
174 TAKES = """
175 SELECT p.id AS member_id, p.username AS member_name, p.avatar_url
176 , (p.mangopay_user_id IS NOT NULL) AS is_identified, p.is_suspended
177 , t.amount, t.actual_amount, t.ctime, t.mtime
178 FROM current_takes t
179 JOIN participants p ON p.id = member
180 WHERE t.team=%(team)s
181 ORDER BY p.username
182 """
183 records = (cursor or self.db).all(TAKES, dict(team=self.id))
184 return [r._asdict() for r in records]
185
186 def recompute_actual_takes(self, cursor, member=None):
187 """Get the tips and takes for this team and recompute the actual amounts.
188
189 To avoid deadlocks the given `cursor` should have already acquired an
190 exclusive lock on the `takes` table.
191 """
192 from liberapay.billing.payday import Payday
193 tips = [NS(t._asdict()) for t in cursor.all("""
194 SELECT t.id, t.tipper, t.amount AS full_amount
195 , coalesce_currency_amount((
196 SELECT sum(tr.amount, t.amount::currency)
197 FROM transfers tr
198 WHERE tr.tipper = t.tipper
199 AND tr.team = %(team_id)s
200 AND tr.context = 'take'
201 AND tr.status = 'succeeded'
202 ), t.amount::currency) AS past_transfers_sum
203 FROM current_tips t
204 JOIN participants p ON p.id = t.tipper
205 WHERE t.tippee = %(team_id)s
206 AND t.is_funded
207 AND p.is_suspended IS NOT true
208 """, dict(team_id=self.id))]
209 takes = [NS(r._asdict()) for r in (cursor or self.db).all("""
210 SELECT t.*
211 FROM current_takes t
212 JOIN participants p ON p.id = t.member
213 WHERE t.team = %s
214 AND p.is_suspended IS NOT true
215 AND p.mangopay_user_id IS NOT NULL
216 """, (self.id,))]
217 # Recompute the takes
218 takes_sum = {}
219 tippers = {}
220 transfers, new_leftover = Payday.resolve_takes(tips, takes, self.main_currency)
221 for t in transfers:
222 if t.member in takes_sum:
223 takes_sum[t.member] += t.amount
224 else:
225 takes_sum[t.member] = t.amount
226 if t.member in tippers:
227 tippers[t.member].add(t.tipper)
228 else:
229 tippers[t.member] = set((t.tipper,))
230 # Update the leftover
231 cursor.run("UPDATE participants SET leftover = %s WHERE id = %s",
232 (new_leftover, self.id))
233 self.set_attributes(leftover=new_leftover)
234 # Update the cached amounts (actual_amount, taking, and receiving)
235 zero = ZERO[self.main_currency]
236 for take in takes:
237 member_id = take.member
238 old_amount = take.actual_amount or zero
239 new_amount = takes_sum.get(take.member, zero)
240 diff = new_amount - old_amount
241 if diff != 0:
242 take.actual_amount = new_amount
243 cursor.run("""
244 UPDATE takes
245 SET actual_amount = %(actual_amount)s
246 WHERE id = %(id)s
247 """, take.__dict__)
248 ntippers = len(tippers.get(member_id, ()))
249 member_currency, old_taking = cursor.one(
250 "SELECT main_currency, taking FROM participants WHERE id = %s", (member_id,)
251 )
252 diff = diff.convert(member_currency)
253 if old_taking + diff < 0:
254 # Make sure currency fluctuation doesn't result in a negative number
255 diff = -old_taking
256 cursor.run("""
257 UPDATE participants
258 SET taking = (taking + %(diff)s)
259 , receiving = (receiving + %(diff)s)
260 , nteampatrons = (
261 CASE WHEN (receiving + %(diff)s) = 0 THEN 0
262 WHEN nteampatrons < %(ntippers)s THEN %(ntippers)s
263 ELSE nteampatrons
264 END
265 )
266 WHERE id=%(member_id)s
267 """, dict(member_id=member_id, diff=diff, ntippers=ntippers))
268 if member and member.id == member_id:
269 r = cursor.one(
270 "SELECT taking, receiving FROM participants WHERE id = %s",
271 (member_id,)
272 )
273 member.set_attributes(**r._asdict())
274 return takes
275
276 @property
277 def nmembers(self):
278 assert self.kind == 'group'
279 return self.db.one("""
280 SELECT COUNT(*)
281 FROM current_takes
282 WHERE team=%s
283 """, (self.id,))
284
285 def get_members(self):
286 """Return an OrderedDict of member dicts.
287 """
288 takes = self.get_current_takes()
289 last_week = self.get_takes_last_week()
290 members = OrderedDict()
291 members.leftover = self.leftover
292 zero = ZERO[self.main_currency]
293 for take in takes:
294 member = {}
295 m_id = member['id'] = take['member_id']
296 member['username'] = take['member_name']
297 member['nominal_take'] = take['amount'].amount
298 member['actual_amount'] = take['actual_amount']
299 member['last_week'] = last_week.get(m_id, zero).amount
300 x = self.compute_max_this_week(m_id, last_week)
301 member['max_this_week'] = x
302 members[member['id']] = member
303 return members
304
305 @property
306 def closed_by(self):
307 assert self.status == 'closed'
308 return self.db.one("""
309 SELECT member
310 FROM takes
311 WHERE team = %s
312 ORDER BY mtime DESC
313 LIMIT 1
314 """, (self.id,))
315
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/liberapay/models/_mixin_team.py b/liberapay/models/_mixin_team.py
--- a/liberapay/models/_mixin_team.py
+++ b/liberapay/models/_mixin_team.py
@@ -118,8 +118,7 @@
if cur_take is None:
return None
- if not isinstance(take, (None.__class__, Money)):
- take = Money(take, self.main_currency) # TODO drop this
+ assert isinstance(take, (None.__class__, Money))
with self.db.get_cursor(cursor) as cursor:
# Lock to avoid race conditions
| {"golden_diff": "diff --git a/liberapay/models/_mixin_team.py b/liberapay/models/_mixin_team.py\n--- a/liberapay/models/_mixin_team.py\n+++ b/liberapay/models/_mixin_team.py\n@@ -118,8 +118,7 @@\n if cur_take is None:\n return None\n \n- if not isinstance(take, (None.__class__, Money)):\n- take = Money(take, self.main_currency) # TODO drop this\n+ assert isinstance(take, (None.__class__, Money))\n \n with self.db.get_cursor(cursor) as cursor:\n # Lock to avoid race conditions\n", "issue": "Currency exchange rate between EUR and USD are mixed up\nIf I give euros to an account using dollar as main currency, the currency rate gets mixed up somewhere.\r\nFor instance if I give 5\u20ac, on the creator's page it only displays 4.05$ instead of 6.18$.\r\n\r\nCurrent exchange rate:\r\n* 5 EUR to USD: 6.18$\r\n* 5 USD to EUR: 4.05\u20ac\n", "before_files": [{"content": "\"\"\"Teams are groups of participants.\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom statistics import median\n\nfrom mangopay.utils import Money\n\nfrom liberapay.constants import ZERO, TAKE_THROTTLING_THRESHOLD\nfrom liberapay.utils import NS\n\n\nclass MemberLimitReached(Exception): pass\n\n\nclass InactiveParticipantAdded(Exception): pass\n\n\nclass MixinTeam(object):\n\n def invite(self, invitee, inviter):\n assert self.kind == 'group'\n with self.db.get_cursor() as c:\n n_id = invitee.notify(\n 'team_invite',\n team=self.username,\n team_url=self.url(),\n inviter=inviter.username,\n )\n payload = dict(invitee=invitee.id, notification_id=n_id)\n self.add_event(c, 'invite', payload, inviter.id)\n\n def add_member(self, member, cursor=None):\n \"\"\"Add a member to this team.\n \"\"\"\n if self.nmembers >= 149:\n raise MemberLimitReached\n if member.status != 'active':\n raise InactiveParticipantAdded\n self.set_take_for(member, ZERO[self.main_currency], self, cursor=cursor)\n\n def remove_all_members(self, cursor=None):\n (cursor or self.db).run(\"\"\"\n INSERT INTO takes\n (ctime, member, team, amount, actual_amount, recorder)\n SELECT ctime, member, %(id)s, NULL, NULL, %(id)s\n FROM current_takes\n WHERE team=%(id)s\n \"\"\", dict(id=self.id))\n\n def member_of(self, team):\n \"\"\"Given a Participant object, return a boolean.\n \"\"\"\n assert team.kind == 'group'\n return self.db.one(\"\"\"\n SELECT true\n FROM current_takes\n WHERE team=%s AND member=%s\n \"\"\", (team.id, self.id), default=False)\n\n def get_takes_last_week(self):\n \"\"\"Get the users' nominal takes last week. 
Used in throttling.\n \"\"\"\n assert self.kind == 'group'\n takes = {t.member: t.amount for t in self.db.all(\"\"\"\n\n SELECT DISTINCT ON (member) member, amount, mtime\n FROM takes\n WHERE team=%s\n AND mtime < (\n SELECT ts_start\n FROM paydays\n WHERE ts_end > ts_start\n ORDER BY ts_start DESC LIMIT 1\n )\n ORDER BY member, mtime DESC\n\n \"\"\", (self.id,)) if t.amount}\n return takes\n\n def get_take_for(self, member):\n \"\"\"Return the nominal take for this member, or None.\n \"\"\"\n return self.db.one(\n \"SELECT amount FROM current_takes WHERE member = %s AND team = %s\",\n (member.id, self.id)\n )\n\n def compute_max_this_week(self, member_id, last_week):\n \"\"\"2x the member's take last week, or the member's take last week + the\n leftover, or last week's median take, or 1.00, or infinity if the takes\n were all zero last week or if throttling is disabled.\n \"\"\"\n if not self.throttle_takes:\n return\n sum_last_week = sum(last_week.values())\n if sum_last_week == 0:\n return\n sum_last_week = sum_last_week.convert(self.main_currency)\n initial_leftover = self.receiving - sum_last_week\n nonzero_last_week = [a.convert(self.main_currency).amount for a in last_week.values() if a]\n member_last_week = last_week.get(member_id, ZERO[self.main_currency]).convert(self.main_currency)\n return max(\n member_last_week * 2,\n member_last_week + initial_leftover,\n Money(median(nonzero_last_week or (0,)), self.main_currency),\n TAKE_THROTTLING_THRESHOLD[self.main_currency]\n )\n\n def set_take_for(self, member, take, recorder, check_max=True, cursor=None):\n \"\"\"Sets member's take from the team pool.\n \"\"\"\n assert self.kind == 'group'\n\n if recorder.id != self.id:\n cur_take = self.get_take_for(member)\n if cur_take is None:\n return None\n\n if not isinstance(take, (None.__class__, Money)):\n take = Money(take, self.main_currency) # TODO drop this\n\n with self.db.get_cursor(cursor) as cursor:\n # Lock to avoid race conditions\n cursor.run(\"LOCK TABLE takes IN EXCLUSIVE MODE\")\n # Throttle the new take, if there is more than one member\n threshold = TAKE_THROTTLING_THRESHOLD[self.main_currency]\n if take and check_max and self.nmembers > 1 and take > threshold:\n last_week = self.get_takes_last_week()\n max_this_week = self.compute_max_this_week(member.id, last_week)\n if max_this_week is not None and take > max_this_week:\n take = max_this_week\n # Insert the new take\n cursor.run(\"\"\"\n\n INSERT INTO takes\n (ctime, member, team, amount, actual_amount, recorder)\n SELECT COALESCE((\n SELECT ctime\n FROM takes\n WHERE member=%(member)s\n AND team=%(team)s\n LIMIT 1\n ), current_timestamp)\n , %(member)s\n , %(team)s\n , %(amount)s\n , CASE WHEN %(amount)s IS NULL THEN NULL ELSE\n COALESCE((\n SELECT actual_amount\n FROM takes\n WHERE member=%(member)s\n AND team=%(team)s\n ORDER BY mtime DESC\n LIMIT 1\n ), zero(%(amount)s::currency_amount))\n END\n , %(recorder)s\n\n \"\"\", dict(member=member.id, team=self.id, amount=take,\n recorder=recorder.id))\n # Recompute the actual takes and update the cached amounts\n self.recompute_actual_takes(cursor, member=member)\n # Update is_funded on member's tips\n member.update_giving(cursor)\n\n return take\n\n def get_current_takes(self, cursor=None):\n \"\"\"Return a list of member takes for a team.\n \"\"\"\n assert self.kind == 'group'\n TAKES = \"\"\"\n SELECT p.id AS member_id, p.username AS member_name, p.avatar_url\n , (p.mangopay_user_id IS NOT NULL) AS is_identified, p.is_suspended\n , t.amount, t.actual_amount, t.ctime, 
t.mtime\n FROM current_takes t\n JOIN participants p ON p.id = member\n WHERE t.team=%(team)s\n ORDER BY p.username\n \"\"\"\n records = (cursor or self.db).all(TAKES, dict(team=self.id))\n return [r._asdict() for r in records]\n\n def recompute_actual_takes(self, cursor, member=None):\n \"\"\"Get the tips and takes for this team and recompute the actual amounts.\n\n To avoid deadlocks the given `cursor` should have already acquired an\n exclusive lock on the `takes` table.\n \"\"\"\n from liberapay.billing.payday import Payday\n tips = [NS(t._asdict()) for t in cursor.all(\"\"\"\n SELECT t.id, t.tipper, t.amount AS full_amount\n , coalesce_currency_amount((\n SELECT sum(tr.amount, t.amount::currency)\n FROM transfers tr\n WHERE tr.tipper = t.tipper\n AND tr.team = %(team_id)s\n AND tr.context = 'take'\n AND tr.status = 'succeeded'\n ), t.amount::currency) AS past_transfers_sum\n FROM current_tips t\n JOIN participants p ON p.id = t.tipper\n WHERE t.tippee = %(team_id)s\n AND t.is_funded\n AND p.is_suspended IS NOT true\n \"\"\", dict(team_id=self.id))]\n takes = [NS(r._asdict()) for r in (cursor or self.db).all(\"\"\"\n SELECT t.*\n FROM current_takes t\n JOIN participants p ON p.id = t.member\n WHERE t.team = %s\n AND p.is_suspended IS NOT true\n AND p.mangopay_user_id IS NOT NULL\n \"\"\", (self.id,))]\n # Recompute the takes\n takes_sum = {}\n tippers = {}\n transfers, new_leftover = Payday.resolve_takes(tips, takes, self.main_currency)\n for t in transfers:\n if t.member in takes_sum:\n takes_sum[t.member] += t.amount\n else:\n takes_sum[t.member] = t.amount\n if t.member in tippers:\n tippers[t.member].add(t.tipper)\n else:\n tippers[t.member] = set((t.tipper,))\n # Update the leftover\n cursor.run(\"UPDATE participants SET leftover = %s WHERE id = %s\",\n (new_leftover, self.id))\n self.set_attributes(leftover=new_leftover)\n # Update the cached amounts (actual_amount, taking, and receiving)\n zero = ZERO[self.main_currency]\n for take in takes:\n member_id = take.member\n old_amount = take.actual_amount or zero\n new_amount = takes_sum.get(take.member, zero)\n diff = new_amount - old_amount\n if diff != 0:\n take.actual_amount = new_amount\n cursor.run(\"\"\"\n UPDATE takes\n SET actual_amount = %(actual_amount)s\n WHERE id = %(id)s\n \"\"\", take.__dict__)\n ntippers = len(tippers.get(member_id, ()))\n member_currency, old_taking = cursor.one(\n \"SELECT main_currency, taking FROM participants WHERE id = %s\", (member_id,)\n )\n diff = diff.convert(member_currency)\n if old_taking + diff < 0:\n # Make sure currency fluctuation doesn't result in a negative number\n diff = -old_taking\n cursor.run(\"\"\"\n UPDATE participants\n SET taking = (taking + %(diff)s)\n , receiving = (receiving + %(diff)s)\n , nteampatrons = (\n CASE WHEN (receiving + %(diff)s) = 0 THEN 0\n WHEN nteampatrons < %(ntippers)s THEN %(ntippers)s\n ELSE nteampatrons\n END\n )\n WHERE id=%(member_id)s\n \"\"\", dict(member_id=member_id, diff=diff, ntippers=ntippers))\n if member and member.id == member_id:\n r = cursor.one(\n \"SELECT taking, receiving FROM participants WHERE id = %s\",\n (member_id,)\n )\n member.set_attributes(**r._asdict())\n return takes\n\n @property\n def nmembers(self):\n assert self.kind == 'group'\n return self.db.one(\"\"\"\n SELECT COUNT(*)\n FROM current_takes\n WHERE team=%s\n \"\"\", (self.id,))\n\n def get_members(self):\n \"\"\"Return an OrderedDict of member dicts.\n \"\"\"\n takes = self.get_current_takes()\n last_week = self.get_takes_last_week()\n members = OrderedDict()\n 
members.leftover = self.leftover\n zero = ZERO[self.main_currency]\n for take in takes:\n member = {}\n m_id = member['id'] = take['member_id']\n member['username'] = take['member_name']\n member['nominal_take'] = take['amount'].amount\n member['actual_amount'] = take['actual_amount']\n member['last_week'] = last_week.get(m_id, zero).amount\n x = self.compute_max_this_week(m_id, last_week)\n member['max_this_week'] = x\n members[member['id']] = member\n return members\n\n @property\n def closed_by(self):\n assert self.status == 'closed'\n return self.db.one(\"\"\"\n SELECT member\n FROM takes\n WHERE team = %s\n ORDER BY mtime DESC\n LIMIT 1\n \"\"\", (self.id,))\n", "path": "liberapay/models/_mixin_team.py"}], "after_files": [{"content": "\"\"\"Teams are groups of participants.\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom statistics import median\n\nfrom mangopay.utils import Money\n\nfrom liberapay.constants import ZERO, TAKE_THROTTLING_THRESHOLD\nfrom liberapay.utils import NS\n\n\nclass MemberLimitReached(Exception): pass\n\n\nclass InactiveParticipantAdded(Exception): pass\n\n\nclass MixinTeam(object):\n\n def invite(self, invitee, inviter):\n assert self.kind == 'group'\n with self.db.get_cursor() as c:\n n_id = invitee.notify(\n 'team_invite',\n team=self.username,\n team_url=self.url(),\n inviter=inviter.username,\n )\n payload = dict(invitee=invitee.id, notification_id=n_id)\n self.add_event(c, 'invite', payload, inviter.id)\n\n def add_member(self, member, cursor=None):\n \"\"\"Add a member to this team.\n \"\"\"\n if self.nmembers >= 149:\n raise MemberLimitReached\n if member.status != 'active':\n raise InactiveParticipantAdded\n self.set_take_for(member, ZERO[self.main_currency], self, cursor=cursor)\n\n def remove_all_members(self, cursor=None):\n (cursor or self.db).run(\"\"\"\n INSERT INTO takes\n (ctime, member, team, amount, actual_amount, recorder)\n SELECT ctime, member, %(id)s, NULL, NULL, %(id)s\n FROM current_takes\n WHERE team=%(id)s\n \"\"\", dict(id=self.id))\n\n def member_of(self, team):\n \"\"\"Given a Participant object, return a boolean.\n \"\"\"\n assert team.kind == 'group'\n return self.db.one(\"\"\"\n SELECT true\n FROM current_takes\n WHERE team=%s AND member=%s\n \"\"\", (team.id, self.id), default=False)\n\n def get_takes_last_week(self):\n \"\"\"Get the users' nominal takes last week. 
Used in throttling.\n \"\"\"\n assert self.kind == 'group'\n takes = {t.member: t.amount for t in self.db.all(\"\"\"\n\n SELECT DISTINCT ON (member) member, amount, mtime\n FROM takes\n WHERE team=%s\n AND mtime < (\n SELECT ts_start\n FROM paydays\n WHERE ts_end > ts_start\n ORDER BY ts_start DESC LIMIT 1\n )\n ORDER BY member, mtime DESC\n\n \"\"\", (self.id,)) if t.amount}\n return takes\n\n def get_take_for(self, member):\n \"\"\"Return the nominal take for this member, or None.\n \"\"\"\n return self.db.one(\n \"SELECT amount FROM current_takes WHERE member = %s AND team = %s\",\n (member.id, self.id)\n )\n\n def compute_max_this_week(self, member_id, last_week):\n \"\"\"2x the member's take last week, or the member's take last week + the\n leftover, or last week's median take, or 1.00, or infinity if the takes\n were all zero last week or if throttling is disabled.\n \"\"\"\n if not self.throttle_takes:\n return\n sum_last_week = sum(last_week.values())\n if sum_last_week == 0:\n return\n sum_last_week = sum_last_week.convert(self.main_currency)\n initial_leftover = self.receiving - sum_last_week\n nonzero_last_week = [a.convert(self.main_currency).amount for a in last_week.values() if a]\n member_last_week = last_week.get(member_id, ZERO[self.main_currency]).convert(self.main_currency)\n return max(\n member_last_week * 2,\n member_last_week + initial_leftover,\n Money(median(nonzero_last_week or (0,)), self.main_currency),\n TAKE_THROTTLING_THRESHOLD[self.main_currency]\n )\n\n def set_take_for(self, member, take, recorder, check_max=True, cursor=None):\n \"\"\"Sets member's take from the team pool.\n \"\"\"\n assert self.kind == 'group'\n\n if recorder.id != self.id:\n cur_take = self.get_take_for(member)\n if cur_take is None:\n return None\n\n assert isinstance(take, (None.__class__, Money))\n\n with self.db.get_cursor(cursor) as cursor:\n # Lock to avoid race conditions\n cursor.run(\"LOCK TABLE takes IN EXCLUSIVE MODE\")\n # Throttle the new take, if there is more than one member\n threshold = TAKE_THROTTLING_THRESHOLD[self.main_currency]\n if take and check_max and self.nmembers > 1 and take > threshold:\n last_week = self.get_takes_last_week()\n max_this_week = self.compute_max_this_week(member.id, last_week)\n if max_this_week is not None and take > max_this_week:\n take = max_this_week\n # Insert the new take\n cursor.run(\"\"\"\n\n INSERT INTO takes\n (ctime, member, team, amount, actual_amount, recorder)\n SELECT COALESCE((\n SELECT ctime\n FROM takes\n WHERE member=%(member)s\n AND team=%(team)s\n LIMIT 1\n ), current_timestamp)\n , %(member)s\n , %(team)s\n , %(amount)s\n , CASE WHEN %(amount)s IS NULL THEN NULL ELSE\n COALESCE((\n SELECT actual_amount\n FROM takes\n WHERE member=%(member)s\n AND team=%(team)s\n ORDER BY mtime DESC\n LIMIT 1\n ), zero(%(amount)s::currency_amount))\n END\n , %(recorder)s\n\n \"\"\", dict(member=member.id, team=self.id, amount=take,\n recorder=recorder.id))\n # Recompute the actual takes and update the cached amounts\n self.recompute_actual_takes(cursor, member=member)\n # Update is_funded on member's tips\n member.update_giving(cursor)\n\n return take\n\n def get_current_takes(self, cursor=None):\n \"\"\"Return a list of member takes for a team.\n \"\"\"\n assert self.kind == 'group'\n TAKES = \"\"\"\n SELECT p.id AS member_id, p.username AS member_name, p.avatar_url\n , (p.mangopay_user_id IS NOT NULL) AS is_identified, p.is_suspended\n , t.amount, t.actual_amount, t.ctime, t.mtime\n FROM current_takes t\n JOIN participants p ON p.id = 
member\n WHERE t.team=%(team)s\n ORDER BY p.username\n \"\"\"\n records = (cursor or self.db).all(TAKES, dict(team=self.id))\n return [r._asdict() for r in records]\n\n def recompute_actual_takes(self, cursor, member=None):\n \"\"\"Get the tips and takes for this team and recompute the actual amounts.\n\n To avoid deadlocks the given `cursor` should have already acquired an\n exclusive lock on the `takes` table.\n \"\"\"\n from liberapay.billing.payday import Payday\n tips = [NS(t._asdict()) for t in cursor.all(\"\"\"\n SELECT t.id, t.tipper, t.amount AS full_amount\n , coalesce_currency_amount((\n SELECT sum(tr.amount, t.amount::currency)\n FROM transfers tr\n WHERE tr.tipper = t.tipper\n AND tr.team = %(team_id)s\n AND tr.context = 'take'\n AND tr.status = 'succeeded'\n ), t.amount::currency) AS past_transfers_sum\n FROM current_tips t\n JOIN participants p ON p.id = t.tipper\n WHERE t.tippee = %(team_id)s\n AND t.is_funded\n AND p.is_suspended IS NOT true\n \"\"\", dict(team_id=self.id))]\n takes = [NS(r._asdict()) for r in (cursor or self.db).all(\"\"\"\n SELECT t.*\n FROM current_takes t\n JOIN participants p ON p.id = t.member\n WHERE t.team = %s\n AND p.is_suspended IS NOT true\n AND p.mangopay_user_id IS NOT NULL\n \"\"\", (self.id,))]\n # Recompute the takes\n takes_sum = {}\n tippers = {}\n transfers, new_leftover = Payday.resolve_takes(tips, takes, self.main_currency)\n for t in transfers:\n if t.member in takes_sum:\n takes_sum[t.member] += t.amount\n else:\n takes_sum[t.member] = t.amount\n if t.member in tippers:\n tippers[t.member].add(t.tipper)\n else:\n tippers[t.member] = set((t.tipper,))\n # Update the leftover\n cursor.run(\"UPDATE participants SET leftover = %s WHERE id = %s\",\n (new_leftover, self.id))\n self.set_attributes(leftover=new_leftover)\n # Update the cached amounts (actual_amount, taking, and receiving)\n zero = ZERO[self.main_currency]\n for take in takes:\n member_id = take.member\n old_amount = take.actual_amount or zero\n new_amount = takes_sum.get(take.member, zero)\n diff = new_amount - old_amount\n if diff != 0:\n take.actual_amount = new_amount\n cursor.run(\"\"\"\n UPDATE takes\n SET actual_amount = %(actual_amount)s\n WHERE id = %(id)s\n \"\"\", take.__dict__)\n ntippers = len(tippers.get(member_id, ()))\n member_currency, old_taking = cursor.one(\n \"SELECT main_currency, taking FROM participants WHERE id = %s\", (member_id,)\n )\n diff = diff.convert(member_currency)\n if old_taking + diff < 0:\n # Make sure currency fluctuation doesn't result in a negative number\n diff = -old_taking\n cursor.run(\"\"\"\n UPDATE participants\n SET taking = (taking + %(diff)s)\n , receiving = (receiving + %(diff)s)\n , nteampatrons = (\n CASE WHEN (receiving + %(diff)s) = 0 THEN 0\n WHEN nteampatrons < %(ntippers)s THEN %(ntippers)s\n ELSE nteampatrons\n END\n )\n WHERE id=%(member_id)s\n \"\"\", dict(member_id=member_id, diff=diff, ntippers=ntippers))\n if member and member.id == member_id:\n r = cursor.one(\n \"SELECT taking, receiving FROM participants WHERE id = %s\",\n (member_id,)\n )\n member.set_attributes(**r._asdict())\n return takes\n\n @property\n def nmembers(self):\n assert self.kind == 'group'\n return self.db.one(\"\"\"\n SELECT COUNT(*)\n FROM current_takes\n WHERE team=%s\n \"\"\", (self.id,))\n\n def get_members(self):\n \"\"\"Return an OrderedDict of member dicts.\n \"\"\"\n takes = self.get_current_takes()\n last_week = self.get_takes_last_week()\n members = OrderedDict()\n members.leftover = self.leftover\n zero = 
ZERO[self.main_currency]\n for take in takes:\n member = {}\n m_id = member['id'] = take['member_id']\n member['username'] = take['member_name']\n member['nominal_take'] = take['amount'].amount\n member['actual_amount'] = take['actual_amount']\n member['last_week'] = last_week.get(m_id, zero).amount\n x = self.compute_max_this_week(m_id, last_week)\n member['max_this_week'] = x\n members[member['id']] = member\n return members\n\n @property\n def closed_by(self):\n assert self.status == 'closed'\n return self.db.one(\"\"\"\n SELECT member\n FROM takes\n WHERE team = %s\n ORDER BY mtime DESC\n LIMIT 1\n \"\"\", (self.id,))\n", "path": "liberapay/models/_mixin_team.py"}]} | 3,801 | 144 |
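A note on the kind of failure the diff above guards against: the removed fallback wrapped a bare number in the *team's* main currency, and once an amount is relabeled with the wrong currency the conversion runs in the opposite direction, which matches the 6.18 vs 4.05 figures in the issue. The snippet below is only a generic sketch of that inversion — the rate constant is back-derived from the issue's numbers, and Liberapay's real `Money` type and exchange tables are not shown here:

```python
from decimal import Decimal

USD_PER_EUR = Decimal("1.2355")            # implied by the issue: 5 EUR ~ 6.18 USD
EUR_PER_USD = Decimal("1") / USD_PER_EUR

def convert(amount, source, target):
    """Toy EUR<->USD conversion, just to show the direction of the error."""
    if source == target:
        return amount
    rate = USD_PER_EUR if (source, target) == ("EUR", "USD") else EUR_PER_USD
    return (amount * rate).quantize(Decimal("0.01"))

tip = Decimal("5.00")                       # the donor actually gave 5 EUR
print(convert(tip, "EUR", "USD"))           # 6.18 -- what a USD-based page should show
print(convert(tip, "USD", "EUR"))           # 4.05 -- what appears if the bare 5 is first
                                            #         relabeled as USD and then converted
```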
gh_patches_debug_26403 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-2188 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Set version for develop branch.
According to [PEP 440](https://www.python.org/dev/peps/pep-0440/#developmental-releases), we should set version `X.YaN.devM`.
--- END ISSUE ---
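For reference, the `X.YaN.devM` form mentioned above is what PEP 440 calls a developmental pre-release, whereas a bare string such as `develop` is rejected by PEP 440-compliant version parsers. A quick check with the `packaging` library illustrates the distinction (the concrete version number here is only an example, not the one the project ultimately adopts):

```python
from packaging.version import Version, InvalidVersion

print(Version("1.0.0a0.dev1"))   # X.YaN.devM -- parses as a developmental pre-release

try:
    Version("develop")           # the value currently used in the setup scripts below
except InvalidVersion as exc:
    print("rejected:", exc)      # PEP 440 does not allow arbitrary strings
```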
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup_preprocessing.py`
Content:
```
1 # Copyright 2020 The ElasticDL Authors. All rights reserved.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from setuptools import find_packages, setup
15
16 with open("elasticdl_preprocessing/requirements.txt") as f:
17 required_deps = f.read().splitlines()
18
19 extras = {}
20 with open("elasticdl_preprocessing/requirements-dev.txt") as f:
21 extras["develop"] = f.read().splitlines()
22
23 setup(
24 name="elasticdl_preprocessing",
25 version="develop",
26 description="A feature preprocessing library.",
27 long_description="This is an extension of the native Keras Preprocessing"
28 " Layers and Feature Column API from TensorFlow. We can develop our model"
29 " using the native high-level API from TensorFlow and our library."
30 " We can train this model using native TensorFlow or ElasticDL.",
31 long_description_content_type="text/markdown",
32 author="Ant Financial",
33 url="https://elasticdl.org",
34 install_requires=required_deps,
35 extras_require=extras,
36 python_requires=">=3.5",
37 packages=find_packages(
38 include=["elasticdl_preprocessing*"], exclude=["*test*"]
39 ),
40 package_data={"": ["requirements.txt"]},
41 )
42
```
Path: `setup.py`
Content:
```
1 # Copyright 2020 The ElasticDL Authors. All rights reserved.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from setuptools import find_packages, setup
15
16 with open("elasticdl/requirements.txt") as f:
17 required_deps = f.read().splitlines()
18 required_deps.append("elasticdl_client")
19 required_deps.append("elasticdl_preprocessing")
20
21 extras = {}
22 with open("elasticdl/requirements-dev.txt") as f:
23 extras["develop"] = f.read().splitlines()
24
25 setup(
26 name="elasticdl",
27 version="develop",
28 description="A Kubernetes-native Deep Learning Framework",
29 long_description="ElasticDL is a Kubernetes-native deep learning framework"
30 " built on top of TensorFlow 2.0 that supports"
31 " fault-tolerance and elastic scheduling.",
32 long_description_content_type="text/markdown",
33 author="Ant Financial",
34 url="https://elasticdl.org",
35 install_requires=required_deps,
36 extras_require=extras,
37 python_requires=">=3.5",
38 packages=find_packages(
39 exclude=[
40 "*test*",
41 "elasticdl_client*",
42 "elasticdl_preprocessing*",
43 "model_zoo*",
44 ]
45 ),
46 package_data={
47 "": [
48 "proto/*.proto",
49 "docker/*",
50 "Makefile",
51 "requirements.txt",
52 "go/bin/elasticdl_ps",
53 "go/pkg/kernel/capi/*",
54 ]
55 },
56 )
57
```
Path: `setup_client.py`
Content:
```
1 # Copyright 2020 The ElasticDL Authors. All rights reserved.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from setuptools import find_packages, setup
15
16 with open("elasticdl_client/requirements.txt") as f:
17 required_deps = f.read().splitlines()
18
19 setup(
20 name="elasticdl_client",
21 version="develop",
22 description="The client command line tool for ElasticDL.",
23 long_description="ElasticDL Client is the client command line tool for"
24 " ElasticDL. Users can use it to submit distributed ElasticDL jobs to"
25 " a Kubernetes cluster. It also provides an easy way to build and push"
26 " Docker images for distributed ElasticDL jobs.",
27 long_description_content_type="text/markdown",
28 author="Ant Financial",
29 url="https://elasticdl.org",
30 install_requires=required_deps,
31 python_requires=">=3.5",
32 packages=find_packages(include=["elasticdl_client*"]),
33 package_data={"": ["requirements.txt"]},
34 entry_points={"console_scripts": ["elasticdl=elasticdl_client.main:main"]},
35 )
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@
setup(
name="elasticdl",
- version="develop",
+ version="0.2.0rc3.dev0",
description="A Kubernetes-native Deep Learning Framework",
long_description="ElasticDL is a Kubernetes-native deep learning framework"
" built on top of TensorFlow 2.0 that supports"
diff --git a/setup_client.py b/setup_client.py
--- a/setup_client.py
+++ b/setup_client.py
@@ -18,7 +18,7 @@
setup(
name="elasticdl_client",
- version="develop",
+ version="0.2.0rc3.dev0",
description="The client command line tool for ElasticDL.",
long_description="ElasticDL Client is the client command line tool for"
" ElasticDL. Users can use it to submit distributed ElasticDL jobs to"
diff --git a/setup_preprocessing.py b/setup_preprocessing.py
--- a/setup_preprocessing.py
+++ b/setup_preprocessing.py
@@ -22,7 +22,7 @@
setup(
name="elasticdl_preprocessing",
- version="develop",
+ version="0.2.0rc3.dev0",
description="A feature preprocessing library.",
long_description="This is an extension of the native Keras Preprocessing"
" Layers and Feature Column API from TensorFlow. We can develop our model"
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -24,7 +24,7 @@\n \n setup(\n name=\"elasticdl\",\n- version=\"develop\",\n+ version=\"0.2.0rc3.dev0\",\n description=\"A Kubernetes-native Deep Learning Framework\",\n long_description=\"ElasticDL is a Kubernetes-native deep learning framework\"\n \" built on top of TensorFlow 2.0 that supports\"\ndiff --git a/setup_client.py b/setup_client.py\n--- a/setup_client.py\n+++ b/setup_client.py\n@@ -18,7 +18,7 @@\n \n setup(\n name=\"elasticdl_client\",\n- version=\"develop\",\n+ version=\"0.2.0rc3.dev0\",\n description=\"The client command line tool for ElasticDL.\",\n long_description=\"ElasticDL Client is the client command line tool for\"\n \" ElasticDL. Users can use it to submit distributed ElasticDL jobs to\"\ndiff --git a/setup_preprocessing.py b/setup_preprocessing.py\n--- a/setup_preprocessing.py\n+++ b/setup_preprocessing.py\n@@ -22,7 +22,7 @@\n \n setup(\n name=\"elasticdl_preprocessing\",\n- version=\"develop\",\n+ version=\"0.2.0rc3.dev0\",\n description=\"A feature preprocessing library.\",\n long_description=\"This is an extension of the native Keras Preprocessing\"\n \" Layers and Feature Column API from TensorFlow. We can develop our model\"\n", "issue": "Set version for develop branch.\nAccording to [PEP 440](https://www.python.org/dev/peps/pep-0440/#developmental-releases), we should set version `X.YaN.devM`.\n", "before_files": [{"content": "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\nwith open(\"elasticdl_preprocessing/requirements.txt\") as f:\n required_deps = f.read().splitlines()\n\nextras = {}\nwith open(\"elasticdl_preprocessing/requirements-dev.txt\") as f:\n extras[\"develop\"] = f.read().splitlines()\n\nsetup(\n name=\"elasticdl_preprocessing\",\n version=\"develop\",\n description=\"A feature preprocessing library.\",\n long_description=\"This is an extension of the native Keras Preprocessing\"\n \" Layers and Feature Column API from TensorFlow. We can develop our model\"\n \" using the native high-level API from TensorFlow and our library.\"\n \" We can train this model using native TensorFlow or ElasticDL.\",\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n install_requires=required_deps,\n extras_require=extras,\n python_requires=\">=3.5\",\n packages=find_packages(\n include=[\"elasticdl_preprocessing*\"], exclude=[\"*test*\"]\n ),\n package_data={\"\": [\"requirements.txt\"]},\n)\n", "path": "setup_preprocessing.py"}, {"content": "# Copyright 2020 The ElasticDL Authors. 
All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\nwith open(\"elasticdl/requirements.txt\") as f:\n required_deps = f.read().splitlines()\nrequired_deps.append(\"elasticdl_client\")\nrequired_deps.append(\"elasticdl_preprocessing\")\n\nextras = {}\nwith open(\"elasticdl/requirements-dev.txt\") as f:\n extras[\"develop\"] = f.read().splitlines()\n\nsetup(\n name=\"elasticdl\",\n version=\"develop\",\n description=\"A Kubernetes-native Deep Learning Framework\",\n long_description=\"ElasticDL is a Kubernetes-native deep learning framework\"\n \" built on top of TensorFlow 2.0 that supports\"\n \" fault-tolerance and elastic scheduling.\",\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n install_requires=required_deps,\n extras_require=extras,\n python_requires=\">=3.5\",\n packages=find_packages(\n exclude=[\n \"*test*\",\n \"elasticdl_client*\",\n \"elasticdl_preprocessing*\",\n \"model_zoo*\",\n ]\n ),\n package_data={\n \"\": [\n \"proto/*.proto\",\n \"docker/*\",\n \"Makefile\",\n \"requirements.txt\",\n \"go/bin/elasticdl_ps\",\n \"go/pkg/kernel/capi/*\",\n ]\n },\n)\n", "path": "setup.py"}, {"content": "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\nwith open(\"elasticdl_client/requirements.txt\") as f:\n required_deps = f.read().splitlines()\n\nsetup(\n name=\"elasticdl_client\",\n version=\"develop\",\n description=\"The client command line tool for ElasticDL.\",\n long_description=\"ElasticDL Client is the client command line tool for\"\n \" ElasticDL. Users can use it to submit distributed ElasticDL jobs to\"\n \" a Kubernetes cluster. It also provides an easy way to build and push\"\n \" Docker images for distributed ElasticDL jobs.\",\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n install_requires=required_deps,\n python_requires=\">=3.5\",\n packages=find_packages(include=[\"elasticdl_client*\"]),\n package_data={\"\": [\"requirements.txt\"]},\n entry_points={\"console_scripts\": [\"elasticdl=elasticdl_client.main:main\"]},\n)\n", "path": "setup_client.py"}], "after_files": [{"content": "# Copyright 2020 The ElasticDL Authors. 
All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\nwith open(\"elasticdl_preprocessing/requirements.txt\") as f:\n required_deps = f.read().splitlines()\n\nextras = {}\nwith open(\"elasticdl_preprocessing/requirements-dev.txt\") as f:\n extras[\"develop\"] = f.read().splitlines()\n\nsetup(\n name=\"elasticdl_preprocessing\",\n version=\"0.2.0rc3.dev0\",\n description=\"A feature preprocessing library.\",\n long_description=\"This is an extension of the native Keras Preprocessing\"\n \" Layers and Feature Column API from TensorFlow. We can develop our model\"\n \" using the native high-level API from TensorFlow and our library.\"\n \" We can train this model using native TensorFlow or ElasticDL.\",\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n install_requires=required_deps,\n extras_require=extras,\n python_requires=\">=3.5\",\n packages=find_packages(\n include=[\"elasticdl_preprocessing*\"], exclude=[\"*test*\"]\n ),\n package_data={\"\": [\"requirements.txt\"]},\n)\n", "path": "setup_preprocessing.py"}, {"content": "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\nwith open(\"elasticdl/requirements.txt\") as f:\n required_deps = f.read().splitlines()\nrequired_deps.append(\"elasticdl_client\")\nrequired_deps.append(\"elasticdl_preprocessing\")\n\nextras = {}\nwith open(\"elasticdl/requirements-dev.txt\") as f:\n extras[\"develop\"] = f.read().splitlines()\n\nsetup(\n name=\"elasticdl\",\n version=\"0.2.0rc3.dev0\",\n description=\"A Kubernetes-native Deep Learning Framework\",\n long_description=\"ElasticDL is a Kubernetes-native deep learning framework\"\n \" built on top of TensorFlow 2.0 that supports\"\n \" fault-tolerance and elastic scheduling.\",\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n install_requires=required_deps,\n extras_require=extras,\n python_requires=\">=3.5\",\n packages=find_packages(\n exclude=[\n \"*test*\",\n \"elasticdl_client*\",\n \"elasticdl_preprocessing*\",\n \"model_zoo*\",\n ]\n ),\n package_data={\n \"\": [\n \"proto/*.proto\",\n \"docker/*\",\n \"Makefile\",\n \"requirements.txt\",\n \"go/bin/elasticdl_ps\",\n \"go/pkg/kernel/capi/*\",\n ]\n },\n)\n", "path": "setup.py"}, {"content": "# Copyright 2020 The ElasticDL Authors. 
All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\nwith open(\"elasticdl_client/requirements.txt\") as f:\n required_deps = f.read().splitlines()\n\nsetup(\n name=\"elasticdl_client\",\n version=\"0.2.0rc3.dev0\",\n description=\"The client command line tool for ElasticDL.\",\n long_description=\"ElasticDL Client is the client command line tool for\"\n \" ElasticDL. Users can use it to submit distributed ElasticDL jobs to\"\n \" a Kubernetes cluster. It also provides an easy way to build and push\"\n \" Docker images for distributed ElasticDL jobs.\",\n long_description_content_type=\"text/markdown\",\n author=\"Ant Financial\",\n url=\"https://elasticdl.org\",\n install_requires=required_deps,\n python_requires=\">=3.5\",\n packages=find_packages(include=[\"elasticdl_client*\"]),\n package_data={\"\": [\"requirements.txt\"]},\n entry_points={\"console_scripts\": [\"elasticdl=elasticdl_client.main:main\"]},\n)\n", "path": "setup_client.py"}]} | 1,689 | 322 |
gh_patches_debug_28187 | rasdani/github-patches | git_diff | pymedusa__Medusa-6259 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[APP SUBMITTED]: TypeError: a bytes-like object is required, not 'str'
### INFO
**Python Version**: `3.5.3 (default, Sep 27 2018, 17:25:39) [GCC 6.3.0 20170516]`
**Operating System**: `Linux-4.9.0-4-amd64-x86_64-with-debian-9.6`
**Locale**: `UTF-8`
**Branch**: [feature/py3_2](../tree/feature/py3_2)
**Database**: `44.12`
**Commit**: pymedusa/Medusa@ac61f7ad628d70528cab9e8971071d79c0e90a95
**Link to Log**: https://gist.github.com/ee3a4c1fb32f4ed87dc90f4360ceba99
### ERROR
<pre>
2018-12-03 07:31:45 ERROR SNATCHQUEUE-SNATCH-114801 :: [ac61f7a] Snatch failed! For result: [HorribleSubs].Fairy.Tail.Final.Season.-.286.[1080p].mkv
Traceback (most recent call last):
File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/medusa/search/queue.py#L502">medusa/search/queue.py</a>", line 502, in run
self.success = snatch_episode(result)
File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/medusa/search/core.py#L167">medusa/search/core.py</a>", line 167, in snatch_episode
result_downloaded = client.send_torrent(result)
File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/medusa/clients/torrent/generic.py#L246">medusa/clients/torrent/generic.py</a>", line 246, in send_torrent
if not self._get_auth():
File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/medusa/clients/torrent/rtorrent_client.py#L55">medusa/clients/torrent/rtorrent_client.py</a>", line 55, in _get_auth
self.auth = RTorrent(self.host, None, None, True)
File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/__init__.py#L87">lib/rtorrent/__init__.py</a>", line 87, in __init__
self._verify_conn()
File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/__init__.py#L126">lib/rtorrent/__init__.py</a>", line 126, in _verify_conn
assert 'system.client_version' in self._get_rpc_methods(
File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/__init__.py#L164">lib/rtorrent/__init__.py</a>", line 164, in _get_rpc_methods
return(self._rpc_methods or self._update_rpc_methods())
File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/__init__.py#L154">lib/rtorrent/__init__.py</a>", line 154, in _update_rpc_methods
self._rpc_methods = self._get_conn().system.listMethods()
File "/usr/lib/python3.5/xmlrpc/client.py", line 1092, in __call__
return self.__send(self.__name, args)
File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/lib/xmlrpc/scgi.py#L195">lib/rtorrent/lib/xmlrpc/scgi.py</a>", line 195, in __request
verbose=self.__verbose
File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/lib/xmlrpc/scgi.py#L104">lib/rtorrent/lib/xmlrpc/scgi.py</a>", line 104, in request
return self.single_request(host, handler, request_body, verbose)
File "/opt/medusa/<a href="../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/lib/xmlrpc/scgi.py#L135">lib/rtorrent/lib/xmlrpc/scgi.py</a>", line 135, in single_request
sock.send(request_body)
TypeError: a bytes-like object is required, not 'str'
</pre>
---
_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators
--- END ISSUE ---
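For context, the final frame of the traceback comes from handing a text string to a raw socket; on Python 3 `socket.send()` only accepts bytes. A minimal reproduction sketch (illustrative only, not Medusa code):

```python
# Illustrative only: why sock.send(request_body) raises on Python 3.
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
request_body = "<?xml version='1.0'?><methodCall>...</methodCall>"
# sock.send(request_body)               # TypeError: a bytes-like object is required, not 'str'
payload = request_body.encode("utf-8")  # text must be encoded to bytes before sending
# sock.send(payload)                    # acceptable once the body is bytes
```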
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `medusa/session/hooks.py`
Content:
```
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4
5 import logging
6
7 from medusa.logger.adapters.style import BraceAdapter
8
9 from six import text_type
10
11 log = BraceAdapter(logging.getLogger(__name__))
12 log.logger.addHandler(logging.NullHandler())
13
14
15 def log_url(response, **kwargs):
16 """Response hook to log request URL."""
17 request = response.request
18 log.debug(
19 '{method} URL: {url} [Status: {status}]', {
20 'method': request.method,
21 'url': request.url,
22 'status': response.status_code,
23 }
24 )
25 log.debug('User-Agent: {}'.format(request.headers['User-Agent']))
26
27 if request.method.upper() == 'POST':
28 if request.body:
29 if 'multipart/form-data' not in request.headers.get('content-type', ''):
30 body = request.body
31 else:
32 body = request.body[1:99].replace('\n', ' ') + '...'
33 else:
34 body = ''
35
36 # try to log post data using various codecs to decode
37 if isinstance(body, text_type):
38 log.debug('With post data: {0}', body)
39 return
40
41 codecs = ('utf-8', 'latin1', 'cp1252')
42 for codec in codecs:
43 try:
44 data = body.decode(codec)
45 except UnicodeError as error:
46 log.debug('Failed to decode post data as {codec}: {msg}',
47 {'codec': codec, 'msg': error})
48 else:
49 log.debug('With post data: {0}', data)
50 break
51 else:
52 log.warning('Failed to decode post data with {codecs}',
53 {'codecs': codecs})
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/medusa/session/hooks.py b/medusa/session/hooks.py
--- a/medusa/session/hooks.py
+++ b/medusa/session/hooks.py
@@ -6,7 +6,7 @@
from medusa.logger.adapters.style import BraceAdapter
-from six import text_type
+from six import ensure_text
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
@@ -26,28 +26,12 @@
if request.method.upper() == 'POST':
if request.body:
+ text_body = ensure_text(request.body)
if 'multipart/form-data' not in request.headers.get('content-type', ''):
- body = request.body
+ body = text_body
+ elif len(text_body) > 99:
+ body = text_body[0:99].replace('\n', ' ') + '...'
else:
- body = request.body[1:99].replace('\n', ' ') + '...'
- else:
- body = ''
+ body = text_body.replace('\n', ' ')
- # try to log post data using various codecs to decode
- if isinstance(body, text_type):
log.debug('With post data: {0}', body)
- return
-
- codecs = ('utf-8', 'latin1', 'cp1252')
- for codec in codecs:
- try:
- data = body.decode(codec)
- except UnicodeError as error:
- log.debug('Failed to decode post data as {codec}: {msg}',
- {'codec': codec, 'msg': error})
- else:
- log.debug('With post data: {0}', data)
- break
- else:
- log.warning('Failed to decode post data with {codecs}',
- {'codecs': codecs})
| {"golden_diff": "diff --git a/medusa/session/hooks.py b/medusa/session/hooks.py\n--- a/medusa/session/hooks.py\n+++ b/medusa/session/hooks.py\n@@ -6,7 +6,7 @@\n \n from medusa.logger.adapters.style import BraceAdapter\n \n-from six import text_type\n+from six import ensure_text\n \n log = BraceAdapter(logging.getLogger(__name__))\n log.logger.addHandler(logging.NullHandler())\n@@ -26,28 +26,12 @@\n \n if request.method.upper() == 'POST':\n if request.body:\n+ text_body = ensure_text(request.body)\n if 'multipart/form-data' not in request.headers.get('content-type', ''):\n- body = request.body\n+ body = text_body\n+ elif len(text_body) > 99:\n+ body = text_body[0:99].replace('\\n', ' ') + '...'\n else:\n- body = request.body[1:99].replace('\\n', ' ') + '...'\n- else:\n- body = ''\n+ body = text_body.replace('\\n', ' ')\n \n- # try to log post data using various codecs to decode\n- if isinstance(body, text_type):\n log.debug('With post data: {0}', body)\n- return\n-\n- codecs = ('utf-8', 'latin1', 'cp1252')\n- for codec in codecs:\n- try:\n- data = body.decode(codec)\n- except UnicodeError as error:\n- log.debug('Failed to decode post data as {codec}: {msg}',\n- {'codec': codec, 'msg': error})\n- else:\n- log.debug('With post data: {0}', data)\n- break\n- else:\n- log.warning('Failed to decode post data with {codecs}',\n- {'codecs': codecs})\n", "issue": "[APP SUBMITTED]: TypeError: a bytes-like object is required, not 'str'\n\n### INFO\n**Python Version**: `3.5.3 (default, Sep 27 2018, 17:25:39) [GCC 6.3.0 20170516]`\n**Operating System**: `Linux-4.9.0-4-amd64-x86_64-with-debian-9.6`\n**Locale**: `UTF-8`\n**Branch**: [feature/py3_2](../tree/feature/py3_2)\n**Database**: `44.12`\n**Commit**: pymedusa/Medusa@ac61f7ad628d70528cab9e8971071d79c0e90a95\n**Link to Log**: https://gist.github.com/ee3a4c1fb32f4ed87dc90f4360ceba99\n### ERROR\n<pre>\n2018-12-03 07:31:45 ERROR SNATCHQUEUE-SNATCH-114801 :: [ac61f7a] Snatch failed! 
For result: [HorribleSubs].Fairy.Tail.Final.Season.-.286.[1080p].mkv\nTraceback (most recent call last):\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/medusa/search/queue.py#L502\">medusa/search/queue.py</a>\", line 502, in run\n self.success = snatch_episode(result)\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/medusa/search/core.py#L167\">medusa/search/core.py</a>\", line 167, in snatch_episode\n result_downloaded = client.send_torrent(result)\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/medusa/clients/torrent/generic.py#L246\">medusa/clients/torrent/generic.py</a>\", line 246, in send_torrent\n if not self._get_auth():\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/medusa/clients/torrent/rtorrent_client.py#L55\">medusa/clients/torrent/rtorrent_client.py</a>\", line 55, in _get_auth\n self.auth = RTorrent(self.host, None, None, True)\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/__init__.py#L87\">lib/rtorrent/__init__.py</a>\", line 87, in __init__\n self._verify_conn()\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/__init__.py#L126\">lib/rtorrent/__init__.py</a>\", line 126, in _verify_conn\n assert 'system.client_version' in self._get_rpc_methods(\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/__init__.py#L164\">lib/rtorrent/__init__.py</a>\", line 164, in _get_rpc_methods\n return(self._rpc_methods or self._update_rpc_methods())\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/__init__.py#L154\">lib/rtorrent/__init__.py</a>\", line 154, in _update_rpc_methods\n self._rpc_methods = self._get_conn().system.listMethods()\n File \"/usr/lib/python3.5/xmlrpc/client.py\", line 1092, in __call__\n return self.__send(self.__name, args)\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/lib/xmlrpc/scgi.py#L195\">lib/rtorrent/lib/xmlrpc/scgi.py</a>\", line 195, in __request\n verbose=self.__verbose\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/lib/xmlrpc/scgi.py#L104\">lib/rtorrent/lib/xmlrpc/scgi.py</a>\", line 104, in request\n return self.single_request(host, handler, request_body, verbose)\n File \"/opt/medusa/<a href=\"../blob/ac61f7ad628d70528cab9e8971071d79c0e90a95/lib/rtorrent/lib/xmlrpc/scgi.py#L135\">lib/rtorrent/lib/xmlrpc/scgi.py</a>\", line 135, in single_request\n sock.send(request_body)\nTypeError: a bytes-like object is required, not 'str'\n</pre>\n---\n_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom medusa.logger.adapters.style import BraceAdapter\n\nfrom six import text_type\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\ndef log_url(response, **kwargs):\n \"\"\"Response hook to log request URL.\"\"\"\n request = response.request\n log.debug(\n '{method} URL: {url} [Status: {status}]', {\n 'method': request.method,\n 'url': request.url,\n 'status': response.status_code,\n }\n )\n log.debug('User-Agent: {}'.format(request.headers['User-Agent']))\n\n if request.method.upper() == 'POST':\n if request.body:\n if 'multipart/form-data' not in request.headers.get('content-type', ''):\n body = 
request.body\n else:\n body = request.body[1:99].replace('\\n', ' ') + '...'\n else:\n body = ''\n\n # try to log post data using various codecs to decode\n if isinstance(body, text_type):\n log.debug('With post data: {0}', body)\n return\n\n codecs = ('utf-8', 'latin1', 'cp1252')\n for codec in codecs:\n try:\n data = body.decode(codec)\n except UnicodeError as error:\n log.debug('Failed to decode post data as {codec}: {msg}',\n {'codec': codec, 'msg': error})\n else:\n log.debug('With post data: {0}', data)\n break\n else:\n log.warning('Failed to decode post data with {codecs}',\n {'codecs': codecs})\n", "path": "medusa/session/hooks.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom medusa.logger.adapters.style import BraceAdapter\n\nfrom six import ensure_text\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\ndef log_url(response, **kwargs):\n \"\"\"Response hook to log request URL.\"\"\"\n request = response.request\n log.debug(\n '{method} URL: {url} [Status: {status}]', {\n 'method': request.method,\n 'url': request.url,\n 'status': response.status_code,\n }\n )\n log.debug('User-Agent: {}'.format(request.headers['User-Agent']))\n\n if request.method.upper() == 'POST':\n if request.body:\n text_body = ensure_text(request.body)\n if 'multipart/form-data' not in request.headers.get('content-type', ''):\n body = text_body\n elif len(text_body) > 99:\n body = text_body[0:99].replace('\\n', ' ') + '...'\n else:\n body = text_body.replace('\\n', ' ')\n\n log.debug('With post data: {0}', body)\n", "path": "medusa/session/hooks.py"}]} | 2,156 | 403 |
gh_patches_debug_1874 | rasdani/github-patches | git_diff | cloudtools__troposphere-120 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update metadata to include property keys
Within the CloudFormation metadata object for auto scaling launch configurations, it'd be nice to be able to rely on the validators within the template to sanity check the allowed keys within the 'config' dictionary:
```
"Resources": {
"MyInstance": {
"Type": "AWS::EC2::Instance",
"Metadata" : {
"AWS::CloudFormation::Init" : {
"config" : {
"packages" : {
:
},
"groups" : {
:
},
"users" : {
:
},
"sources" : {
:
},
"files" : {
:
},
"commands" : {
:
},
"services" : {
:
}
}
}
},
"Properties": {
:
}
}
}
```
Currently the implementation of troposphere.cloudformation.InitConfig only contains one key which is 'files'
--- END ISSUE ---
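For context, a sketch of how the requested keys would be exercised through troposphere once `InitConfig` accepts them (hypothetical usage; the key set is taken from the issue text, not from the shipped API):

```python
# Hypothetical usage sketch: cfn-init metadata validated through InitConfig
# once keys beyond 'files' are accepted.
from troposphere.cloudformation import Init, InitConfig

metadata = Init({
    "config": InitConfig(
        packages={"yum": {"httpd": []}},
        files={"/etc/motd": {"content": "hello\n", "mode": "000644"}},
        services={"sysvinit": {"httpd": {"enabled": "true", "ensureRunning": "true"}}},
    )
})
```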
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/cloudformation.py`
Content:
```
1 # Copyright (c) 2013, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSHelperFn, AWSObject, AWSProperty, Ref
7 from .validators import integer
8
9
10 class Stack(AWSObject):
11 type = "AWS::CloudFormation::Stack"
12
13 props = {
14 'TemplateURL': (basestring, True),
15 'TimeoutInMinutes': (integer, False),
16 'Parameters': (dict, False),
17 }
18
19
20 class WaitCondition(AWSObject):
21 type = "AWS::CloudFormation::WaitCondition"
22
23 props = {
24 'Count': (integer, False),
25 'Handle': (Ref, True),
26 'Timeout': (integer, True),
27 }
28
29
30 class WaitConditionHandle(AWSObject):
31 type = "AWS::CloudFormation::WaitConditionHandle"
32
33 props = {}
34
35
36 class InitFileContext(AWSHelperFn):
37 def __init__(self, data):
38 self.data = data
39
40 def JSONrepr(self):
41 return self.data
42
43
44 class InitFile(AWSProperty):
45 props = {
46 'content': (basestring, True),
47 'mode': (basestring, False),
48 'owner': (basestring, False),
49 'group': (basestring, False),
50 'context': (InitFileContext, False)
51 }
52
53
54 class InitFiles(AWSHelperFn):
55 def __init__(self, data):
56 self.validate(data)
57 self.data = data
58
59 def validate(self, data):
60 for k in data:
61 if not isinstance(data[k], InitFile):
62 raise ValueError("File '" + k + "' must be of type InitFile")
63
64 def JSONrepr(self):
65 return self.data
66
67
68 class InitConfig(AWSProperty):
69 props = {
70 'files': (dict, False)
71 }
72
73
74 class Init(AWSHelperFn):
75 def __init__(self, data):
76 self.validate(data)
77 self.data = {"AWS::CloudFormation::Init": data}
78
79 def validate(self, data):
80 if 'config' not in data:
81 raise ValueError('config property is required')
82 if not isinstance(data['config'], InitConfig):
83 raise ValueError(
84 'config property must be of type autoscaling.InitConfig'
85 )
86
87 def JSONrepr(self):
88 return self.data
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/troposphere/cloudformation.py b/troposphere/cloudformation.py
--- a/troposphere/cloudformation.py
+++ b/troposphere/cloudformation.py
@@ -67,7 +67,13 @@
class InitConfig(AWSProperty):
props = {
- 'files': (dict, False)
+ 'groups': (dict, False),
+ 'users': (dict, False),
+ 'sources': (dict, False),
+ 'packages': (dict, False),
+ 'files': (dict, False),
+ 'commands': (dict, False),
+ 'services': (dict, False)
}
| {"golden_diff": "diff --git a/troposphere/cloudformation.py b/troposphere/cloudformation.py\n--- a/troposphere/cloudformation.py\n+++ b/troposphere/cloudformation.py\n@@ -67,7 +67,13 @@\n \n class InitConfig(AWSProperty):\n props = {\n- 'files': (dict, False)\n+ 'groups': (dict, False),\n+ 'users': (dict, False),\n+ 'sources': (dict, False),\n+ 'packages': (dict, False),\n+ 'files': (dict, False),\n+ 'commands': (dict, False),\n+ 'services': (dict, False)\n }\n", "issue": "Update metadata to include property keys\nWithin the CloudFormation metadata object for auto scaling launch configurations, it'd be nice to be able to rely on the validators within the template to sanity check the allowed keys within the 'config' dictionary: \n\n```\n\"Resources\": {\n \"MyInstance\": {\n \"Type\": \"AWS::EC2::Instance\",\n \"Metadata\" : {\n \"AWS::CloudFormation::Init\" : {\n \"config\" : {\n \"packages\" : {\n :\n },\n \"groups\" : {\n :\n },\n \"users\" : {\n :\n },\n \"sources\" : {\n :\n },\n \"files\" : {\n :\n },\n \"commands\" : {\n :\n },\n \"services\" : {\n :\n }\n }\n }\n },\n \"Properties\": {\n :\n }\n }\n}\n```\n\nCurrently the implementation of troposphere.cloudformation.InitConfig only contains one key which is 'files'\n\n", "before_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSHelperFn, AWSObject, AWSProperty, Ref\nfrom .validators import integer\n\n\nclass Stack(AWSObject):\n type = \"AWS::CloudFormation::Stack\"\n\n props = {\n 'TemplateURL': (basestring, True),\n 'TimeoutInMinutes': (integer, False),\n 'Parameters': (dict, False),\n }\n\n\nclass WaitCondition(AWSObject):\n type = \"AWS::CloudFormation::WaitCondition\"\n\n props = {\n 'Count': (integer, False),\n 'Handle': (Ref, True),\n 'Timeout': (integer, True),\n }\n\n\nclass WaitConditionHandle(AWSObject):\n type = \"AWS::CloudFormation::WaitConditionHandle\"\n\n props = {}\n\n\nclass InitFileContext(AWSHelperFn):\n def __init__(self, data):\n self.data = data\n\n def JSONrepr(self):\n return self.data\n\n\nclass InitFile(AWSProperty):\n props = {\n 'content': (basestring, True),\n 'mode': (basestring, False),\n 'owner': (basestring, False),\n 'group': (basestring, False),\n 'context': (InitFileContext, False)\n }\n\n\nclass InitFiles(AWSHelperFn):\n def __init__(self, data):\n self.validate(data)\n self.data = data\n\n def validate(self, data):\n for k in data:\n if not isinstance(data[k], InitFile):\n raise ValueError(\"File '\" + k + \"' must be of type InitFile\")\n\n def JSONrepr(self):\n return self.data\n\n\nclass InitConfig(AWSProperty):\n props = {\n 'files': (dict, False)\n }\n\n\nclass Init(AWSHelperFn):\n def __init__(self, data):\n self.validate(data)\n self.data = {\"AWS::CloudFormation::Init\": data}\n\n def validate(self, data):\n if 'config' not in data:\n raise ValueError('config property is required')\n if not isinstance(data['config'], InitConfig):\n raise ValueError(\n 'config property must be of type autoscaling.InitConfig'\n )\n\n def JSONrepr(self):\n return self.data\n", "path": "troposphere/cloudformation.py"}], "after_files": [{"content": "# Copyright (c) 2013, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . 
import AWSHelperFn, AWSObject, AWSProperty, Ref\nfrom .validators import integer\n\n\nclass Stack(AWSObject):\n type = \"AWS::CloudFormation::Stack\"\n\n props = {\n 'TemplateURL': (basestring, True),\n 'TimeoutInMinutes': (integer, False),\n 'Parameters': (dict, False),\n }\n\n\nclass WaitCondition(AWSObject):\n type = \"AWS::CloudFormation::WaitCondition\"\n\n props = {\n 'Count': (integer, False),\n 'Handle': (Ref, True),\n 'Timeout': (integer, True),\n }\n\n\nclass WaitConditionHandle(AWSObject):\n type = \"AWS::CloudFormation::WaitConditionHandle\"\n\n props = {}\n\n\nclass InitFileContext(AWSHelperFn):\n def __init__(self, data):\n self.data = data\n\n def JSONrepr(self):\n return self.data\n\n\nclass InitFile(AWSProperty):\n props = {\n 'content': (basestring, True),\n 'mode': (basestring, False),\n 'owner': (basestring, False),\n 'group': (basestring, False),\n 'context': (InitFileContext, False)\n }\n\n\nclass InitFiles(AWSHelperFn):\n def __init__(self, data):\n self.validate(data)\n self.data = data\n\n def validate(self, data):\n for k in data:\n if not isinstance(data[k], InitFile):\n raise ValueError(\"File '\" + k + \"' must be of type InitFile\")\n\n def JSONrepr(self):\n return self.data\n\n\nclass InitConfig(AWSProperty):\n props = {\n 'groups': (dict, False),\n 'users': (dict, False),\n 'sources': (dict, False),\n 'packages': (dict, False),\n 'files': (dict, False),\n 'commands': (dict, False),\n 'services': (dict, False)\n }\n\n\nclass Init(AWSHelperFn):\n def __init__(self, data):\n self.validate(data)\n self.data = {\"AWS::CloudFormation::Init\": data}\n\n def validate(self, data):\n if 'config' not in data:\n raise ValueError('config property is required')\n if not isinstance(data['config'], InitConfig):\n raise ValueError(\n 'config property must be of type autoscaling.InitConfig'\n )\n\n def JSONrepr(self):\n return self.data\n", "path": "troposphere/cloudformation.py"}]} | 1,144 | 143 |
gh_patches_debug_21385 | rasdani/github-patches | git_diff | joke2k__faker-529 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Decrease binary dummy length
`fake.binary(length=1048576)` generates a huge binary payload, but that much data is useless at docs build time.
I tested two cases, 1048576 and 1024.
``` console
(faker) [item4@item4-mbp docs]$ echo "1048576"
1048576
(faker) [item4@item4-mbp docs]$ time make html
sphinx-build -b html -d _build/doctrees . _build/html
Running Sphinx v1.4.8
loading pickled environment... done
building [mo]: targets for 0 po files that are out of date
building [html]: targets for 61 source files that are out of date
updating environment: 0 added, 61 changed, 0 removed
reading sources... [100%] providers/faker.providers.user_agent
looking for now-outdated files... none found
pickling environment... done
checking consistency... done
preparing documents... done
writing output... [100%] providers/faker.providers.user_agent
generating indices... genindex
writing additional pages... search
copying static files... WARNING: html_static_path entry '/Users/item4/Projects/faker/docs/_static' does not exist
done
copying extra files... done
dumping search index in English (code: en) ... done
dumping object inventory... done
build succeeded, 1 warning.
Build finished. The HTML pages are in _build/html.
real 3m24.875s
user 3m3.222s
sys 0m10.986s
(faker) [item4@item4-mbp docs]$ echo "1024"
1024
(faker) [item4@item4-mbp docs]$ time make html
sphinx-build -b html -d _build/doctrees . _build/html
Running Sphinx v1.4.8
loading pickled environment... done
building [mo]: targets for 0 po files that are out of date
building [html]: targets for 61 source files that are out of date
updating environment: 0 added, 61 changed, 0 removed
reading sources... [100%] providers/faker.providers.user_agent
looking for now-outdated files... none found
pickling environment... done
checking consistency... done
preparing documents... done
writing output... [100%] providers/faker.providers.user_agent
generating indices... genindex
writing additional pages... search
copying static files... WARNING: html_static_path entry '/Users/item4/Projects/faker/docs/_static' does not exist
done
copying extra files... done
dumping search index in English (code: en) ... done
dumping object inventory... done
build succeeded, 1 warning.
Build finished. The HTML pages are in _build/html.
real 0m46.086s
user 0m43.772s
sys 0m1.479s
```
Do the docs really need a 1048576-byte binary?
--- END ISSUE ---
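For context, `fake.binary()` takes a `length` argument, so a documentation build can request a much smaller payload. A minimal sketch:

```python
# Illustrative only: a 1 KiB sample is plenty for a documentation example.
from faker import Faker

fake = Faker()
sample = fake.binary(length=1024)
assert len(sample) == 1024
```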
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/documentor.py`
Content:
```
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4
5 import inspect
6
7 from faker import utils
8
9
10 class Documentor(object):
11
12 def __init__(self, generator):
13 """
14 :param generator: a localized Generator with providers filled,
15 for which to write the documentation
16 :type generator: faker.Generator()
17 """
18 self.generator = generator
19 self.max_name_len = 0
20 self.already_generated = []
21
22 def get_formatters(self, locale=None, excludes=None, **kwargs):
23
24 self.max_name_len = 0
25 self.already_generated = [] if excludes is None else excludes[:]
26 formatters = []
27 providers = self.generator.get_providers()
28 for provider in providers[::-1]: # reverse
29 if locale and provider.__lang__ != locale:
30 continue
31 formatters.append(
32 (provider, self.get_provider_formatters(provider, **kwargs))
33 )
34 return formatters
35
36 def get_provider_formatters(self, provider, prefix='fake.',
37 with_args=True, with_defaults=True):
38
39 formatters = {}
40
41 for name, method in inspect.getmembers(provider, inspect.ismethod):
42
43 # skip 'private' method and inherited methods
44 if name.startswith('_') or name in self.already_generated:
45 continue
46
47 arguments = []
48
49 if with_args:
50 # retrieve all parameter
51 argspec = inspect.getargspec(method)
52
53 lst = [x for x in argspec.args if x not in ['self', 'cls']]
54 for i, arg in enumerate(lst):
55
56 if argspec.defaults and with_defaults:
57
58 try:
59 default = argspec.defaults[i]
60 if utils.is_string(default):
61 default = utils.quote(default)
62 else:
63 # TODO check default type
64 default = "{0}".format(default)
65
66 arg = "{0}={1}".format(arg, default)
67
68 except IndexError:
69 pass
70
71 arguments.append(arg)
72 if with_args == 'first':
73 break
74
75 if with_args != 'first':
76 if argspec.varargs:
77 arguments.append('*' + argspec.varargs)
78 if argspec.keywords:
79 arguments.append('**' + argspec.keywords)
80
81 # build fake method signature
82 signature = "{0}{1}({2})".format(prefix,
83 name,
84 ", ".join(arguments))
85
86 # make a fake example
87 example = self.generator.format(name)
88
89 formatters[signature] = example
90
91 self.max_name_len = max(self.max_name_len, len(signature))
92 self.already_generated.append(name)
93
94 return formatters
95
96 @staticmethod
97 def get_provider_name(provider_class):
98 return provider_class.__provider__
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/faker/documentor.py b/faker/documentor.py
--- a/faker/documentor.py
+++ b/faker/documentor.py
@@ -39,12 +39,16 @@
formatters = {}
for name, method in inspect.getmembers(provider, inspect.ismethod):
-
# skip 'private' method and inherited methods
if name.startswith('_') or name in self.already_generated:
continue
arguments = []
+ faker_args = []
+ faker_kwargs = {}
+
+ if name == 'binary':
+ faker_kwargs['length'] = 1024
if with_args:
# retrieve all parameter
@@ -84,7 +88,7 @@
", ".join(arguments))
# make a fake example
- example = self.generator.format(name)
+ example = self.generator.format(name, *faker_args, **faker_kwargs)
formatters[signature] = example
| {"golden_diff": "diff --git a/faker/documentor.py b/faker/documentor.py\n--- a/faker/documentor.py\n+++ b/faker/documentor.py\n@@ -39,12 +39,16 @@\n formatters = {}\n \n for name, method in inspect.getmembers(provider, inspect.ismethod):\n-\n # skip 'private' method and inherited methods\n if name.startswith('_') or name in self.already_generated:\n continue\n \n arguments = []\n+ faker_args = []\n+ faker_kwargs = {}\n+\n+ if name == 'binary':\n+ faker_kwargs['length'] = 1024\n \n if with_args:\n # retrieve all parameter\n@@ -84,7 +88,7 @@\n \", \".join(arguments))\n \n # make a fake example\n- example = self.generator.format(name)\n+ example = self.generator.format(name, *faker_args, **faker_kwargs)\n \n formatters[signature] = example\n", "issue": "Decrease binary dummy length\n`fake.binary(length=1048576)` generate too huge binary data but it is useless at docs build time.\n\nI tested two case, 1048576 and 1024.\n\n``` console\n(faker) [item4@item4-mbp docs]$ echo \"1048576\"\n1048576\n(faker) [item4@item4-mbp docs]$ time make html\nsphinx-build -b html -d _build/doctrees . _build/html\nRunning Sphinx v1.4.8\nloading pickled environment... done\nbuilding [mo]: targets for 0 po files that are out of date\nbuilding [html]: targets for 61 source files that are out of date\nupdating environment: 0 added, 61 changed, 0 removed\nreading sources... [100%] providers/faker.providers.user_agent\nlooking for now-outdated files... none found\npickling environment... done\nchecking consistency... done\npreparing documents... done\nwriting output... [100%] providers/faker.providers.user_agent\ngenerating indices... genindex\nwriting additional pages... search\ncopying static files... WARNING: html_static_path entry '/Users/item4/Projects/faker/docs/_static' does not exist\ndone\ncopying extra files... done\ndumping search index in English (code: en) ... done\ndumping object inventory... done\nbuild succeeded, 1 warning.\n\nBuild finished. The HTML pages are in _build/html.\n\nreal 3m24.875s\nuser 3m3.222s\nsys 0m10.986s\n(faker) [item4@item4-mbp docs]$ echo \"1024\"\n1024\n(faker) [item4@item4-mbp docs]$ time make html\nsphinx-build -b html -d _build/doctrees . _build/html\nRunning Sphinx v1.4.8\nloading pickled environment... done\nbuilding [mo]: targets for 0 po files that are out of date\nbuilding [html]: targets for 61 source files that are out of date\nupdating environment: 0 added, 61 changed, 0 removed\nreading sources... [100%] providers/faker.providers.user_agent\nlooking for now-outdated files... none found\npickling environment... done\nchecking consistency... done\npreparing documents... done\nwriting output... [100%] providers/faker.providers.user_agent\ngenerating indices... genindex\nwriting additional pages... search\ncopying static files... WARNING: html_static_path entry '/Users/item4/Projects/faker/docs/_static' does not exist\ndone\ncopying extra files... done\ndumping search index in English (code: en) ... done\ndumping object inventory... done\nbuild succeeded, 1 warning.\n\nBuild finished. 
The HTML pages are in _build/html.\n\nreal 0m46.086s\nuser 0m43.772s\nsys 0m1.479s\n```\n\nDoes docs really need 1048576 length binary?\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport inspect\n\nfrom faker import utils\n\n\nclass Documentor(object):\n\n def __init__(self, generator):\n \"\"\"\n :param generator: a localized Generator with providers filled,\n for which to write the documentation\n :type generator: faker.Generator()\n \"\"\"\n self.generator = generator\n self.max_name_len = 0\n self.already_generated = []\n\n def get_formatters(self, locale=None, excludes=None, **kwargs):\n\n self.max_name_len = 0\n self.already_generated = [] if excludes is None else excludes[:]\n formatters = []\n providers = self.generator.get_providers()\n for provider in providers[::-1]: # reverse\n if locale and provider.__lang__ != locale:\n continue\n formatters.append(\n (provider, self.get_provider_formatters(provider, **kwargs))\n )\n return formatters\n\n def get_provider_formatters(self, provider, prefix='fake.',\n with_args=True, with_defaults=True):\n\n formatters = {}\n\n for name, method in inspect.getmembers(provider, inspect.ismethod):\n\n # skip 'private' method and inherited methods\n if name.startswith('_') or name in self.already_generated:\n continue\n\n arguments = []\n\n if with_args:\n # retrieve all parameter\n argspec = inspect.getargspec(method)\n\n lst = [x for x in argspec.args if x not in ['self', 'cls']]\n for i, arg in enumerate(lst):\n\n if argspec.defaults and with_defaults:\n\n try:\n default = argspec.defaults[i]\n if utils.is_string(default):\n default = utils.quote(default)\n else:\n # TODO check default type\n default = \"{0}\".format(default)\n\n arg = \"{0}={1}\".format(arg, default)\n\n except IndexError:\n pass\n\n arguments.append(arg)\n if with_args == 'first':\n break\n\n if with_args != 'first':\n if argspec.varargs:\n arguments.append('*' + argspec.varargs)\n if argspec.keywords:\n arguments.append('**' + argspec.keywords)\n\n # build fake method signature\n signature = \"{0}{1}({2})\".format(prefix,\n name,\n \", \".join(arguments))\n\n # make a fake example\n example = self.generator.format(name)\n\n formatters[signature] = example\n\n self.max_name_len = max(self.max_name_len, len(signature))\n self.already_generated.append(name)\n\n return formatters\n\n @staticmethod\n def get_provider_name(provider_class):\n return provider_class.__provider__\n", "path": "faker/documentor.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport inspect\n\nfrom faker import utils\n\n\nclass Documentor(object):\n\n def __init__(self, generator):\n \"\"\"\n :param generator: a localized Generator with providers filled,\n for which to write the documentation\n :type generator: faker.Generator()\n \"\"\"\n self.generator = generator\n self.max_name_len = 0\n self.already_generated = []\n\n def get_formatters(self, locale=None, excludes=None, **kwargs):\n\n self.max_name_len = 0\n self.already_generated = [] if excludes is None else excludes[:]\n formatters = []\n providers = self.generator.get_providers()\n for provider in providers[::-1]: # reverse\n if locale and provider.__lang__ != locale:\n continue\n formatters.append(\n (provider, self.get_provider_formatters(provider, **kwargs))\n )\n return formatters\n\n def get_provider_formatters(self, provider, prefix='fake.',\n with_args=True, with_defaults=True):\n\n formatters = {}\n\n for name, method in 
inspect.getmembers(provider, inspect.ismethod):\n # skip 'private' method and inherited methods\n if name.startswith('_') or name in self.already_generated:\n continue\n\n arguments = []\n faker_args = []\n faker_kwargs = {}\n\n if name == 'binary':\n faker_kwargs['length'] = 1024\n\n if with_args:\n # retrieve all parameter\n argspec = inspect.getargspec(method)\n\n lst = [x for x in argspec.args if x not in ['self', 'cls']]\n for i, arg in enumerate(lst):\n\n if argspec.defaults and with_defaults:\n\n try:\n default = argspec.defaults[i]\n if utils.is_string(default):\n default = utils.quote(default)\n else:\n # TODO check default type\n default = \"{0}\".format(default)\n\n arg = \"{0}={1}\".format(arg, default)\n\n except IndexError:\n pass\n\n arguments.append(arg)\n if with_args == 'first':\n break\n\n if with_args != 'first':\n if argspec.varargs:\n arguments.append('*' + argspec.varargs)\n if argspec.keywords:\n arguments.append('**' + argspec.keywords)\n\n # build fake method signature\n signature = \"{0}{1}({2})\".format(prefix,\n name,\n \", \".join(arguments))\n\n # make a fake example\n example = self.generator.format(name, *faker_args, **faker_kwargs)\n\n formatters[signature] = example\n\n self.max_name_len = max(self.max_name_len, len(signature))\n self.already_generated.append(name)\n\n return formatters\n\n @staticmethod\n def get_provider_name(provider_class):\n return provider_class.__provider__\n", "path": "faker/documentor.py"}]} | 1,732 | 208 |
gh_patches_debug_18357 | rasdani/github-patches | git_diff | pyca__cryptography-414 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Binding.is_available can print to stderr
Specifically, it'll print an error message; you can see this if you run `tests/hazmat/bindings/test_bindings.py`.
--- END ISSUE ---
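For context, one hedged way to keep a compile-time availability probe quiet is to redirect file descriptor 2 to `/dev/null` around the check. This is a sketch only, not necessarily the project's eventual fix:

```python
# Hedged sketch: silence C-level stderr output while probing availability.
import os
from contextlib import contextmanager

@contextmanager
def quiet_stderr():
    saved_fd = os.dup(2)
    try:
        with open(os.devnull, "w") as devnull:
            os.dup2(devnull.fileno(), 2)
            yield
    finally:
        os.dup2(saved_fd, 2)
        os.close(saved_fd)
```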
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cryptography/hazmat/bindings/openssl/binding.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 from cryptography.hazmat.bindings.utils import (
17 build_ffi, binding_available
18 )
19
20 _OSX_PRE_INCLUDE = """
21 #ifdef __APPLE__
22 #include <AvailabilityMacros.h>
23 #define __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \
24 DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
25 #undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
26 #define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
27 #endif
28 """
29
30 _OSX_POST_INCLUDE = """
31 #ifdef __APPLE__
32 #undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
33 #define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \
34 __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER
35 #endif
36 """
37
38
39 class Binding(object):
40 """
41 OpenSSL API wrapper.
42 """
43 _module_prefix = "cryptography.hazmat.bindings.openssl."
44 _modules = [
45 "asn1",
46 "bignum",
47 "bio",
48 "conf",
49 "crypto",
50 "dh",
51 "dsa",
52 "engine",
53 "err",
54 "evp",
55 "hmac",
56 "nid",
57 "objects",
58 "opensslv",
59 "pem",
60 "pkcs7",
61 "pkcs12",
62 "rand",
63 "rsa",
64 "ssl",
65 "x509",
66 "x509name",
67 "x509v3",
68 ]
69
70 ffi = None
71 lib = None
72
73 def __init__(self):
74 self._ensure_ffi_initialized()
75
76 @classmethod
77 def _ensure_ffi_initialized(cls):
78 if cls.ffi is not None and cls.lib is not None:
79 return
80
81 cls.ffi, cls.lib = build_ffi(cls._module_prefix, cls._modules,
82 _OSX_PRE_INCLUDE, _OSX_POST_INCLUDE,
83 ["crypto", "ssl"])
84
85 @classmethod
86 def is_available(cls):
87 return binding_available(cls._ensure_ffi_initialized)
88
```
Path: `cryptography/hazmat/bindings/utils.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 import sys
17
18 import cffi
19
20
21 def build_ffi(module_prefix, modules, pre_include, post_include, libraries):
22 """
23 Modules listed in ``modules`` should have the following attributes:
24
25 * ``INCLUDES``: A string containing C includes.
26 * ``TYPES``: A string containing C declarations for types.
27 * ``FUNCTIONS``: A string containing C declarations for functions.
28 * ``MACROS``: A string containing C declarations for any macros.
29 * ``CUSTOMIZATIONS``: A string containing arbitrary top-level C code, this
30 can be used to do things like test for a define and provide an
31 alternate implementation based on that.
32 * ``CONDITIONAL_NAMES``: A dict mapping strings of condition names from the
33 library to a list of names which will not be present without the
34 condition.
35 """
36 ffi = cffi.FFI()
37 includes = []
38 functions = []
39 macros = []
40 customizations = []
41 for name in modules:
42 module_name = module_prefix + name
43 __import__(module_name)
44 module = sys.modules[module_name]
45
46 ffi.cdef(module.TYPES)
47
48 macros.append(module.MACROS)
49 functions.append(module.FUNCTIONS)
50 includes.append(module.INCLUDES)
51 customizations.append(module.CUSTOMIZATIONS)
52
53 # loop over the functions & macros after declaring all the types
54 # so we can set interdependent types in different files and still
55 # have them all defined before we parse the funcs & macros
56 for func in functions:
57 ffi.cdef(func)
58 for macro in macros:
59 ffi.cdef(macro)
60
61 # We include functions here so that if we got any of their definitions
62 # wrong, the underlying C compiler will explode. In C you are allowed
63 # to re-declare a function if it has the same signature. That is:
64 # int foo(int);
65 # int foo(int);
66 # is legal, but the following will fail to compile:
67 # int foo(int);
68 # int foo(short);
69 lib = ffi.verify(
70 source="\n".join(
71 [pre_include] +
72 includes +
73 [post_include] +
74 functions +
75 customizations
76 ),
77 libraries=libraries,
78 ext_package="cryptography",
79 )
80
81 for name in modules:
82 module_name = module_prefix + name
83 module = sys.modules[module_name]
84 for condition, names in module.CONDITIONAL_NAMES.items():
85 if not getattr(lib, condition):
86 for name in names:
87 delattr(lib, name)
88
89 return ffi, lib
90
91
92 def binding_available(initializer):
93 try:
94 initializer()
95 return True
96 except cffi.VerificationError:
97 return False
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cryptography/hazmat/bindings/openssl/binding.py b/cryptography/hazmat/bindings/openssl/binding.py
--- a/cryptography/hazmat/bindings/openssl/binding.py
+++ b/cryptography/hazmat/bindings/openssl/binding.py
@@ -13,9 +13,8 @@
from __future__ import absolute_import, division, print_function
-from cryptography.hazmat.bindings.utils import (
- build_ffi, binding_available
-)
+from cryptography.hazmat.bindings.utils import build_ffi
+
_OSX_PRE_INCLUDE = """
#ifdef __APPLE__
@@ -84,4 +83,5 @@
@classmethod
def is_available(cls):
- return binding_available(cls._ensure_ffi_initialized)
+ # OpenSSL is the only binding so for now it must always be available
+ return True
diff --git a/cryptography/hazmat/bindings/utils.py b/cryptography/hazmat/bindings/utils.py
--- a/cryptography/hazmat/bindings/utils.py
+++ b/cryptography/hazmat/bindings/utils.py
@@ -87,11 +87,3 @@
delattr(lib, name)
return ffi, lib
-
-
-def binding_available(initializer):
- try:
- initializer()
- return True
- except cffi.VerificationError:
- return False
| {"golden_diff": "diff --git a/cryptography/hazmat/bindings/openssl/binding.py b/cryptography/hazmat/bindings/openssl/binding.py\n--- a/cryptography/hazmat/bindings/openssl/binding.py\n+++ b/cryptography/hazmat/bindings/openssl/binding.py\n@@ -13,9 +13,8 @@\n \n from __future__ import absolute_import, division, print_function\n \n-from cryptography.hazmat.bindings.utils import (\n- build_ffi, binding_available\n-)\n+from cryptography.hazmat.bindings.utils import build_ffi\n+\n \n _OSX_PRE_INCLUDE = \"\"\"\n #ifdef __APPLE__\n@@ -84,4 +83,5 @@\n \n @classmethod\n def is_available(cls):\n- return binding_available(cls._ensure_ffi_initialized)\n+ # OpenSSL is the only binding so for now it must always be available\n+ return True\ndiff --git a/cryptography/hazmat/bindings/utils.py b/cryptography/hazmat/bindings/utils.py\n--- a/cryptography/hazmat/bindings/utils.py\n+++ b/cryptography/hazmat/bindings/utils.py\n@@ -87,11 +87,3 @@\n delattr(lib, name)\n \n return ffi, lib\n-\n-\n-def binding_available(initializer):\n- try:\n- initializer()\n- return True\n- except cffi.VerificationError:\n- return False\n", "issue": "Binding.is_avaialble can print to stderr\nSpecifically it'll print an error message, you can see this if you run `tests/hazmat/bindings/test_bindings.py`\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography.hazmat.bindings.utils import (\n build_ffi, binding_available\n)\n\n_OSX_PRE_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#include <AvailabilityMacros.h>\n#define __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n_OSX_POST_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n _module_prefix = \"cryptography.hazmat.bindings.openssl.\"\n _modules = [\n \"asn1\",\n \"bignum\",\n \"bio\",\n \"conf\",\n \"crypto\",\n \"dh\",\n \"dsa\",\n \"engine\",\n \"err\",\n \"evp\",\n \"hmac\",\n \"nid\",\n \"objects\",\n \"opensslv\",\n \"pem\",\n \"pkcs7\",\n \"pkcs12\",\n \"rand\",\n \"rsa\",\n \"ssl\",\n \"x509\",\n \"x509name\",\n \"x509v3\",\n ]\n\n ffi = None\n lib = None\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n if cls.ffi is not None and cls.lib is not None:\n return\n\n cls.ffi, cls.lib = build_ffi(cls._module_prefix, cls._modules,\n _OSX_PRE_INCLUDE, _OSX_POST_INCLUDE,\n [\"crypto\", \"ssl\"])\n\n @classmethod\n def is_available(cls):\n return binding_available(cls._ensure_ffi_initialized)\n", "path": "cryptography/hazmat/bindings/openssl/binding.py"}, {"content": "# Licensed under the Apache 
License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport sys\n\nimport cffi\n\n\ndef build_ffi(module_prefix, modules, pre_include, post_include, libraries):\n \"\"\"\n Modules listed in ``modules`` should have the following attributes:\n\n * ``INCLUDES``: A string containing C includes.\n * ``TYPES``: A string containing C declarations for types.\n * ``FUNCTIONS``: A string containing C declarations for functions.\n * ``MACROS``: A string containing C declarations for any macros.\n * ``CUSTOMIZATIONS``: A string containing arbitrary top-level C code, this\n can be used to do things like test for a define and provide an\n alternate implementation based on that.\n * ``CONDITIONAL_NAMES``: A dict mapping strings of condition names from the\n library to a list of names which will not be present without the\n condition.\n \"\"\"\n ffi = cffi.FFI()\n includes = []\n functions = []\n macros = []\n customizations = []\n for name in modules:\n module_name = module_prefix + name\n __import__(module_name)\n module = sys.modules[module_name]\n\n ffi.cdef(module.TYPES)\n\n macros.append(module.MACROS)\n functions.append(module.FUNCTIONS)\n includes.append(module.INCLUDES)\n customizations.append(module.CUSTOMIZATIONS)\n\n # loop over the functions & macros after declaring all the types\n # so we can set interdependent types in different files and still\n # have them all defined before we parse the funcs & macros\n for func in functions:\n ffi.cdef(func)\n for macro in macros:\n ffi.cdef(macro)\n\n # We include functions here so that if we got any of their definitions\n # wrong, the underlying C compiler will explode. In C you are allowed\n # to re-declare a function if it has the same signature. 
That is:\n # int foo(int);\n # int foo(int);\n # is legal, but the following will fail to compile:\n # int foo(int);\n # int foo(short);\n lib = ffi.verify(\n source=\"\\n\".join(\n [pre_include] +\n includes +\n [post_include] +\n functions +\n customizations\n ),\n libraries=libraries,\n ext_package=\"cryptography\",\n )\n\n for name in modules:\n module_name = module_prefix + name\n module = sys.modules[module_name]\n for condition, names in module.CONDITIONAL_NAMES.items():\n if not getattr(lib, condition):\n for name in names:\n delattr(lib, name)\n\n return ffi, lib\n\n\ndef binding_available(initializer):\n try:\n initializer()\n return True\n except cffi.VerificationError:\n return False\n", "path": "cryptography/hazmat/bindings/utils.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography.hazmat.bindings.utils import build_ffi\n\n\n_OSX_PRE_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#include <AvailabilityMacros.h>\n#define __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n_OSX_POST_INCLUDE = \"\"\"\n#ifdef __APPLE__\n#undef DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#define DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER \\\n __ORIG_DEPRECATED_IN_MAC_OS_X_VERSION_10_7_AND_LATER\n#endif\n\"\"\"\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n _module_prefix = \"cryptography.hazmat.bindings.openssl.\"\n _modules = [\n \"asn1\",\n \"bignum\",\n \"bio\",\n \"conf\",\n \"crypto\",\n \"dh\",\n \"dsa\",\n \"engine\",\n \"err\",\n \"evp\",\n \"hmac\",\n \"nid\",\n \"objects\",\n \"opensslv\",\n \"pem\",\n \"pkcs7\",\n \"pkcs12\",\n \"rand\",\n \"rsa\",\n \"ssl\",\n \"x509\",\n \"x509name\",\n \"x509v3\",\n ]\n\n ffi = None\n lib = None\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n if cls.ffi is not None and cls.lib is not None:\n return\n\n cls.ffi, cls.lib = build_ffi(cls._module_prefix, cls._modules,\n _OSX_PRE_INCLUDE, _OSX_POST_INCLUDE,\n [\"crypto\", \"ssl\"])\n\n @classmethod\n def is_available(cls):\n # OpenSSL is the only binding so for now it must always be available\n return True\n", "path": "cryptography/hazmat/bindings/openssl/binding.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport sys\n\nimport cffi\n\n\ndef build_ffi(module_prefix, modules, pre_include, post_include, libraries):\n \"\"\"\n Modules listed in ``modules`` should have the following attributes:\n\n * ``INCLUDES``: A string containing C includes.\n * ``TYPES``: A string containing C declarations for types.\n * ``FUNCTIONS``: A string containing C declarations for functions.\n * ``MACROS``: A string containing C declarations for any macros.\n * ``CUSTOMIZATIONS``: A string containing arbitrary top-level C code, this\n can be used to do things like test for a define and provide an\n alternate implementation based on that.\n * ``CONDITIONAL_NAMES``: A dict mapping strings of condition names from the\n library to a list of names which will not be present without the\n condition.\n \"\"\"\n ffi = cffi.FFI()\n includes = []\n functions = []\n macros = []\n customizations = []\n for name in modules:\n module_name = module_prefix + name\n __import__(module_name)\n module = sys.modules[module_name]\n\n ffi.cdef(module.TYPES)\n\n macros.append(module.MACROS)\n functions.append(module.FUNCTIONS)\n includes.append(module.INCLUDES)\n customizations.append(module.CUSTOMIZATIONS)\n\n # loop over the functions & macros after declaring all the types\n # so we can set interdependent types in different files and still\n # have them all defined before we parse the funcs & macros\n for func in functions:\n ffi.cdef(func)\n for macro in macros:\n ffi.cdef(macro)\n\n # We include functions here so that if we got any of their definitions\n # wrong, the underlying C compiler will explode. In C you are allowed\n # to re-declare a function if it has the same signature. That is:\n # int foo(int);\n # int foo(int);\n # is legal, but the following will fail to compile:\n # int foo(int);\n # int foo(short);\n lib = ffi.verify(\n source=\"\\n\".join(\n [pre_include] +\n includes +\n [post_include] +\n functions +\n customizations\n ),\n libraries=libraries,\n ext_package=\"cryptography\",\n )\n\n for name in modules:\n module_name = module_prefix + name\n module = sys.modules[module_name]\n for condition, names in module.CONDITIONAL_NAMES.items():\n if not getattr(lib, condition):\n for name in names:\n delattr(lib, name)\n\n return ffi, lib\n", "path": "cryptography/hazmat/bindings/utils.py"}]} | 2,005 | 302 |
gh_patches_debug_20421 | rasdani/github-patches | git_diff | psychopy__psychopy-1123 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Builder 'Aperture' component cannot have indefinite duration
If a Builder 'Aperture' component is set to have an indefinite duration, the actual result is that it is enabled and then immediately disabled. Hence, it has no effect on other stimuli. Below is example code that gets generated when an indefinite duration is specified:
```
# *aperture* updates
if t >= 0.0 and aperture.status == NOT_STARTED:
# keep track of start time/frame for later
aperture.tStart = t # underestimates by a little under one frame
aperture.frameNStart = frameN # exact frame index
aperture.enabled = True
if aperture.status == STARTED and t >= (0.0 + (-win.monitorFramePeriod*0.75)): #most of one frame period left
aperture.enabled = False
if aperture.status == STARTED: # only update if being drawn
aperture.setPos(mouse.getPos(), log=False)
```
The error is in the second if/then above, which should not occur. It is included as if there was a maximum duration in effect, but the value is omitted in the `(-win.monitorFramePeriod*0.75)` expression and hence the duration is effectively zero.
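For comparison, here is a hypothetical fragment (not actual generated output) of what the same stop test presumably looks like when a finite duration of 1.0 s is entered, and what should happen in the indefinite case:

```python
# Hypothetical fragment of the generated experiment script, assuming a 1.0 s duration:
if aperture.status == STARTED and t >= (0.0 + (1.0 - win.monitorFramePeriod*0.75)):
    aperture.enabled = False

# With an indefinite duration, no stop test should be emitted at all; the aperture
# should simply stay enabled until the routine ends.
```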
The fix here should be simple but I'm not familiar with playing with the Builder component generation code. So recording the bug here until I can figure out how to fix it (and test).
A work-around for users in the mean time would be to simply put an unfeasibly large number in the duration field (e.g. 9999).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `psychopy/app/builder/components/aperture.py`
Content:
```
1 # Part of the PsychoPy library
2 # Copyright (C) 2015 Jonathan Peirce
3 # Distributed under the terms of the GNU General Public License (GPL).
4
5 from os import path
6 from ._base import BaseVisualComponent, getInitVals
7
8 __author__ = 'Jeremy Gray, Jon Peirce'
9 # March 2011; builder-component for Yuri Spitsyn's visual.Aperture class
10 # July 2011: jwp added the code for it to be enabled only when needed
11
12 # the absolute path to the folder containing this path
13 thisFolder = path.abspath(path.dirname(__file__))
14 iconFile = path.join(thisFolder, 'aperture.png')
15 tooltip = _translate('Aperture: restrict the drawing of stimuli to a given '
16 'region')
17
18
19 class ApertureComponent(BaseVisualComponent):
20 """An event class for using GL stencil to restrict the viewing area to a
21 circle or square of a given size and position"""
22
23 def __init__(self, exp, parentName, name='aperture', units='norm',
24 size=1, pos=(0, 0),
25 startType='time (s)', startVal=0.0,
26 stopType='duration (s)', stopVal=1.0,
27 startEstim='', durationEstim=''):
28 # initialise main parameters
29 super(ApertureComponent, self).__init__(
30 exp, parentName, name=name, units=units,
31 pos=pos, size=size,
32 startType=startType, startVal=startVal,
33 stopType=stopType, stopVal=stopVal,
34 startEstim=startEstim, durationEstim=durationEstim)
35
36 self.type = 'Aperture'
37 self.url = "http://www.psychopy.org/builder/components/aperture.html"
38 # params:
39 # NB make some adjustments on the params defined by _visual component
40 self.order = ['name', 'size', 'pos'] # make sure this is at top
41
42 msg = _translate("How big is the aperture? (a single number for diameter)")
43 self.params['size'].hint = msg
44 # only localize hints and labels
45 self.params['size'].label = _translate("Size")
46 self.params['pos'].hint = _translate("Where is the aperture centred?")
47
48 # inherited from _visual component but not needed
49 del self.params['ori']
50 del self.params['color']
51 del self.params['colorSpace']
52 del self.params['opacity']
53
54 def writeInitCode(self, buff):
55 # do we need units code?
56 if self.params['units'].val == 'from exp settings':
57 unitsStr = ""
58 else:
59 unitsStr = "units=%(units)s, " % self.params
60
61 # do writing of init
62 inits = getInitVals(self.params)
63
64 code = ("%(name)s = visual.Aperture(\n"
65 " win=win, name='%(name)s',\n"
66 " " + unitsStr + "size=%(size)s, pos=%(pos)s)\n"
67 "%(name)s.disable() # disable until its actually used\n")
68 buff.writeIndentedLines(code % inits)
69
70 def writeFrameCode(self, buff):
71 """Only activate the aperture for the required frames
72 """
73
74 buff.writeIndented("\n")
75 buff.writeIndented("# *%s* updates\n" % (self.params['name']))
76 # writes an if statement to determine whether to draw etc
77 self.writeStartTestCode(buff)
78 buff.writeIndented("%(name)s.enabled = True\n" % self.params)
79 # to get out of the if statement
80 buff.setIndentLevel(-1, relative=True)
81 # writes an if statement to determine whether to draw etc
82 self.writeStopTestCode(buff)
83 buff.writeIndented("%(name)s.enabled = False\n" % self.params)
84 # to get out of the if statement
85 buff.setIndentLevel(-1, relative=True)
86 # set parameters that need updating every frame
87 # do any params need updating? (this method inherited from _base)
88 if self.checkNeedToUpdate('set every frame'):
89 code = ("if %(name)s.status == STARTED: # only update if being drawn\n")
90 buff.writeIndented(code % self.params)
91
92 buff.setIndentLevel(+1, relative=True) # to enter the if block
93 self.writeParamUpdates(buff, 'set every frame')
94 buff.setIndentLevel(-1, relative=True) # to exit the if block
95
96 def writeRoutineEndCode(self, buff):
97 msg = "%(name)s.enabled = False # just in case it was left enabled\n"
98 buff.writeIndented(msg % self.params)
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/psychopy/app/builder/components/aperture.py b/psychopy/app/builder/components/aperture.py
--- a/psychopy/app/builder/components/aperture.py
+++ b/psychopy/app/builder/components/aperture.py
@@ -78,11 +78,12 @@
buff.writeIndented("%(name)s.enabled = True\n" % self.params)
# to get out of the if statement
buff.setIndentLevel(-1, relative=True)
- # writes an if statement to determine whether to draw etc
- self.writeStopTestCode(buff)
- buff.writeIndented("%(name)s.enabled = False\n" % self.params)
- # to get out of the if statement
- buff.setIndentLevel(-1, relative=True)
+ if self.params['stopVal'].val not in ['', None, -1, 'None']:
+ # writes an if statement to determine whether to draw etc
+ self.writeStopTestCode(buff)
+ buff.writeIndented("%(name)s.enabled = False\n" % self.params)
+ # to get out of the if statement
+ buff.setIndentLevel(-1, relative=True)
# set parameters that need updating every frame
# do any params need updating? (this method inherited from _base)
if self.checkNeedToUpdate('set every frame'):
| {"golden_diff": "diff --git a/psychopy/app/builder/components/aperture.py b/psychopy/app/builder/components/aperture.py\n--- a/psychopy/app/builder/components/aperture.py\n+++ b/psychopy/app/builder/components/aperture.py\n@@ -78,11 +78,12 @@\n buff.writeIndented(\"%(name)s.enabled = True\\n\" % self.params)\n # to get out of the if statement\n buff.setIndentLevel(-1, relative=True)\n- # writes an if statement to determine whether to draw etc\n- self.writeStopTestCode(buff)\n- buff.writeIndented(\"%(name)s.enabled = False\\n\" % self.params)\n- # to get out of the if statement\n- buff.setIndentLevel(-1, relative=True)\n+ if self.params['stopVal'].val not in ['', None, -1, 'None']:\n+ # writes an if statement to determine whether to draw etc\n+ self.writeStopTestCode(buff)\n+ buff.writeIndented(\"%(name)s.enabled = False\\n\" % self.params)\n+ # to get out of the if statement\n+ buff.setIndentLevel(-1, relative=True)\n # set parameters that need updating every frame\n # do any params need updating? (this method inherited from _base)\n if self.checkNeedToUpdate('set every frame'):\n", "issue": "Builder 'Aperture' component cannot have indefinite duration\nIf a Builder 'Aperture' component is set to have an indefinite duration, the actual result is that it is enabled and then immediately disabled. Hence, it has no effect on other stimuli. Below is example code that gets generated when an indefinite duration is specified:\n\n```\n# *aperture* updates\nif t >= 0.0 and aperture.status == NOT_STARTED:\n # keep track of start time/frame for later\n aperture.tStart = t # underestimates by a little under one frame\n aperture.frameNStart = frameN # exact frame index\n aperture.enabled = True\nif aperture.status == STARTED and t >= (0.0 + (-win.monitorFramePeriod*0.75)): #most of one frame period left\n aperture.enabled = False\nif aperture.status == STARTED: # only update if being drawn\n aperture.setPos(mouse.getPos(), log=False)\n```\n\nThe error is in the second if/then above, which should not occur. It is included as if there was a maximum duration in effect, but the value is omitted in the `(-win.monitorFramePeriod*0.75)` expression and hence the duration is effectively zero.\n\nThe fix here should be simple but I'm not familiar with playing with the Builder component generation code. So recording the bug here until I can figure out how to fix it (and test).\n\nA work-around for users in the mean time would be to simply put an unfeasibly large number in the duration field (e.g. 
9999).\n\n", "before_files": [{"content": "# Part of the PsychoPy library\n# Copyright (C) 2015 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\nfrom os import path\nfrom ._base import BaseVisualComponent, getInitVals\n\n__author__ = 'Jeremy Gray, Jon Peirce'\n# March 2011; builder-component for Yuri Spitsyn's visual.Aperture class\n# July 2011: jwp added the code for it to be enabled only when needed\n\n# the absolute path to the folder containing this path\nthisFolder = path.abspath(path.dirname(__file__))\niconFile = path.join(thisFolder, 'aperture.png')\ntooltip = _translate('Aperture: restrict the drawing of stimuli to a given '\n 'region')\n\n\nclass ApertureComponent(BaseVisualComponent):\n \"\"\"An event class for using GL stencil to restrict the viewing area to a\n circle or square of a given size and position\"\"\"\n\n def __init__(self, exp, parentName, name='aperture', units='norm',\n size=1, pos=(0, 0),\n startType='time (s)', startVal=0.0,\n stopType='duration (s)', stopVal=1.0,\n startEstim='', durationEstim=''):\n # initialise main parameters\n super(ApertureComponent, self).__init__(\n exp, parentName, name=name, units=units,\n pos=pos, size=size,\n startType=startType, startVal=startVal,\n stopType=stopType, stopVal=stopVal,\n startEstim=startEstim, durationEstim=durationEstim)\n\n self.type = 'Aperture'\n self.url = \"http://www.psychopy.org/builder/components/aperture.html\"\n # params:\n # NB make some adjustments on the params defined by _visual component\n self.order = ['name', 'size', 'pos'] # make sure this is at top\n\n msg = _translate(\"How big is the aperture? (a single number for diameter)\")\n self.params['size'].hint = msg\n # only localize hints and labels\n self.params['size'].label = _translate(\"Size\")\n self.params['pos'].hint = _translate(\"Where is the aperture centred?\")\n\n # inherited from _visual component but not needed\n del self.params['ori']\n del self.params['color']\n del self.params['colorSpace']\n del self.params['opacity']\n\n def writeInitCode(self, buff):\n # do we need units code?\n if self.params['units'].val == 'from exp settings':\n unitsStr = \"\"\n else:\n unitsStr = \"units=%(units)s, \" % self.params\n\n # do writing of init\n inits = getInitVals(self.params)\n\n code = (\"%(name)s = visual.Aperture(\\n\"\n \" win=win, name='%(name)s',\\n\"\n \" \" + unitsStr + \"size=%(size)s, pos=%(pos)s)\\n\"\n \"%(name)s.disable() # disable until its actually used\\n\")\n buff.writeIndentedLines(code % inits)\n\n def writeFrameCode(self, buff):\n \"\"\"Only activate the aperture for the required frames\n \"\"\"\n\n buff.writeIndented(\"\\n\")\n buff.writeIndented(\"# *%s* updates\\n\" % (self.params['name']))\n # writes an if statement to determine whether to draw etc\n self.writeStartTestCode(buff)\n buff.writeIndented(\"%(name)s.enabled = True\\n\" % self.params)\n # to get out of the if statement\n buff.setIndentLevel(-1, relative=True)\n # writes an if statement to determine whether to draw etc\n self.writeStopTestCode(buff)\n buff.writeIndented(\"%(name)s.enabled = False\\n\" % self.params)\n # to get out of the if statement\n buff.setIndentLevel(-1, relative=True)\n # set parameters that need updating every frame\n # do any params need updating? 
(this method inherited from _base)\n if self.checkNeedToUpdate('set every frame'):\n code = (\"if %(name)s.status == STARTED: # only update if being drawn\\n\")\n buff.writeIndented(code % self.params)\n\n buff.setIndentLevel(+1, relative=True) # to enter the if block\n self.writeParamUpdates(buff, 'set every frame')\n buff.setIndentLevel(-1, relative=True) # to exit the if block\n\n def writeRoutineEndCode(self, buff):\n msg = \"%(name)s.enabled = False # just in case it was left enabled\\n\"\n buff.writeIndented(msg % self.params)\n", "path": "psychopy/app/builder/components/aperture.py"}], "after_files": [{"content": "# Part of the PsychoPy library\n# Copyright (C) 2015 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\nfrom os import path\nfrom ._base import BaseVisualComponent, getInitVals\n\n__author__ = 'Jeremy Gray, Jon Peirce'\n# March 2011; builder-component for Yuri Spitsyn's visual.Aperture class\n# July 2011: jwp added the code for it to be enabled only when needed\n\n# the absolute path to the folder containing this path\nthisFolder = path.abspath(path.dirname(__file__))\niconFile = path.join(thisFolder, 'aperture.png')\ntooltip = _translate('Aperture: restrict the drawing of stimuli to a given '\n 'region')\n\n\nclass ApertureComponent(BaseVisualComponent):\n \"\"\"An event class for using GL stencil to restrict the viewing area to a\n circle or square of a given size and position\"\"\"\n\n def __init__(self, exp, parentName, name='aperture', units='norm',\n size=1, pos=(0, 0),\n startType='time (s)', startVal=0.0,\n stopType='duration (s)', stopVal=1.0,\n startEstim='', durationEstim=''):\n # initialise main parameters\n super(ApertureComponent, self).__init__(\n exp, parentName, name=name, units=units,\n pos=pos, size=size,\n startType=startType, startVal=startVal,\n stopType=stopType, stopVal=stopVal,\n startEstim=startEstim, durationEstim=durationEstim)\n\n self.type = 'Aperture'\n self.url = \"http://www.psychopy.org/builder/components/aperture.html\"\n # params:\n # NB make some adjustments on the params defined by _visual component\n self.order = ['name', 'size', 'pos'] # make sure this is at top\n\n msg = _translate(\"How big is the aperture? 
(a single number for diameter)\")\n self.params['size'].hint = msg\n # only localize hints and labels\n self.params['size'].label = _translate(\"Size\")\n self.params['pos'].hint = _translate(\"Where is the aperture centred?\")\n\n # inherited from _visual component but not needed\n del self.params['ori']\n del self.params['color']\n del self.params['colorSpace']\n del self.params['opacity']\n\n def writeInitCode(self, buff):\n # do we need units code?\n if self.params['units'].val == 'from exp settings':\n unitsStr = \"\"\n else:\n unitsStr = \"units=%(units)s, \" % self.params\n\n # do writing of init\n inits = getInitVals(self.params)\n\n code = (\"%(name)s = visual.Aperture(\\n\"\n \" win=win, name='%(name)s',\\n\"\n \" \" + unitsStr + \"size=%(size)s, pos=%(pos)s)\\n\"\n \"%(name)s.disable() # disable until its actually used\\n\")\n buff.writeIndentedLines(code % inits)\n\n def writeFrameCode(self, buff):\n \"\"\"Only activate the aperture for the required frames\n \"\"\"\n\n buff.writeIndented(\"\\n\")\n buff.writeIndented(\"# *%s* updates\\n\" % (self.params['name']))\n # writes an if statement to determine whether to draw etc\n self.writeStartTestCode(buff)\n buff.writeIndented(\"%(name)s.enabled = True\\n\" % self.params)\n # to get out of the if statement\n buff.setIndentLevel(-1, relative=True)\n if self.params['stopVal'].val not in ['', None, -1, 'None']:\n # writes an if statement to determine whether to draw etc\n self.writeStopTestCode(buff)\n buff.writeIndented(\"%(name)s.enabled = False\\n\" % self.params)\n # to get out of the if statement\n buff.setIndentLevel(-1, relative=True)\n # set parameters that need updating every frame\n # do any params need updating? (this method inherited from _base)\n if self.checkNeedToUpdate('set every frame'):\n code = (\"if %(name)s.status == STARTED: # only update if being drawn\\n\")\n buff.writeIndented(code % self.params)\n\n buff.setIndentLevel(+1, relative=True) # to enter the if block\n self.writeParamUpdates(buff, 'set every frame')\n buff.setIndentLevel(-1, relative=True) # to exit the if block\n\n def writeRoutineEndCode(self, buff):\n msg = \"%(name)s.enabled = False # just in case it was left enabled\\n\"\n buff.writeIndented(msg % self.params)\n", "path": "psychopy/app/builder/components/aperture.py"}]} | 1,802 | 290 |
gh_patches_debug_443 | rasdani/github-patches | git_diff | doccano__doccano-1209 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ModuleNotFoundError: No module named 'auto_labeling_pipeline'
How to reproduce the behaviour
---------
<!-- Before submitting an issue, make sure to check the docs and closed issues and FAQ to see if any of the solutions work for you. https://github.com/doccano/doccano/wiki/Frequently-Asked-Questions -->
<!-- Include a code example or the steps that led to the problem. Please try to be as specific as possible. -->
I just installed `Doccano==1.2.0` (released just now) with `pip install doccano` on my Linux machine to check out the `auto_labeling` feature. However, I got the following error running `doccano` in the shell.
```
>>> doccano
```
```
Setup Database.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/app/manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 377, in execute
django.setup()
File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate
app_config.import_models()
File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module>
from auto_labeling_pipeline.models import RequestModelFactory
ModuleNotFoundError: No module named 'auto_labeling_pipeline'
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/app/manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 377, in execute
django.setup()
File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate
app_config.import_models()
File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module>
from auto_labeling_pipeline.models import RequestModelFactory
ModuleNotFoundError: No module named 'auto_labeling_pipeline'
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/app/manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 377, in execute
django.setup()
File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate
app_config.import_models()
File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module>
from auto_labeling_pipeline.models import RequestModelFactory
ModuleNotFoundError: No module named 'auto_labeling_pipeline'
Create admin user.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/app/manage.py", line 15, in <module>
execute_from_command_line(sys.argv)
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 401, in execute_from_command_line
utility.execute()
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 377, in execute
django.setup()
File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate
app_config.import_models()
File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module>
from auto_labeling_pipeline.models import RequestModelFactory
ModuleNotFoundError: No module named 'auto_labeling_pipeline'
Starting server with port 8000.
Exception in thread django-main-thread:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py", line 53, in wrapper
fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/django/core/management/commands/runserver.py", line 110, in inner_run
autoreload.raise_last_exception()
File "/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py", line 76, in raise_last_exception
raise _exception[1]
File "/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py", line 357, in execute
autoreload.check_errors(django.setup)()
File "/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py", line 53, in wrapper
fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/django/__init__.py", line 24, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/lib/python3.6/dist-packages/django/apps/registry.py", line 114, in populate
app_config.import_models()
File "/usr/local/lib/python3.6/dist-packages/django/apps/config.py", line 211, in import_models
self.models_module = import_module(models_module_name)
File "/usr/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/usr/local/lib/python3.6/dist-packages/app/api/models.py", line 3, in <module>
from auto_labeling_pipeline.models import RequestModelFactory
ModuleNotFoundError: No module named 'auto_labeling_pipeline'
```
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: Ubuntu 18.04.5 LTS
* Python Version Used: 3.6.9
* When you install doccano: 02/19/21 7:40 AM GMT
* How did you install doccano (Heroku button etc): `pip install doccano`
* Doccano version: 1.2.0
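
A likely interim workaround, assuming the missing module is provided by the `auto-labeling-pipeline` distribution on PyPI, is to install it into the same environment by hand. A minimal sketch:

```python
# Minimal sketch: install the assumed "auto-labeling-pipeline" distribution if the
# module named in the traceback cannot be found in this environment.
import importlib.util
import subprocess
import sys

if importlib.util.find_spec("auto_labeling_pipeline") is None:
    subprocess.check_call(
        [sys.executable, "-m", "pip", "install", "auto-labeling-pipeline"]
    )
```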
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 import io
4 import os
5
6 from setuptools import find_packages, setup
7
8 NAME = 'doccano'
9 DESCRIPTION = 'doccano, text annotation tool for machine learning practitioners'
10 URL = 'https://github.com/doccano/doccano'
11 EMAIL = '[email protected]'
12 AUTHOR = 'Hironsan'
13 LICENSE = 'MIT'
14
15 here = os.path.abspath(os.path.dirname(__file__))
16 with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
17 long_description = '\n' + f.read()
18
19 required = [
20 'apache-libcloud>=3.2.0',
21 'colour>=0.1.5',
22 'conllu>=4.2.2',
23 'dj-database-url>=0.5.0',
24 'django-cors-headers>=3.5.0',
25 'django-filter>=2.4.0',
26 'django-rest-polymorphic>=0.1.9',
27 'djangorestframework-csv>=2.1.0',
28 'djangorestframework-xml>=2.0.0',
29 'drf-yasg>=1.20.0',
30 'environs>=9.2.0',
31 'furl>=2.1.0',
32 'pyexcel>=0.6.6',
33 'pyexcel-xlsx>=0.6.0',
34 'python-jose>=3.2.0',
35 'seqeval>=1.2.2',
36 'social-auth-app-django>=4.0.0',
37 'whitenoise>=5.2.0'
38 ]
39
40 setup(
41 name=NAME,
42 use_scm_version=True,
43 setup_requires=['setuptools_scm'],
44 description=DESCRIPTION,
45 long_description=long_description,
46 long_description_content_type='text/markdown',
47 author=AUTHOR,
48 author_email=EMAIL,
49 url=URL,
50 packages=find_packages(exclude=('*.tests',)),
51 entry_points={
52 'console_scripts': [
53 'doccano = app.doccano.doccano:main'
54 ]
55 },
56 install_requires=required,
57 extras_require={
58 'postgresql': ['psycopg2-binary>=2.8.6'],
59 'mssql': ['django-mssql-backend>=2.8.1'],
60 },
61 include_package_data=True,
62 license=LICENSE,
63 classifiers=[
64 'License :: OSI Approved :: MIT License',
65 'Programming Language :: Python',
66 'Programming Language :: Python :: 3.6',
67 'Programming Language :: Python :: 3.7',
68 'Programming Language :: Python :: 3.8',
69 'Programming Language :: Python :: Implementation :: CPython',
70 'Programming Language :: Python :: Implementation :: PyPy'
71 ],
72 )
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -34,7 +34,8 @@
'python-jose>=3.2.0',
'seqeval>=1.2.2',
'social-auth-app-django>=4.0.0',
- 'whitenoise>=5.2.0'
+ 'whitenoise>=5.2.0',
+ 'auto-labeling-pipeline>=0.1.12'
]
setup(
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -34,7 +34,8 @@\n 'python-jose>=3.2.0',\n 'seqeval>=1.2.2',\n 'social-auth-app-django>=4.0.0',\n- 'whitenoise>=5.2.0'\n+ 'whitenoise>=5.2.0',\n+ 'auto-labeling-pipeline>=0.1.12'\n ]\n \n setup(\n", "issue": "ModuleNotFoundError: No module named 'auto_labeling_pipeline'\nHow to reproduce the behaviour\r\n---------\r\n<!-- Before submitting an issue, make sure to check the docs and closed issues and FAQ to see if any of the solutions work for you. https://github.com/doccano/doccano/wiki/Frequently-Asked-Questions -->\r\n\r\n<!-- Include a code example or the steps that led to the problem. Please try to be as specific as possible. -->\r\n\r\nI just installed `Doccano==1.2.0` (released just now..) with `pip install doccano` on my Linux machine to check out the `auto_labeling` feature. However, I got the following error running `doccano` in the shell.\r\n\r\n```\r\n>>> doccano\r\n```\r\n\r\n```\r\nSetup Database.\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/app/manage.py\", line 15, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 401, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 377, in execute\r\n django.setup()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/__init__.py\", line 24, in setup\r\n apps.populate(settings.INSTALLED_APPS)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/registry.py\", line 114, in populate\r\n app_config.import_models()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/config.py\", line 211, in import_models\r\n self.models_module = import_module(models_module_name)\r\n File \"/usr/lib/python3.6/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 955, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 665, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/usr/local/lib/python3.6/dist-packages/app/api/models.py\", line 3, in <module>\r\n from auto_labeling_pipeline.models import RequestModelFactory\r\nModuleNotFoundError: No module named 'auto_labeling_pipeline'\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/app/manage.py\", line 15, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 401, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 377, in execute\r\n django.setup()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/__init__.py\", line 24, in setup\r\n apps.populate(settings.INSTALLED_APPS)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/registry.py\", line 114, in populate\r\n app_config.import_models()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/config.py\", line 211, in import_models\r\n self.models_module = 
import_module(models_module_name)\r\n File \"/usr/lib/python3.6/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 955, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 665, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/usr/local/lib/python3.6/dist-packages/app/api/models.py\", line 3, in <module>\r\n from auto_labeling_pipeline.models import RequestModelFactory\r\nModuleNotFoundError: No module named 'auto_labeling_pipeline'\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/app/manage.py\", line 15, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 401, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 377, in execute\r\n django.setup()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/__init__.py\", line 24, in setup\r\n apps.populate(settings.INSTALLED_APPS)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/registry.py\", line 114, in populate\r\n app_config.import_models()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/config.py\", line 211, in import_models\r\n self.models_module = import_module(models_module_name)\r\n File \"/usr/lib/python3.6/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 955, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 665, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/usr/local/lib/python3.6/dist-packages/app/api/models.py\", line 3, in <module>\r\n from auto_labeling_pipeline.models import RequestModelFactory\r\nModuleNotFoundError: No module named 'auto_labeling_pipeline'\r\nCreate admin user.\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/app/manage.py\", line 15, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 401, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 377, in execute\r\n django.setup()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/__init__.py\", line 24, in setup\r\n apps.populate(settings.INSTALLED_APPS)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/registry.py\", line 114, in populate\r\n app_config.import_models()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/config.py\", line 211, in import_models\r\n self.models_module = import_module(models_module_name)\r\n File \"/usr/lib/python3.6/importlib/__init__.py\", line 126, in import_module\r\n return 
_bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 955, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 665, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/usr/local/lib/python3.6/dist-packages/app/api/models.py\", line 3, in <module>\r\n from auto_labeling_pipeline.models import RequestModelFactory\r\nModuleNotFoundError: No module named 'auto_labeling_pipeline'\r\nStarting server with port 8000.\r\nException in thread django-main-thread:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.6/threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.6/threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py\", line 53, in wrapper\r\n fn(*args, **kwargs)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/commands/runserver.py\", line 110, in inner_run\r\n autoreload.raise_last_exception()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py\", line 76, in raise_last_exception\r\n raise _exception[1]\r\n File \"/usr/local/lib/python3.6/dist-packages/django/core/management/__init__.py\", line 357, in execute\r\n autoreload.check_errors(django.setup)()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/utils/autoreload.py\", line 53, in wrapper\r\n fn(*args, **kwargs)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/__init__.py\", line 24, in setup\r\n apps.populate(settings.INSTALLED_APPS)\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/registry.py\", line 114, in populate\r\n app_config.import_models()\r\n File \"/usr/local/lib/python3.6/dist-packages/django/apps/config.py\", line 211, in import_models\r\n self.models_module = import_module(models_module_name)\r\n File \"/usr/lib/python3.6/importlib/__init__.py\", line 126, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 994, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 971, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 955, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 665, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 678, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/usr/local/lib/python3.6/dist-packages/app/api/models.py\", line 3, in <module>\r\n from auto_labeling_pipeline.models import RequestModelFactory\r\nModuleNotFoundError: No module named 'auto_labeling_pipeline'\r\n```\r\n\r\nYour Environment\r\n---------\r\n<!-- Include details of your environment.-->\r\n* Operating System: Ubuntu 18.04.5 LTS\r\n* Python Version Used: 3.6.9\r\n* When you install doccano: 02/19/21 7:40 AM GMT\r\n* How did you install doccano (Heroku button etc): `pip install doccano`\r\n* Doccano version: 1.2.0\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport io\nimport os\n\nfrom setuptools import find_packages, setup\n\nNAME = 'doccano'\nDESCRIPTION = 'doccano, text annotation tool for machine learning 
practitioners'\nURL = 'https://github.com/doccano/doccano'\nEMAIL = '[email protected]'\nAUTHOR = 'Hironsan'\nLICENSE = 'MIT'\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = '\\n' + f.read()\n\nrequired = [\n 'apache-libcloud>=3.2.0',\n 'colour>=0.1.5',\n 'conllu>=4.2.2',\n 'dj-database-url>=0.5.0',\n 'django-cors-headers>=3.5.0',\n 'django-filter>=2.4.0',\n 'django-rest-polymorphic>=0.1.9',\n 'djangorestframework-csv>=2.1.0',\n 'djangorestframework-xml>=2.0.0',\n 'drf-yasg>=1.20.0',\n 'environs>=9.2.0',\n 'furl>=2.1.0',\n 'pyexcel>=0.6.6',\n 'pyexcel-xlsx>=0.6.0',\n 'python-jose>=3.2.0',\n 'seqeval>=1.2.2',\n 'social-auth-app-django>=4.0.0',\n 'whitenoise>=5.2.0'\n]\n\nsetup(\n name=NAME,\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n description=DESCRIPTION,\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=AUTHOR,\n author_email=EMAIL,\n url=URL,\n packages=find_packages(exclude=('*.tests',)),\n entry_points={\n 'console_scripts': [\n 'doccano = app.doccano.doccano:main'\n ]\n },\n install_requires=required,\n extras_require={\n 'postgresql': ['psycopg2-binary>=2.8.6'],\n 'mssql': ['django-mssql-backend>=2.8.1'],\n },\n include_package_data=True,\n license=LICENSE,\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy'\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport io\nimport os\n\nfrom setuptools import find_packages, setup\n\nNAME = 'doccano'\nDESCRIPTION = 'doccano, text annotation tool for machine learning practitioners'\nURL = 'https://github.com/doccano/doccano'\nEMAIL = '[email protected]'\nAUTHOR = 'Hironsan'\nLICENSE = 'MIT'\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = '\\n' + f.read()\n\nrequired = [\n 'apache-libcloud>=3.2.0',\n 'colour>=0.1.5',\n 'conllu>=4.2.2',\n 'dj-database-url>=0.5.0',\n 'django-cors-headers>=3.5.0',\n 'django-filter>=2.4.0',\n 'django-rest-polymorphic>=0.1.9',\n 'djangorestframework-csv>=2.1.0',\n 'djangorestframework-xml>=2.0.0',\n 'drf-yasg>=1.20.0',\n 'environs>=9.2.0',\n 'furl>=2.1.0',\n 'pyexcel>=0.6.6',\n 'pyexcel-xlsx>=0.6.0',\n 'python-jose>=3.2.0',\n 'seqeval>=1.2.2',\n 'social-auth-app-django>=4.0.0',\n 'whitenoise>=5.2.0',\n 'auto-labeling-pipeline>=0.1.12'\n]\n\nsetup(\n name=NAME,\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n description=DESCRIPTION,\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=AUTHOR,\n author_email=EMAIL,\n url=URL,\n packages=find_packages(exclude=('*.tests',)),\n entry_points={\n 'console_scripts': [\n 'doccano = app.doccano.doccano:main'\n ]\n },\n install_requires=required,\n extras_require={\n 'postgresql': ['psycopg2-binary>=2.8.6'],\n 'mssql': ['django-mssql-backend>=2.8.1'],\n },\n include_package_data=True,\n license=LICENSE,\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: 
Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy'\n ],\n)\n", "path": "setup.py"}]} | 3,659 | 117 |
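For context on the failure mode in the record above — a runtime import (`auto_labeling_pipeline`) that `install_requires` never declared — a minimal smoke-test sketch is shown below. The script and its module list are assumptions for illustration only, not part of the doccano repository; it fails the same way the Django app did before `auto-labeling-pipeline>=0.1.12` was added to `setup.py`.

```python
# Hypothetical packaging smoke test: confirm that every runtime import the
# application needs can actually be resolved in the installed environment.
import importlib
import sys

# Example module names only; "auto_labeling_pipeline" is the one that was
# missing from install_requires before the patch above.
REQUIRED_IMPORTS = [
    "django",
    "auto_labeling_pipeline",
]


def find_missing(modules):
    """Return the subset of module names that cannot be imported."""
    missing = []
    for name in modules:
        try:
            importlib.import_module(name)
        except ModuleNotFoundError:
            missing.append(name)
    return missing


if __name__ == "__main__":
    missing = find_missing(REQUIRED_IMPORTS)
    if missing:
        print(f"missing runtime dependencies: {missing}", file=sys.stderr)
        sys.exit(1)
    print("all runtime imports resolved")
```

Catching the gap at install time is cheaper than discovering it as a `ModuleNotFoundError` during `manage.py` startup, which is where the traceback above comes from.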
gh_patches_debug_64631 | rasdani/github-patches | git_diff | quantumlib__Cirq-2370 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Escape unicode characters when producing latex
Currently, the qcircuit to latex code passes through unicode characters like ↺ ↻. LaTeX may not be happy about this. We should replace the unicode characters with some that LaTeX is happy with. Or perhaps throw an error?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/contrib/qcircuit/qcircuit_pdf.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # coverage: ignore
16
17 import errno
18 import os
19
20 from pylatex import Document, NoEscape, Package
21
22 from cirq import circuits
23 from cirq.contrib.qcircuit.qcircuit_diagram import (
24 circuit_to_latex_using_qcircuit)
25
26
27 def circuit_to_pdf_using_qcircuit_via_tex(circuit: circuits.Circuit,
28 filepath: str,
29 pdf_kwargs=None,
30 qcircuit_kwargs=None,
31 clean_ext=('dvi', 'ps'),
32 documentclass='article'):
33 """Compiles the QCircuit-based latex diagram of the given circuit.
34
35 Args:
36 circuit: The circuit to produce a pdf of.
37 filepath: Where to output the pdf.
38 pdf_kwargs: The arguments to pass to generate_pdf.
39 qcircuit_kwargs: The arguments to pass to
40 circuit_to_latex_using_qcircuit.
41 clean_ext: The file extensions to clean up after compilation. By
42 default, latexmk is used with the '-pdfps' flag, which produces
43 intermediary dvi and ps files.
44 documentclass: The documentclass of the latex file.
45 """
46 pdf_kwargs = {'compiler': 'latexmk', 'compiler_args': ['-pdfps'],
47 **({} if pdf_kwargs is None else pdf_kwargs)}
48 qcircuit_kwargs = {} if qcircuit_kwargs is None else qcircuit_kwargs
49 tex = circuit_to_latex_using_qcircuit(circuit, **qcircuit_kwargs)
50 doc = Document(documentclass=documentclass, document_options='dvips')
51 doc.packages.append(Package('amsmath'))
52 doc.packages.append(Package('qcircuit'))
53 doc.append(NoEscape(tex))
54 doc.generate_pdf(filepath, **pdf_kwargs)
55 for ext in clean_ext:
56 try:
57 os.remove(filepath + '.' + ext)
58 except (OSError, IOError) as e:
59 if e.errno != errno.ENOENT:
60 raise
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq/contrib/qcircuit/qcircuit_pdf.py b/cirq/contrib/qcircuit/qcircuit_pdf.py
--- a/cirq/contrib/qcircuit/qcircuit_pdf.py
+++ b/cirq/contrib/qcircuit/qcircuit_pdf.py
@@ -50,6 +50,7 @@
doc = Document(documentclass=documentclass, document_options='dvips')
doc.packages.append(Package('amsmath'))
doc.packages.append(Package('qcircuit'))
+ doc.preamble.append(Package('inputenc', options=['utf8']))
doc.append(NoEscape(tex))
doc.generate_pdf(filepath, **pdf_kwargs)
for ext in clean_ext:
| {"golden_diff": "diff --git a/cirq/contrib/qcircuit/qcircuit_pdf.py b/cirq/contrib/qcircuit/qcircuit_pdf.py\n--- a/cirq/contrib/qcircuit/qcircuit_pdf.py\n+++ b/cirq/contrib/qcircuit/qcircuit_pdf.py\n@@ -50,6 +50,7 @@\n doc = Document(documentclass=documentclass, document_options='dvips')\n doc.packages.append(Package('amsmath'))\n doc.packages.append(Package('qcircuit'))\n+ doc.preamble.append(Package('inputenc', options=['utf8']))\n doc.append(NoEscape(tex))\n doc.generate_pdf(filepath, **pdf_kwargs)\n for ext in clean_ext:\n", "issue": "Escape unicode characters when producing latex\nCurrently, the qcircuit to latex code passes through unicode characters like \u21ba \u21bb. LaTeX may not be happy about this. We should replace the unicode characters with some that LaTeX is happy with. Or perhaps throw an error?\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coverage: ignore\n\nimport errno\nimport os\n\nfrom pylatex import Document, NoEscape, Package\n\nfrom cirq import circuits\nfrom cirq.contrib.qcircuit.qcircuit_diagram import (\n circuit_to_latex_using_qcircuit)\n\n\ndef circuit_to_pdf_using_qcircuit_via_tex(circuit: circuits.Circuit,\n filepath: str,\n pdf_kwargs=None,\n qcircuit_kwargs=None,\n clean_ext=('dvi', 'ps'),\n documentclass='article'):\n \"\"\"Compiles the QCircuit-based latex diagram of the given circuit.\n\n Args:\n circuit: The circuit to produce a pdf of.\n filepath: Where to output the pdf.\n pdf_kwargs: The arguments to pass to generate_pdf.\n qcircuit_kwargs: The arguments to pass to\n circuit_to_latex_using_qcircuit.\n clean_ext: The file extensions to clean up after compilation. By\n default, latexmk is used with the '-pdfps' flag, which produces\n intermediary dvi and ps files.\n documentclass: The documentclass of the latex file.\n \"\"\"\n pdf_kwargs = {'compiler': 'latexmk', 'compiler_args': ['-pdfps'],\n **({} if pdf_kwargs is None else pdf_kwargs)}\n qcircuit_kwargs = {} if qcircuit_kwargs is None else qcircuit_kwargs\n tex = circuit_to_latex_using_qcircuit(circuit, **qcircuit_kwargs)\n doc = Document(documentclass=documentclass, document_options='dvips')\n doc.packages.append(Package('amsmath'))\n doc.packages.append(Package('qcircuit'))\n doc.append(NoEscape(tex))\n doc.generate_pdf(filepath, **pdf_kwargs)\n for ext in clean_ext:\n try:\n os.remove(filepath + '.' 
+ ext)\n except (OSError, IOError) as e:\n if e.errno != errno.ENOENT:\n raise\n", "path": "cirq/contrib/qcircuit/qcircuit_pdf.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coverage: ignore\n\nimport errno\nimport os\n\nfrom pylatex import Document, NoEscape, Package\n\nfrom cirq import circuits\nfrom cirq.contrib.qcircuit.qcircuit_diagram import (\n circuit_to_latex_using_qcircuit)\n\n\ndef circuit_to_pdf_using_qcircuit_via_tex(circuit: circuits.Circuit,\n filepath: str,\n pdf_kwargs=None,\n qcircuit_kwargs=None,\n clean_ext=('dvi', 'ps'),\n documentclass='article'):\n \"\"\"Compiles the QCircuit-based latex diagram of the given circuit.\n\n Args:\n circuit: The circuit to produce a pdf of.\n filepath: Where to output the pdf.\n pdf_kwargs: The arguments to pass to generate_pdf.\n qcircuit_kwargs: The arguments to pass to\n circuit_to_latex_using_qcircuit.\n clean_ext: The file extensions to clean up after compilation. By\n default, latexmk is used with the '-pdfps' flag, which produces\n intermediary dvi and ps files.\n documentclass: The documentclass of the latex file.\n \"\"\"\n pdf_kwargs = {'compiler': 'latexmk', 'compiler_args': ['-pdfps'],\n **({} if pdf_kwargs is None else pdf_kwargs)}\n qcircuit_kwargs = {} if qcircuit_kwargs is None else qcircuit_kwargs\n tex = circuit_to_latex_using_qcircuit(circuit, **qcircuit_kwargs)\n doc = Document(documentclass=documentclass, document_options='dvips')\n doc.packages.append(Package('amsmath'))\n doc.packages.append(Package('qcircuit'))\n doc.preamble.append(Package('inputenc', options=['utf8']))\n doc.append(NoEscape(tex))\n doc.generate_pdf(filepath, **pdf_kwargs)\n for ext in clean_ext:\n try:\n os.remove(filepath + '.' + ext)\n except (OSError, IOError) as e:\n if e.errno != errno.ENOENT:\n raise\n", "path": "cirq/contrib/qcircuit/qcircuit_pdf.py"}]} | 967 | 152 |
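The patch above addresses the unicode problem by declaring the input encoding rather than escaping characters such as ↺ and ↻. A minimal sketch of the same idea outside of Cirq is shown below; it assumes only `pylatex`, and the document body is invented for illustration.

```python
# Sketch: build a LaTeX document that tolerates unicode in its body by
# declaring utf8 input encoding in the preamble, mirroring the one-line fix.
from pylatex import Document, NoEscape, Package


def build_unicode_safe_doc(tex_body: str) -> Document:
    doc = Document(documentclass="article")
    doc.packages.append(Package("amsmath"))
    doc.packages.append(Package("qcircuit"))
    # The crucial line from the patch: without it, the LaTeX run can choke on
    # characters like ↺ ↻ that circuit diagram text may contain.
    doc.preamble.append(Package("inputenc", options=["utf8"]))
    doc.append(NoEscape(tex_body))
    return doc


if __name__ == "__main__":
    doc = build_unicode_safe_doc(r"\text{rotation gate label: ↺}")
    # generate_pdf() needs a LaTeX toolchain; dumping the .tex source is
    # enough to inspect the preamble here.
    print(doc.dumps())
```

The alternative floated in the issue — replacing the unicode characters or raising an error — would also work, but the `inputenc` preamble change keeps the qcircuit output untouched.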
gh_patches_debug_38801 | rasdani/github-patches | git_diff | TheAlgorithms__Python-9083 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Returning index instead of boolean when knuth_morris_pratt matches.
In [knuth morris pratt algorithm](https://github.com/TheAlgorithms/Python/blob/master/strings/knuth_morris_pratt.py), I think it's better to return the "index" of the matched substring in case of success, and `-1` in case of failure, so that it would be consistent with the `str.find()` in Python. `True` and `False` aren't that helpful.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strings/knuth_morris_pratt.py`
Content:
```
1 from __future__ import annotations
2
3
4 def kmp(pattern: str, text: str) -> bool:
5 """
6 The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text
7 with complexity O(n + m)
8
9 1) Preprocess pattern to identify any suffixes that are identical to prefixes
10
11 This tells us where to continue from if we get a mismatch between a character
12 in our pattern and the text.
13
14 2) Step through the text one character at a time and compare it to a character in
15 the pattern updating our location within the pattern if necessary
16
17 """
18
19 # 1) Construct the failure array
20 failure = get_failure_array(pattern)
21
22 # 2) Step through text searching for pattern
23 i, j = 0, 0 # index into text, pattern
24 while i < len(text):
25 if pattern[j] == text[i]:
26 if j == (len(pattern) - 1):
27 return True
28 j += 1
29
30 # if this is a prefix in our pattern
31 # just go back far enough to continue
32 elif j > 0:
33 j = failure[j - 1]
34 continue
35 i += 1
36 return False
37
38
39 def get_failure_array(pattern: str) -> list[int]:
40 """
41 Calculates the new index we should go to if we fail a comparison
42 :param pattern:
43 :return:
44 """
45 failure = [0]
46 i = 0
47 j = 1
48 while j < len(pattern):
49 if pattern[i] == pattern[j]:
50 i += 1
51 elif i > 0:
52 i = failure[i - 1]
53 continue
54 j += 1
55 failure.append(i)
56 return failure
57
58
59 if __name__ == "__main__":
60 # Test 1)
61 pattern = "abc1abc12"
62 text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
63 text2 = "alskfjaldsk23adsfabcabc"
64 assert kmp(pattern, text1) and not kmp(pattern, text2)
65
66 # Test 2)
67 pattern = "ABABX"
68 text = "ABABZABABYABABX"
69 assert kmp(pattern, text)
70
71 # Test 3)
72 pattern = "AAAB"
73 text = "ABAAAAAB"
74 assert kmp(pattern, text)
75
76 # Test 4)
77 pattern = "abcdabcy"
78 text = "abcxabcdabxabcdabcdabcy"
79 assert kmp(pattern, text)
80
81 # Test 5)
82 pattern = "aabaabaaa"
83 assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strings/knuth_morris_pratt.py b/strings/knuth_morris_pratt.py
--- a/strings/knuth_morris_pratt.py
+++ b/strings/knuth_morris_pratt.py
@@ -1,7 +1,7 @@
from __future__ import annotations
-def kmp(pattern: str, text: str) -> bool:
+def knuth_morris_pratt(text: str, pattern: str) -> int:
"""
The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text
with complexity O(n + m)
@@ -14,6 +14,12 @@
2) Step through the text one character at a time and compare it to a character in
the pattern updating our location within the pattern if necessary
+ >>> kmp = "knuth_morris_pratt"
+ >>> all(
+ ... knuth_morris_pratt(kmp, s) == kmp.find(s)
+ ... for s in ("kn", "h_m", "rr", "tt", "not there")
+ ... )
+ True
"""
# 1) Construct the failure array
@@ -24,7 +30,7 @@
while i < len(text):
if pattern[j] == text[i]:
if j == (len(pattern) - 1):
- return True
+ return i - j
j += 1
# if this is a prefix in our pattern
@@ -33,7 +39,7 @@
j = failure[j - 1]
continue
i += 1
- return False
+ return -1
def get_failure_array(pattern: str) -> list[int]:
@@ -57,27 +63,38 @@
if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
+
# Test 1)
pattern = "abc1abc12"
text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
text2 = "alskfjaldsk23adsfabcabc"
- assert kmp(pattern, text1) and not kmp(pattern, text2)
+ assert knuth_morris_pratt(text1, pattern) and knuth_morris_pratt(text2, pattern)
# Test 2)
pattern = "ABABX"
text = "ABABZABABYABABX"
- assert kmp(pattern, text)
+ assert knuth_morris_pratt(text, pattern)
# Test 3)
pattern = "AAAB"
text = "ABAAAAAB"
- assert kmp(pattern, text)
+ assert knuth_morris_pratt(text, pattern)
# Test 4)
pattern = "abcdabcy"
text = "abcxabcdabxabcdabcdabcy"
- assert kmp(pattern, text)
+ assert knuth_morris_pratt(text, pattern)
+
+ # Test 5) -> Doctests
+ kmp = "knuth_morris_pratt"
+ assert all(
+ knuth_morris_pratt(kmp, s) == kmp.find(s)
+ for s in ("kn", "h_m", "rr", "tt", "not there")
+ )
- # Test 5)
+ # Test 6)
pattern = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| {"golden_diff": "diff --git a/strings/knuth_morris_pratt.py b/strings/knuth_morris_pratt.py\n--- a/strings/knuth_morris_pratt.py\n+++ b/strings/knuth_morris_pratt.py\n@@ -1,7 +1,7 @@\n from __future__ import annotations\n \n \n-def kmp(pattern: str, text: str) -> bool:\n+def knuth_morris_pratt(text: str, pattern: str) -> int:\n \"\"\"\n The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text\n with complexity O(n + m)\n@@ -14,6 +14,12 @@\n 2) Step through the text one character at a time and compare it to a character in\n the pattern updating our location within the pattern if necessary\n \n+ >>> kmp = \"knuth_morris_pratt\"\n+ >>> all(\n+ ... knuth_morris_pratt(kmp, s) == kmp.find(s)\n+ ... for s in (\"kn\", \"h_m\", \"rr\", \"tt\", \"not there\")\n+ ... )\n+ True\n \"\"\"\n \n # 1) Construct the failure array\n@@ -24,7 +30,7 @@\n while i < len(text):\n if pattern[j] == text[i]:\n if j == (len(pattern) - 1):\n- return True\n+ return i - j\n j += 1\n \n # if this is a prefix in our pattern\n@@ -33,7 +39,7 @@\n j = failure[j - 1]\n continue\n i += 1\n- return False\n+ return -1\n \n \n def get_failure_array(pattern: str) -> list[int]:\n@@ -57,27 +63,38 @@\n \n \n if __name__ == \"__main__\":\n+ import doctest\n+\n+ doctest.testmod()\n+\n # Test 1)\n pattern = \"abc1abc12\"\n text1 = \"alskfjaldsabc1abc1abc12k23adsfabcabc\"\n text2 = \"alskfjaldsk23adsfabcabc\"\n- assert kmp(pattern, text1) and not kmp(pattern, text2)\n+ assert knuth_morris_pratt(text1, pattern) and knuth_morris_pratt(text2, pattern)\n \n # Test 2)\n pattern = \"ABABX\"\n text = \"ABABZABABYABABX\"\n- assert kmp(pattern, text)\n+ assert knuth_morris_pratt(text, pattern)\n \n # Test 3)\n pattern = \"AAAB\"\n text = \"ABAAAAAB\"\n- assert kmp(pattern, text)\n+ assert knuth_morris_pratt(text, pattern)\n \n # Test 4)\n pattern = \"abcdabcy\"\n text = \"abcxabcdabxabcdabcdabcy\"\n- assert kmp(pattern, text)\n+ assert knuth_morris_pratt(text, pattern)\n+\n+ # Test 5) -> Doctests\n+ kmp = \"knuth_morris_pratt\"\n+ assert all(\n+ knuth_morris_pratt(kmp, s) == kmp.find(s)\n+ for s in (\"kn\", \"h_m\", \"rr\", \"tt\", \"not there\")\n+ )\n \n- # Test 5)\n+ # Test 6)\n pattern = \"aabaabaaa\"\n assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]\n", "issue": "Returning index instead of boolean when knuth_morris_pratt matches.\nIn [knuth morris pratt algorithm](https://github.com/TheAlgorithms/Python/blob/master/strings/knuth_morris_pratt.py), I think it's better to return the \"index\" of the matched substring in case of success, and `-1` in case of failure, so that it would be consistent with the `str.find()` in Python. 
`True` and `False` aren't that helpful.\n", "before_files": [{"content": "from __future__ import annotations\n\n\ndef kmp(pattern: str, text: str) -> bool:\n \"\"\"\n The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text\n with complexity O(n + m)\n\n 1) Preprocess pattern to identify any suffixes that are identical to prefixes\n\n This tells us where to continue from if we get a mismatch between a character\n in our pattern and the text.\n\n 2) Step through the text one character at a time and compare it to a character in\n the pattern updating our location within the pattern if necessary\n\n \"\"\"\n\n # 1) Construct the failure array\n failure = get_failure_array(pattern)\n\n # 2) Step through text searching for pattern\n i, j = 0, 0 # index into text, pattern\n while i < len(text):\n if pattern[j] == text[i]:\n if j == (len(pattern) - 1):\n return True\n j += 1\n\n # if this is a prefix in our pattern\n # just go back far enough to continue\n elif j > 0:\n j = failure[j - 1]\n continue\n i += 1\n return False\n\n\ndef get_failure_array(pattern: str) -> list[int]:\n \"\"\"\n Calculates the new index we should go to if we fail a comparison\n :param pattern:\n :return:\n \"\"\"\n failure = [0]\n i = 0\n j = 1\n while j < len(pattern):\n if pattern[i] == pattern[j]:\n i += 1\n elif i > 0:\n i = failure[i - 1]\n continue\n j += 1\n failure.append(i)\n return failure\n\n\nif __name__ == \"__main__\":\n # Test 1)\n pattern = \"abc1abc12\"\n text1 = \"alskfjaldsabc1abc1abc12k23adsfabcabc\"\n text2 = \"alskfjaldsk23adsfabcabc\"\n assert kmp(pattern, text1) and not kmp(pattern, text2)\n\n # Test 2)\n pattern = \"ABABX\"\n text = \"ABABZABABYABABX\"\n assert kmp(pattern, text)\n\n # Test 3)\n pattern = \"AAAB\"\n text = \"ABAAAAAB\"\n assert kmp(pattern, text)\n\n # Test 4)\n pattern = \"abcdabcy\"\n text = \"abcxabcdabxabcdabcdabcy\"\n assert kmp(pattern, text)\n\n # Test 5)\n pattern = \"aabaabaaa\"\n assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]\n", "path": "strings/knuth_morris_pratt.py"}], "after_files": [{"content": "from __future__ import annotations\n\n\ndef knuth_morris_pratt(text: str, pattern: str) -> int:\n \"\"\"\n The Knuth-Morris-Pratt Algorithm for finding a pattern within a piece of text\n with complexity O(n + m)\n\n 1) Preprocess pattern to identify any suffixes that are identical to prefixes\n\n This tells us where to continue from if we get a mismatch between a character\n in our pattern and the text.\n\n 2) Step through the text one character at a time and compare it to a character in\n the pattern updating our location within the pattern if necessary\n\n >>> kmp = \"knuth_morris_pratt\"\n >>> all(\n ... knuth_morris_pratt(kmp, s) == kmp.find(s)\n ... for s in (\"kn\", \"h_m\", \"rr\", \"tt\", \"not there\")\n ... 
)\n True\n \"\"\"\n\n # 1) Construct the failure array\n failure = get_failure_array(pattern)\n\n # 2) Step through text searching for pattern\n i, j = 0, 0 # index into text, pattern\n while i < len(text):\n if pattern[j] == text[i]:\n if j == (len(pattern) - 1):\n return i - j\n j += 1\n\n # if this is a prefix in our pattern\n # just go back far enough to continue\n elif j > 0:\n j = failure[j - 1]\n continue\n i += 1\n return -1\n\n\ndef get_failure_array(pattern: str) -> list[int]:\n \"\"\"\n Calculates the new index we should go to if we fail a comparison\n :param pattern:\n :return:\n \"\"\"\n failure = [0]\n i = 0\n j = 1\n while j < len(pattern):\n if pattern[i] == pattern[j]:\n i += 1\n elif i > 0:\n i = failure[i - 1]\n continue\n j += 1\n failure.append(i)\n return failure\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n\n # Test 1)\n pattern = \"abc1abc12\"\n text1 = \"alskfjaldsabc1abc1abc12k23adsfabcabc\"\n text2 = \"alskfjaldsk23adsfabcabc\"\n assert knuth_morris_pratt(text1, pattern) and knuth_morris_pratt(text2, pattern)\n\n # Test 2)\n pattern = \"ABABX\"\n text = \"ABABZABABYABABX\"\n assert knuth_morris_pratt(text, pattern)\n\n # Test 3)\n pattern = \"AAAB\"\n text = \"ABAAAAAB\"\n assert knuth_morris_pratt(text, pattern)\n\n # Test 4)\n pattern = \"abcdabcy\"\n text = \"abcxabcdabxabcdabcdabcy\"\n assert knuth_morris_pratt(text, pattern)\n\n # Test 5) -> Doctests\n kmp = \"knuth_morris_pratt\"\n assert all(\n knuth_morris_pratt(kmp, s) == kmp.find(s)\n for s in (\"kn\", \"h_m\", \"rr\", \"tt\", \"not there\")\n )\n\n # Test 6)\n pattern = \"aabaabaaa\"\n assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]\n", "path": "strings/knuth_morris_pratt.py"}]} | 1,155 | 808 |
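To make the new contract concrete — first match index on success, `-1` on failure, exactly like `str.find()` — a condensed, self-contained sketch follows. `kmp_find` is a stand-alone re-implementation for illustration, not the repository function.

```python
# Sketch of the patched contract: return the index of the first match, or -1.
def kmp_find(text: str, pattern: str) -> int:
    if not pattern:
        return 0
    # failure[i] = length of the longest proper prefix of pattern[:i + 1]
    # that is also a suffix of it.
    failure = [0] * len(pattern)
    k = 0
    for i in range(1, len(pattern)):
        while k and pattern[i] != pattern[k]:
            k = failure[k - 1]
        if pattern[i] == pattern[k]:
            k += 1
        failure[i] = k

    j = 0
    for i, ch in enumerate(text):
        while j and ch != pattern[j]:
            j = failure[j - 1]
        if ch == pattern[j]:
            j += 1
            if j == len(pattern):
                return i - j + 1  # index of the match, like str.find()
    return -1  # no match, like str.find()


if __name__ == "__main__":
    kmp = "knuth_morris_pratt"
    assert all(
        kmp_find(kmp, s) == kmp.find(s)
        for s in ("kn", "h_m", "rr", "tt", "not there")
    )
    print("kmp_find agrees with str.find on every case")
```

Callers that previously branched on the boolean can treat any return value `>= 0` as a hit.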
gh_patches_debug_19094 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4875 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_AWS_300 S3 MPU lifecycle alert
**Issue**
Alert ID - CKV_AWS_300
Checkov sends a false alert for MPU "aws_s3_bucket_lifecycle_configuration" despite having the correct rule configuration. In my scenario I have an empty filter in my Terraform code, but as per the logic in the Checkov code base (checkov/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py), under the scan_resource_conf method, the 'if' condition doesn't take the 'filter' condition into account. This in turn throws a false alert.
**Examples**
```hcl
resource "aws_s3_bucket_lifecycle_configuration" "lifecycle_rule" {
bucket = aws_s3_bucket.s3.id
rule {
abort_incomplete_multipart_upload {
days_after_initiation = 3
}
id = "MPU 3days"
filter {
}
status = "Enabled"
}
}
```
**Version (please complete the following information):**
- Checkov 2.3.155 (Latest)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import Any
4
5 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
6 from checkov.common.models.enums import CheckCategories, CheckResult
7
8
9 class S3AbortIncompleteUploads(BaseResourceCheck):
10 def __init__(self) -> None:
11 """
12 If you don't set this value in a lifecycle configuration you'll end up paying for s3
13 resources you never could use
14 """
15 name = "Ensure S3 lifecycle configuration sets period for aborting failed uploads"
16 id = "CKV_AWS_300"
17 supported_resources = ("aws_s3_bucket_lifecycle_configuration",)
18 categories = (CheckCategories.GENERAL_SECURITY,)
19 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
20
21 def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
22 self.evaluated_keys = ["rule"]
23 rules = conf.get("rule")
24 if rules and isinstance(rules, list):
25 for idx_rule, rule in enumerate(rules):
26 if (
27 rule.get("abort_incomplete_multipart_upload")
28 and rule.get("status") == ["Enabled"]
29 and not rule.get("filter")
30 ):
31 self.evaluated_keys = [f"rule/[{idx_rule}]/abort_incomplete_multipart_upload"]
32 return CheckResult.PASSED
33
34 return CheckResult.FAILED
35
36
37 check = S3AbortIncompleteUploads()
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py
--- a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py
+++ b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py
@@ -23,12 +23,13 @@
rules = conf.get("rule")
if rules and isinstance(rules, list):
for idx_rule, rule in enumerate(rules):
- if (
- rule.get("abort_incomplete_multipart_upload")
- and rule.get("status") == ["Enabled"]
- and not rule.get("filter")
- ):
+ if rule.get("abort_incomplete_multipart_upload") and rule.get("status") == ["Enabled"]:
self.evaluated_keys = [f"rule/[{idx_rule}]/abort_incomplete_multipart_upload"]
+ filter = rule.get("filter")
+ if filter and isinstance(filter, list) and filter[0]:
+ # it is possible to set an empty filter, which applies then to all objects
+ continue
+
return CheckResult.PASSED
return CheckResult.FAILED
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n--- a/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n+++ b/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py\n@@ -23,12 +23,13 @@\n rules = conf.get(\"rule\")\n if rules and isinstance(rules, list):\n for idx_rule, rule in enumerate(rules):\n- if (\n- rule.get(\"abort_incomplete_multipart_upload\")\n- and rule.get(\"status\") == [\"Enabled\"]\n- and not rule.get(\"filter\")\n- ):\n+ if rule.get(\"abort_incomplete_multipart_upload\") and rule.get(\"status\") == [\"Enabled\"]:\n self.evaluated_keys = [f\"rule/[{idx_rule}]/abort_incomplete_multipart_upload\"]\n+ filter = rule.get(\"filter\")\n+ if filter and isinstance(filter, list) and filter[0]:\n+ # it is possible to set an empty filter, which applies then to all objects\n+ continue\n+\n return CheckResult.PASSED\n \n return CheckResult.FAILED\n", "issue": "CKV_AWS_300 S3 MPU lifecycle alert\n**Issue**\r\nAlert ID - CKV_AWS_300 \r\nCheckov sends false alert for MPU \"aws_s3_bucket_lifecycle_configuration\" despite having the correct rule configuration. In my scenario I'm having an empty filter in my terraform code. But as per logic in checkov code base, (checkov/checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py) under scan_resource_conf method, the 'if' condition doesn't take the 'filter' condition into account. This in-turn throws a false alert.\r\n\r\n**Examples**\r\n```hcl\r\nresource \"aws_s3_bucket_lifecycle_configuration\" \"lifecycle_rule\" {\r\n bucket = aws_s3_bucket.s3.id\r\n rule {\r\n abort_incomplete_multipart_upload {\r\n days_after_initiation = 3\r\n }\r\n id = \"MPU 3days\"\r\n filter {\r\n }\r\n status = \"Enabled\"\r\n }\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Checkov 2.3.155 (Latest)\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckCategories, CheckResult\n\n\nclass S3AbortIncompleteUploads(BaseResourceCheck):\n def __init__(self) -> None:\n \"\"\"\n If you don't set this value in a lifecycle configuration you'll end up paying for s3\n resources you never could use\n \"\"\"\n name = \"Ensure S3 lifecycle configuration sets period for aborting failed uploads\"\n id = \"CKV_AWS_300\"\n supported_resources = (\"aws_s3_bucket_lifecycle_configuration\",)\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n self.evaluated_keys = [\"rule\"]\n rules = conf.get(\"rule\")\n if rules and isinstance(rules, list):\n for idx_rule, rule in enumerate(rules):\n if (\n rule.get(\"abort_incomplete_multipart_upload\")\n and rule.get(\"status\") == [\"Enabled\"]\n and not rule.get(\"filter\")\n ):\n self.evaluated_keys = [f\"rule/[{idx_rule}]/abort_incomplete_multipart_upload\"]\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = S3AbortIncompleteUploads()\n", "path": "checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\n\nfrom checkov.terraform.checks.resource.base_resource_check import 
BaseResourceCheck\nfrom checkov.common.models.enums import CheckCategories, CheckResult\n\n\nclass S3AbortIncompleteUploads(BaseResourceCheck):\n def __init__(self) -> None:\n \"\"\"\n If you don't set this value in a lifecycle configuration you'll end up paying for s3\n resources you never could use\n \"\"\"\n name = \"Ensure S3 lifecycle configuration sets period for aborting failed uploads\"\n id = \"CKV_AWS_300\"\n supported_resources = (\"aws_s3_bucket_lifecycle_configuration\",)\n categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n self.evaluated_keys = [\"rule\"]\n rules = conf.get(\"rule\")\n if rules and isinstance(rules, list):\n for idx_rule, rule in enumerate(rules):\n if rule.get(\"abort_incomplete_multipart_upload\") and rule.get(\"status\") == [\"Enabled\"]:\n self.evaluated_keys = [f\"rule/[{idx_rule}]/abort_incomplete_multipart_upload\"]\n filter = rule.get(\"filter\")\n if filter and isinstance(filter, list) and filter[0]:\n # it is possible to set an empty filter, which applies then to all objects\n continue\n\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = S3AbortIncompleteUploads()\n", "path": "checkov/terraform/checks/resource/aws/S3AbortIncompleteUploads.py"}]} | 886 | 263 |
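The essence of the fix is that an empty `filter {}` block still applies the lifecycle rule to every object in the bucket, so it must not disqualify the rule. Below is a small stand-alone sketch of that decision logic; the helper name and the hand-built `conf` dict are assumptions that mirror how Checkov wraps Terraform attributes in lists.

```python
# Sketch of the corrected decision: an empty filter is bucket-wide, a
# non-empty filter scopes the rule and therefore does not count as a pass.
from __future__ import annotations

from typing import Any


def abort_mpu_rule_passes(conf: dict[str, list[Any]]) -> bool:
    for rule in conf.get("rule", []):
        if not (
            rule.get("abort_incomplete_multipart_upload")
            and rule.get("status") == ["Enabled"]
        ):
            continue
        filter_block = rule.get("filter")
        if filter_block and isinstance(filter_block, list) and filter_block[0]:
            # A filter with content applies only to a subset of objects, so it
            # does not prove cleanup for the whole bucket.
            continue
        return True  # empty or missing filter: the rule is bucket-wide
    return False


if __name__ == "__main__":
    conf = {
        "rule": [{
            "abort_incomplete_multipart_upload": [{"days_after_initiation": [3]}],
            "id": ["MPU 3days"],
            "filter": [{}],  # the empty filter from the issue report
            "status": ["Enabled"],
        }]
    }
    assert abort_mpu_rule_passes(conf)  # no longer a false positive
    print("empty-filter lifecycle rule passes")
```

With this shape, the empty-filter configuration from the report passes, while a rule scoped by a non-empty filter still falls through to `FAILED`.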
gh_patches_debug_53987 | rasdani/github-patches | git_diff | getredash__redash-784 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'datetime.timedelta' object has no attribute 'isoformat'
On the latest 0.9.2-rc:
```
[2016-01-21 14:30:36,838: ERROR/MainProcess] Task redash.tasks.execute_query[766d3f9f-68a6-4a64-8cd9-b7e4e18bf2af] raised unexpected: AttributeError("'datetime.timedelta' object has no attribute 'isoformat'",)
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/celery/app/trace.py", line 240, in trace_task
R = retval = fun(*args, **kwargs)
File "/opt/redash/redash/tasks.py", line 31, in __call__
return super(BaseTask, self).__call__(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/celery/app/trace.py", line 437, in __protected_call__
return self.run(*args, **kwargs)
File "/opt/redash/redash/tasks.py", line 286, in execute_query
data, error = query_runner.run_query(annotated_query)
File "/opt/redash/redash/query_runner/pg.py", line 132, in run_query
json_data = json.dumps(data, cls=JSONEncoder)
File "/usr/lib/python2.7/json/__init__.py", line 250, in dumps
sort_keys=sort_keys, **kw).encode(obj)
File "/usr/lib/python2.7/json/encoder.py", line 207, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/usr/lib/python2.7/json/encoder.py", line 270, in iterencode
return _iterencode(o, 0)
File "/opt/redash/redash/utils.py", line 57, in default
return o.isoformat()
AttributeError: 'datetime.timedelta' object has no attribute 'isoformat'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/utils.py`
Content:
```
1 import cStringIO
2 import csv
3 import codecs
4 import decimal
5 import datetime
6 import json
7 import random
8 import re
9 import hashlib
10 import pytz
11
12 COMMENTS_REGEX = re.compile("/\*.*?\*/")
13
14
15 def utcnow():
16 """Return datetime.now value with timezone specified.
17
18 Without the timezone data, when the timestamp stored to the database it gets the current timezone of the server,
19 which leads to errors in calculations.
20 """
21 return datetime.datetime.now(pytz.utc)
22
23
24 def slugify(s):
25 return re.sub('[^a-z0-9_\-]+', '-', s.lower())
26
27
28 def gen_query_hash(sql):
29 """Returns hash of the given query after stripping all comments, line breaks and multiple
30 spaces, and lower casing all text.
31
32 TODO: possible issue - the following queries will get the same id:
33 1. SELECT 1 FROM table WHERE column='Value';
34 2. SELECT 1 FROM table where column='value';
35 """
36 sql = COMMENTS_REGEX.sub("", sql)
37 sql = "".join(sql.split()).lower()
38 return hashlib.md5(sql.encode('utf-8')).hexdigest()
39
40
41 def generate_token(length):
42 chars = ('abcdefghijklmnopqrstuvwxyz'
43 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
44 '0123456789')
45
46 rand = random.SystemRandom()
47 return ''.join(rand.choice(chars) for x in range(length))
48
49 class JSONEncoder(json.JSONEncoder):
50 """Custom JSON encoding class, to handle Decimal and datetime.date instances.
51 """
52 def default(self, o):
53 if isinstance(o, decimal.Decimal):
54 return float(o)
55
56 if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)):
57 return o.isoformat()
58
59 super(JSONEncoder, self).default(o)
60
61
62 def json_dumps(data):
63 return json.dumps(data, cls=JSONEncoder)
64
65
66 def build_url(request, host, path):
67 parts = request.host.split(':')
68 if len(parts) > 1:
69 port = parts[1]
70 if (port, request.scheme) not in (('80', 'http'), ('443', 'https')):
71 host = '{}:{}'.format(host, port)
72
73 return "{}://{}{}".format(request.scheme, host, path)
74
75
76 class UnicodeWriter:
77 """
78 A CSV writer which will write rows to CSV file "f",
79 which is encoded in the given encoding.
80 """
81 def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
82 # Redirect output to a queue
83 self.queue = cStringIO.StringIO()
84 self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
85 self.stream = f
86 self.encoder = codecs.getincrementalencoder(encoding)()
87
88 def _encode_utf8(self, val):
89 if isinstance(val, (unicode, str)):
90 return val.encode('utf-8')
91
92 return val
93
94 def writerow(self, row):
95 self.writer.writerow([self._encode_utf8(s) for s in row])
96 # Fetch UTF-8 output from the queue ...
97 data = self.queue.getvalue()
98 data = data.decode("utf-8")
99 # ... and reencode it into the target encoding
100 data = self.encoder.encode(data)
101 # write to the target stream
102 self.stream.write(data)
103 # empty queue
104 self.queue.truncate(0)
105
106 def writerows(self, rows):
107 for row in rows:
108 self.writerow(row)
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/utils.py b/redash/utils.py
--- a/redash/utils.py
+++ b/redash/utils.py
@@ -53,9 +53,12 @@
if isinstance(o, decimal.Decimal):
return float(o)
- if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)):
+ if isinstance(o, (datetime.date, datetime.time)):
return o.isoformat()
-
+
+ if isinstance(o, datetime.timedelta):
+ return str(o)
+
super(JSONEncoder, self).default(o)
| {"golden_diff": "diff --git a/redash/utils.py b/redash/utils.py\n--- a/redash/utils.py\n+++ b/redash/utils.py\n@@ -53,9 +53,12 @@\n if isinstance(o, decimal.Decimal):\n return float(o)\n \n- if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)):\n+ if isinstance(o, (datetime.date, datetime.time)):\n return o.isoformat()\n- \n+\n+ if isinstance(o, datetime.timedelta):\n+ return str(o)\n+\n super(JSONEncoder, self).default(o)\n", "issue": "AttributeError: 'datetime.timedelta' object has no attribute 'isoformat'\nOn the latest 0.9.2-rc:\n\n```\n[2016-01-21 14:30:36,838: ERROR/MainProcess] Task redash.tasks.execute_query[766d3f9f-68a6-4a64-8cd9-b7e4e18bf2af] raised unexpected: AttributeError(\"'datetime.timedelta' object has no attribute 'isoformat'\",)\nTraceback (most recent call last):\n File \"/usr/local/lib/python2.7/dist-packages/celery/app/trace.py\", line 240, in trace_task\n R = retval = fun(*args, **kwargs)\n File \"/opt/redash/redash/tasks.py\", line 31, in __call__\n return super(BaseTask, self).__call__(*args, **kwargs)\n File \"/usr/local/lib/python2.7/dist-packages/celery/app/trace.py\", line 437, in __protected_call__\n return self.run(*args, **kwargs)\n File \"/opt/redash/redash/tasks.py\", line 286, in execute_query\n data, error = query_runner.run_query(annotated_query)\n File \"/opt/redash/redash/query_runner/pg.py\", line 132, in run_query\n json_data = json.dumps(data, cls=JSONEncoder)\n File \"/usr/lib/python2.7/json/__init__.py\", line 250, in dumps\n sort_keys=sort_keys, **kw).encode(obj)\n File \"/usr/lib/python2.7/json/encoder.py\", line 207, in encode\n chunks = self.iterencode(o, _one_shot=True)\n File \"/usr/lib/python2.7/json/encoder.py\", line 270, in iterencode\n return _iterencode(o, 0)\n File \"/opt/redash/redash/utils.py\", line 57, in default\n return o.isoformat()\nAttributeError: 'datetime.timedelta' object has no attribute 'isoformat'\n```\n\n", "before_files": [{"content": "import cStringIO\nimport csv\nimport codecs\nimport decimal\nimport datetime\nimport json\nimport random\nimport re\nimport hashlib\nimport pytz\n\nCOMMENTS_REGEX = re.compile(\"/\\*.*?\\*/\")\n\n\ndef utcnow():\n \"\"\"Return datetime.now value with timezone specified.\n\n Without the timezone data, when the timestamp stored to the database it gets the current timezone of the server,\n which leads to errors in calculations.\n \"\"\"\n return datetime.datetime.now(pytz.utc)\n\n\ndef slugify(s):\n return re.sub('[^a-z0-9_\\-]+', '-', s.lower())\n\n\ndef gen_query_hash(sql):\n \"\"\"Returns hash of the given query after stripping all comments, line breaks and multiple\n spaces, and lower casing all text.\n\n TODO: possible issue - the following queries will get the same id:\n 1. SELECT 1 FROM table WHERE column='Value';\n 2. 
SELECT 1 FROM table where column='value';\n \"\"\"\n sql = COMMENTS_REGEX.sub(\"\", sql)\n sql = \"\".join(sql.split()).lower()\n return hashlib.md5(sql.encode('utf-8')).hexdigest()\n\n\ndef generate_token(length):\n chars = ('abcdefghijklmnopqrstuvwxyz'\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n '0123456789')\n\n rand = random.SystemRandom()\n return ''.join(rand.choice(chars) for x in range(length))\n\nclass JSONEncoder(json.JSONEncoder):\n \"\"\"Custom JSON encoding class, to handle Decimal and datetime.date instances.\n \"\"\"\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n return float(o)\n\n if isinstance(o, (datetime.date, datetime.time, datetime.timedelta)):\n return o.isoformat()\n \n super(JSONEncoder, self).default(o)\n\n\ndef json_dumps(data):\n return json.dumps(data, cls=JSONEncoder)\n\n\ndef build_url(request, host, path):\n parts = request.host.split(':')\n if len(parts) > 1:\n port = parts[1]\n if (port, request.scheme) not in (('80', 'http'), ('443', 'https')):\n host = '{}:{}'.format(host, port)\n\n return \"{}://{}{}\".format(request.scheme, host, path)\n\n\nclass UnicodeWriter:\n \"\"\"\n A CSV writer which will write rows to CSV file \"f\",\n which is encoded in the given encoding.\n \"\"\"\n def __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n # Redirect output to a queue\n self.queue = cStringIO.StringIO()\n self.writer = csv.writer(self.queue, dialect=dialect, **kwds)\n self.stream = f\n self.encoder = codecs.getincrementalencoder(encoding)()\n\n def _encode_utf8(self, val):\n if isinstance(val, (unicode, str)):\n return val.encode('utf-8')\n\n return val\n\n def writerow(self, row):\n self.writer.writerow([self._encode_utf8(s) for s in row])\n # Fetch UTF-8 output from the queue ...\n data = self.queue.getvalue()\n data = data.decode(\"utf-8\")\n # ... and reencode it into the target encoding\n data = self.encoder.encode(data)\n # write to the target stream\n self.stream.write(data)\n # empty queue\n self.queue.truncate(0)\n\n def writerows(self, rows):\n for row in rows:\n self.writerow(row)\n", "path": "redash/utils.py"}], "after_files": [{"content": "import cStringIO\nimport csv\nimport codecs\nimport decimal\nimport datetime\nimport json\nimport random\nimport re\nimport hashlib\nimport pytz\n\nCOMMENTS_REGEX = re.compile(\"/\\*.*?\\*/\")\n\n\ndef utcnow():\n \"\"\"Return datetime.now value with timezone specified.\n\n Without the timezone data, when the timestamp stored to the database it gets the current timezone of the server,\n which leads to errors in calculations.\n \"\"\"\n return datetime.datetime.now(pytz.utc)\n\n\ndef slugify(s):\n return re.sub('[^a-z0-9_\\-]+', '-', s.lower())\n\n\ndef gen_query_hash(sql):\n \"\"\"Returns hash of the given query after stripping all comments, line breaks and multiple\n spaces, and lower casing all text.\n\n TODO: possible issue - the following queries will get the same id:\n 1. SELECT 1 FROM table WHERE column='Value';\n 2. 
SELECT 1 FROM table where column='value';\n \"\"\"\n sql = COMMENTS_REGEX.sub(\"\", sql)\n sql = \"\".join(sql.split()).lower()\n return hashlib.md5(sql.encode('utf-8')).hexdigest()\n\n\ndef generate_token(length):\n chars = ('abcdefghijklmnopqrstuvwxyz'\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n '0123456789')\n\n rand = random.SystemRandom()\n return ''.join(rand.choice(chars) for x in range(length))\n\nclass JSONEncoder(json.JSONEncoder):\n \"\"\"Custom JSON encoding class, to handle Decimal and datetime.date instances.\n \"\"\"\n def default(self, o):\n if isinstance(o, decimal.Decimal):\n return float(o)\n\n if isinstance(o, (datetime.date, datetime.time)):\n return o.isoformat()\n\n if isinstance(o, datetime.timedelta):\n return str(o)\n\n super(JSONEncoder, self).default(o)\n\n\ndef json_dumps(data):\n return json.dumps(data, cls=JSONEncoder)\n\n\ndef build_url(request, host, path):\n parts = request.host.split(':')\n if len(parts) > 1:\n port = parts[1]\n if (port, request.scheme) not in (('80', 'http'), ('443', 'https')):\n host = '{}:{}'.format(host, port)\n\n return \"{}://{}{}\".format(request.scheme, host, path)\n\n\nclass UnicodeWriter:\n \"\"\"\n A CSV writer which will write rows to CSV file \"f\",\n which is encoded in the given encoding.\n \"\"\"\n def __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n # Redirect output to a queue\n self.queue = cStringIO.StringIO()\n self.writer = csv.writer(self.queue, dialect=dialect, **kwds)\n self.stream = f\n self.encoder = codecs.getincrementalencoder(encoding)()\n\n def _encode_utf8(self, val):\n if isinstance(val, (unicode, str)):\n return val.encode('utf-8')\n\n return val\n\n def writerow(self, row):\n self.writer.writerow([self._encode_utf8(s) for s in row])\n # Fetch UTF-8 output from the queue ...\n data = self.queue.getvalue()\n data = data.decode(\"utf-8\")\n # ... and reencode it into the target encoding\n data = self.encoder.encode(data)\n # write to the target stream\n self.stream.write(data)\n # empty queue\n self.queue.truncate(0)\n\n def writerows(self, rows):\n for row in rows:\n self.writerow(row)\n", "path": "redash/utils.py"}]} | 1,685 | 119 |
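A minimal Python 3 rendering of the patched encoder behaviour is sketched below; the example row is invented, and the class is an illustration rather than the module in `redash/utils.py`.

```python
# Sketch: dates keep their ISO form, timedeltas are serialised via str().
import datetime
import decimal
import json


class JSONEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, decimal.Decimal):
            return float(o)
        if isinstance(o, (datetime.date, datetime.time)):
            return o.isoformat()
        if isinstance(o, datetime.timedelta):
            return str(o)
        return super().default(o)


if __name__ == "__main__":
    row = {
        "queued_at": datetime.date(2016, 1, 21),
        "runtime": datetime.timedelta(hours=1, minutes=2, seconds=3),
    }
    print(json.dumps(row, cls=JSONEncoder))
    # -> {"queued_at": "2016-01-21", "runtime": "1:02:03"}
```

`str()` on a timedelta yields a stable string such as `1:02:03`, which is enough for the query-result JSON that was raising the `AttributeError` in `pg.py`.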
gh_patches_debug_56069 | rasdani/github-patches | git_diff | pypa__pip-9636 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The --target option clashes with other command line flags and config files
The ``--target`` option clashes with several other command-line flags and config files like ``--user`` and distutils setup. Ideally we should handle this far more gracefully.
See also #3826, #4106, #562, #4139
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/utils/setuptools_build.py`
Content:
```
1 import sys
2 from typing import List, Optional, Sequence
3
4 # Shim to wrap setup.py invocation with setuptools
5 #
6 # We set sys.argv[0] to the path to the underlying setup.py file so
7 # setuptools / distutils don't take the path to the setup.py to be "-c" when
8 # invoking via the shim. This avoids e.g. the following manifest_maker
9 # warning: "warning: manifest_maker: standard file '-c' not found".
10 _SETUPTOOLS_SHIM = (
11 "import sys, setuptools, tokenize; sys.argv[0] = {0!r}; __file__={0!r};"
12 "f=getattr(tokenize, 'open', open)(__file__);"
13 "code=f.read().replace('\\r\\n', '\\n');"
14 "f.close();"
15 "exec(compile(code, __file__, 'exec'))"
16 )
17
18
19 def make_setuptools_shim_args(
20 setup_py_path, # type: str
21 global_options=None, # type: Sequence[str]
22 no_user_config=False, # type: bool
23 unbuffered_output=False # type: bool
24 ):
25 # type: (...) -> List[str]
26 """
27 Get setuptools command arguments with shim wrapped setup file invocation.
28
29 :param setup_py_path: The path to setup.py to be wrapped.
30 :param global_options: Additional global options.
31 :param no_user_config: If True, disables personal user configuration.
32 :param unbuffered_output: If True, adds the unbuffered switch to the
33 argument list.
34 """
35 args = [sys.executable]
36 if unbuffered_output:
37 args += ["-u"]
38 args += ["-c", _SETUPTOOLS_SHIM.format(setup_py_path)]
39 if global_options:
40 args += global_options
41 if no_user_config:
42 args += ["--no-user-cfg"]
43 return args
44
45
46 def make_setuptools_bdist_wheel_args(
47 setup_py_path, # type: str
48 global_options, # type: Sequence[str]
49 build_options, # type: Sequence[str]
50 destination_dir, # type: str
51 ):
52 # type: (...) -> List[str]
53 # NOTE: Eventually, we'd want to also -S to the flags here, when we're
54 # isolating. Currently, it breaks Python in virtualenvs, because it
55 # relies on site.py to find parts of the standard library outside the
56 # virtualenv.
57 args = make_setuptools_shim_args(
58 setup_py_path,
59 global_options=global_options,
60 unbuffered_output=True
61 )
62 args += ["bdist_wheel", "-d", destination_dir]
63 args += build_options
64 return args
65
66
67 def make_setuptools_clean_args(
68 setup_py_path, # type: str
69 global_options, # type: Sequence[str]
70 ):
71 # type: (...) -> List[str]
72 args = make_setuptools_shim_args(
73 setup_py_path,
74 global_options=global_options,
75 unbuffered_output=True
76 )
77 args += ["clean", "--all"]
78 return args
79
80
81 def make_setuptools_develop_args(
82 setup_py_path, # type: str
83 global_options, # type: Sequence[str]
84 install_options, # type: Sequence[str]
85 no_user_config, # type: bool
86 prefix, # type: Optional[str]
87 home, # type: Optional[str]
88 use_user_site, # type: bool
89 ):
90 # type: (...) -> List[str]
91 assert not (use_user_site and prefix)
92
93 args = make_setuptools_shim_args(
94 setup_py_path,
95 global_options=global_options,
96 no_user_config=no_user_config,
97 )
98
99 args += ["develop", "--no-deps"]
100
101 args += install_options
102
103 if prefix:
104 args += ["--prefix", prefix]
105 if home is not None:
106 args += ["--home", home]
107
108 if use_user_site:
109 args += ["--user", "--prefix="]
110
111 return args
112
113
114 def make_setuptools_egg_info_args(
115 setup_py_path, # type: str
116 egg_info_dir, # type: Optional[str]
117 no_user_config, # type: bool
118 ):
119 # type: (...) -> List[str]
120 args = make_setuptools_shim_args(
121 setup_py_path, no_user_config=no_user_config
122 )
123
124 args += ["egg_info"]
125
126 if egg_info_dir:
127 args += ["--egg-base", egg_info_dir]
128
129 return args
130
131
132 def make_setuptools_install_args(
133 setup_py_path, # type: str
134 global_options, # type: Sequence[str]
135 install_options, # type: Sequence[str]
136 record_filename, # type: str
137 root, # type: Optional[str]
138 prefix, # type: Optional[str]
139 header_dir, # type: Optional[str]
140 home, # type: Optional[str]
141 use_user_site, # type: bool
142 no_user_config, # type: bool
143 pycompile # type: bool
144 ):
145 # type: (...) -> List[str]
146 assert not (use_user_site and prefix)
147 assert not (use_user_site and root)
148
149 args = make_setuptools_shim_args(
150 setup_py_path,
151 global_options=global_options,
152 no_user_config=no_user_config,
153 unbuffered_output=True
154 )
155 args += ["install", "--record", record_filename]
156 args += ["--single-version-externally-managed"]
157
158 if root is not None:
159 args += ["--root", root]
160 if prefix is not None:
161 args += ["--prefix", prefix]
162 if home is not None:
163 args += ["--home", home]
164 if use_user_site:
165 args += ["--user", "--prefix="]
166
167 if pycompile:
168 args += ["--compile"]
169 else:
170 args += ["--no-compile"]
171
172 if header_dir:
173 args += ["--install-headers", header_dir]
174
175 args += install_options
176
177 return args
178
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/utils/setuptools_build.py b/src/pip/_internal/utils/setuptools_build.py
--- a/src/pip/_internal/utils/setuptools_build.py
+++ b/src/pip/_internal/utils/setuptools_build.py
@@ -103,7 +103,7 @@
if prefix:
args += ["--prefix", prefix]
if home is not None:
- args += ["--home", home]
+ args += ["--install-dir", home]
if use_user_site:
args += ["--user", "--prefix="]
| {"golden_diff": "diff --git a/src/pip/_internal/utils/setuptools_build.py b/src/pip/_internal/utils/setuptools_build.py\n--- a/src/pip/_internal/utils/setuptools_build.py\n+++ b/src/pip/_internal/utils/setuptools_build.py\n@@ -103,7 +103,7 @@\n if prefix:\n args += [\"--prefix\", prefix]\n if home is not None:\n- args += [\"--home\", home]\n+ args += [\"--install-dir\", home]\n \n if use_user_site:\n args += [\"--user\", \"--prefix=\"]\n", "issue": "The --target option clashes with other command line flags and config files\nThe ``--target`` option clashes with several other command like flags and config files like ``--user`` and distutils setup. Ideally we should handle this far more gracefully.\r\n\r\nSee also #3826, #4106, #562, #4139\n", "before_files": [{"content": "import sys\nfrom typing import List, Optional, Sequence\n\n# Shim to wrap setup.py invocation with setuptools\n#\n# We set sys.argv[0] to the path to the underlying setup.py file so\n# setuptools / distutils don't take the path to the setup.py to be \"-c\" when\n# invoking via the shim. This avoids e.g. the following manifest_maker\n# warning: \"warning: manifest_maker: standard file '-c' not found\".\n_SETUPTOOLS_SHIM = (\n \"import sys, setuptools, tokenize; sys.argv[0] = {0!r}; __file__={0!r};\"\n \"f=getattr(tokenize, 'open', open)(__file__);\"\n \"code=f.read().replace('\\\\r\\\\n', '\\\\n');\"\n \"f.close();\"\n \"exec(compile(code, __file__, 'exec'))\"\n)\n\n\ndef make_setuptools_shim_args(\n setup_py_path, # type: str\n global_options=None, # type: Sequence[str]\n no_user_config=False, # type: bool\n unbuffered_output=False # type: bool\n):\n # type: (...) -> List[str]\n \"\"\"\n Get setuptools command arguments with shim wrapped setup file invocation.\n\n :param setup_py_path: The path to setup.py to be wrapped.\n :param global_options: Additional global options.\n :param no_user_config: If True, disables personal user configuration.\n :param unbuffered_output: If True, adds the unbuffered switch to the\n argument list.\n \"\"\"\n args = [sys.executable]\n if unbuffered_output:\n args += [\"-u\"]\n args += [\"-c\", _SETUPTOOLS_SHIM.format(setup_py_path)]\n if global_options:\n args += global_options\n if no_user_config:\n args += [\"--no-user-cfg\"]\n return args\n\n\ndef make_setuptools_bdist_wheel_args(\n setup_py_path, # type: str\n global_options, # type: Sequence[str]\n build_options, # type: Sequence[str]\n destination_dir, # type: str\n):\n # type: (...) -> List[str]\n # NOTE: Eventually, we'd want to also -S to the flags here, when we're\n # isolating. Currently, it breaks Python in virtualenvs, because it\n # relies on site.py to find parts of the standard library outside the\n # virtualenv.\n args = make_setuptools_shim_args(\n setup_py_path,\n global_options=global_options,\n unbuffered_output=True\n )\n args += [\"bdist_wheel\", \"-d\", destination_dir]\n args += build_options\n return args\n\n\ndef make_setuptools_clean_args(\n setup_py_path, # type: str\n global_options, # type: Sequence[str]\n):\n # type: (...) -> List[str]\n args = make_setuptools_shim_args(\n setup_py_path,\n global_options=global_options,\n unbuffered_output=True\n )\n args += [\"clean\", \"--all\"]\n return args\n\n\ndef make_setuptools_develop_args(\n setup_py_path, # type: str\n global_options, # type: Sequence[str]\n install_options, # type: Sequence[str]\n no_user_config, # type: bool\n prefix, # type: Optional[str]\n home, # type: Optional[str]\n use_user_site, # type: bool\n):\n # type: (...) 
-> List[str]\n assert not (use_user_site and prefix)\n\n args = make_setuptools_shim_args(\n setup_py_path,\n global_options=global_options,\n no_user_config=no_user_config,\n )\n\n args += [\"develop\", \"--no-deps\"]\n\n args += install_options\n\n if prefix:\n args += [\"--prefix\", prefix]\n if home is not None:\n args += [\"--home\", home]\n\n if use_user_site:\n args += [\"--user\", \"--prefix=\"]\n\n return args\n\n\ndef make_setuptools_egg_info_args(\n setup_py_path, # type: str\n egg_info_dir, # type: Optional[str]\n no_user_config, # type: bool\n):\n # type: (...) -> List[str]\n args = make_setuptools_shim_args(\n setup_py_path, no_user_config=no_user_config\n )\n\n args += [\"egg_info\"]\n\n if egg_info_dir:\n args += [\"--egg-base\", egg_info_dir]\n\n return args\n\n\ndef make_setuptools_install_args(\n setup_py_path, # type: str\n global_options, # type: Sequence[str]\n install_options, # type: Sequence[str]\n record_filename, # type: str\n root, # type: Optional[str]\n prefix, # type: Optional[str]\n header_dir, # type: Optional[str]\n home, # type: Optional[str]\n use_user_site, # type: bool\n no_user_config, # type: bool\n pycompile # type: bool\n):\n # type: (...) -> List[str]\n assert not (use_user_site and prefix)\n assert not (use_user_site and root)\n\n args = make_setuptools_shim_args(\n setup_py_path,\n global_options=global_options,\n no_user_config=no_user_config,\n unbuffered_output=True\n )\n args += [\"install\", \"--record\", record_filename]\n args += [\"--single-version-externally-managed\"]\n\n if root is not None:\n args += [\"--root\", root]\n if prefix is not None:\n args += [\"--prefix\", prefix]\n if home is not None:\n args += [\"--home\", home]\n if use_user_site:\n args += [\"--user\", \"--prefix=\"]\n\n if pycompile:\n args += [\"--compile\"]\n else:\n args += [\"--no-compile\"]\n\n if header_dir:\n args += [\"--install-headers\", header_dir]\n\n args += install_options\n\n return args\n", "path": "src/pip/_internal/utils/setuptools_build.py"}], "after_files": [{"content": "import sys\nfrom typing import List, Optional, Sequence\n\n# Shim to wrap setup.py invocation with setuptools\n#\n# We set sys.argv[0] to the path to the underlying setup.py file so\n# setuptools / distutils don't take the path to the setup.py to be \"-c\" when\n# invoking via the shim. This avoids e.g. the following manifest_maker\n# warning: \"warning: manifest_maker: standard file '-c' not found\".\n_SETUPTOOLS_SHIM = (\n \"import sys, setuptools, tokenize; sys.argv[0] = {0!r}; __file__={0!r};\"\n \"f=getattr(tokenize, 'open', open)(__file__);\"\n \"code=f.read().replace('\\\\r\\\\n', '\\\\n');\"\n \"f.close();\"\n \"exec(compile(code, __file__, 'exec'))\"\n)\n\n\ndef make_setuptools_shim_args(\n setup_py_path, # type: str\n global_options=None, # type: Sequence[str]\n no_user_config=False, # type: bool\n unbuffered_output=False # type: bool\n):\n # type: (...) 
-> List[str]\n \"\"\"\n Get setuptools command arguments with shim wrapped setup file invocation.\n\n :param setup_py_path: The path to setup.py to be wrapped.\n :param global_options: Additional global options.\n :param no_user_config: If True, disables personal user configuration.\n :param unbuffered_output: If True, adds the unbuffered switch to the\n argument list.\n \"\"\"\n args = [sys.executable]\n if unbuffered_output:\n args += [\"-u\"]\n args += [\"-c\", _SETUPTOOLS_SHIM.format(setup_py_path)]\n if global_options:\n args += global_options\n if no_user_config:\n args += [\"--no-user-cfg\"]\n return args\n\n\ndef make_setuptools_bdist_wheel_args(\n setup_py_path, # type: str\n global_options, # type: Sequence[str]\n build_options, # type: Sequence[str]\n destination_dir, # type: str\n):\n # type: (...) -> List[str]\n # NOTE: Eventually, we'd want to also -S to the flags here, when we're\n # isolating. Currently, it breaks Python in virtualenvs, because it\n # relies on site.py to find parts of the standard library outside the\n # virtualenv.\n args = make_setuptools_shim_args(\n setup_py_path,\n global_options=global_options,\n unbuffered_output=True\n )\n args += [\"bdist_wheel\", \"-d\", destination_dir]\n args += build_options\n return args\n\n\ndef make_setuptools_clean_args(\n setup_py_path, # type: str\n global_options, # type: Sequence[str]\n):\n # type: (...) -> List[str]\n args = make_setuptools_shim_args(\n setup_py_path,\n global_options=global_options,\n unbuffered_output=True\n )\n args += [\"clean\", \"--all\"]\n return args\n\n\ndef make_setuptools_develop_args(\n setup_py_path, # type: str\n global_options, # type: Sequence[str]\n install_options, # type: Sequence[str]\n no_user_config, # type: bool\n prefix, # type: Optional[str]\n home, # type: Optional[str]\n use_user_site, # type: bool\n):\n # type: (...) -> List[str]\n assert not (use_user_site and prefix)\n\n args = make_setuptools_shim_args(\n setup_py_path,\n global_options=global_options,\n no_user_config=no_user_config,\n )\n\n args += [\"develop\", \"--no-deps\"]\n\n args += install_options\n\n if prefix:\n args += [\"--prefix\", prefix]\n if home is not None:\n args += [\"--install-dir\", home]\n\n if use_user_site:\n args += [\"--user\", \"--prefix=\"]\n\n return args\n\n\ndef make_setuptools_egg_info_args(\n setup_py_path, # type: str\n egg_info_dir, # type: Optional[str]\n no_user_config, # type: bool\n):\n # type: (...) -> List[str]\n args = make_setuptools_shim_args(\n setup_py_path, no_user_config=no_user_config\n )\n\n args += [\"egg_info\"]\n\n if egg_info_dir:\n args += [\"--egg-base\", egg_info_dir]\n\n return args\n\n\ndef make_setuptools_install_args(\n setup_py_path, # type: str\n global_options, # type: Sequence[str]\n install_options, # type: Sequence[str]\n record_filename, # type: str\n root, # type: Optional[str]\n prefix, # type: Optional[str]\n header_dir, # type: Optional[str]\n home, # type: Optional[str]\n use_user_site, # type: bool\n no_user_config, # type: bool\n pycompile # type: bool\n):\n # type: (...) 
-> List[str]\n assert not (use_user_site and prefix)\n assert not (use_user_site and root)\n\n args = make_setuptools_shim_args(\n setup_py_path,\n global_options=global_options,\n no_user_config=no_user_config,\n unbuffered_output=True\n )\n args += [\"install\", \"--record\", record_filename]\n args += [\"--single-version-externally-managed\"]\n\n if root is not None:\n args += [\"--root\", root]\n if prefix is not None:\n args += [\"--prefix\", prefix]\n if home is not None:\n args += [\"--home\", home]\n if use_user_site:\n args += [\"--user\", \"--prefix=\"]\n\n if pycompile:\n args += [\"--compile\"]\n else:\n args += [\"--no-compile\"]\n\n if header_dir:\n args += [\"--install-headers\", header_dir]\n\n args += install_options\n\n return args\n", "path": "src/pip/_internal/utils/setuptools_build.py"}]} | 2,103 | 125 |
gh_patches_debug_9891 | rasdani/github-patches | git_diff | spack__spack-4489 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
metis 4.0.3 doesn't install on macOS 10.11.6 with appleclang 7.0.3
A gist of the installation log is at https://gist.github.com/goxberry/b9ca39f4d7c5cd6b0905aaaa42f53746
If I had to guess, the error at the end of the log re: the RPATH option `-rpath=$STUFF` is the problem; IIRC, that syntax works on GCC, but not with LLVM/clang. Replacing `-rpath=` with `-rpath,` (that is, replacing the equals sign with a comma) should work for both GCC and LLVM/clang compilers.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `var/spack/repos/builtin/packages/metis/package.py`
Content:
```
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the LICENSE file for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25
26 from spack import *
27 import glob
28 import sys
29 import os
30
31
32 class Metis(Package):
33 """METIS is a set of serial programs for partitioning graphs, partitioning
34 finite element meshes, and producing fill reducing orderings for sparse
35 matrices. The algorithms implemented in METIS are based on the
36 multilevel recursive-bisection, multilevel k-way, and multi-constraint
37 partitioning schemes."""
38
39 homepage = "http://glaros.dtc.umn.edu/gkhome/metis/metis/overview"
40 url = "http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis/metis-5.1.0.tar.gz"
41 list_url = "http://glaros.dtc.umn.edu/gkhome/fsroot/sw/metis/OLD"
42
43 version('5.1.0', '5465e67079419a69e0116de24fce58fe')
44 version('5.0.2', 'acb521a4e8c2e6dd559a7f9abd0468c5')
45 version('4.0.3', 'd3848b454532ef18dc83e4fb160d1e10')
46
47 variant('shared', default=True, description='Enables the build of shared libraries.')
48 variant('debug', default=False, description='Builds the library in debug mode.')
49 variant('gdb', default=False, description='Enables gdb support.')
50
51 variant('int64', default=False, description='Sets the bit width of METIS\'s index type to 64.')
52 variant('real64', default=False, description='Sets the bit width of METIS\'s real type to 64.')
53
54 depends_on('[email protected]:', when='@5:', type='build')
55
56 patch('install_gklib_defs_rename.patch', when='@5:')
57
58 def url_for_version(self, version):
59 url = "http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis"
60 if version < Version('4.0.3'):
61 url += "/OLD"
62 url += "/metis-{0}.tar.gz".format(version)
63 return url
64
65 @when('@5:')
66 def patch(self):
67 source_path = self.stage.source_path
68 metis_header = FileFilter(join_path(source_path, 'include', 'metis.h'))
69
70 metis_header.filter(
71 r'(\b)(IDXTYPEWIDTH )(\d+)(\b)',
72 r'\1\2{0}\4'.format('64' if '+int64' in self.spec else '32'),
73 )
74 metis_header.filter(
75 r'(\b)(REALTYPEWIDTH )(\d+)(\b)',
76 r'\1\2{0}\4'.format('64' if '+real64' in self.spec else '32'),
77 )
78
79 # Make clang 7.3 happy.
80 # Prevents "ld: section __DATA/__thread_bss extends beyond end of file"
81 # See upstream LLVM issue https://llvm.org/bugs/show_bug.cgi?id=27059
82 # and https://github.com/Homebrew/homebrew-science/blob/master/metis.rb
83 if self.spec.satisfies('%[email protected]'):
84 filter_file('#define MAX_JBUFS 128', '#define MAX_JBUFS 24',
85 join_path(source_path, 'GKlib', 'error.c'))
86
87 @when('@:4')
88 def install(self, spec, prefix):
89 # Process library spec and options
90 if any('+{0}'.format(v) in spec for v in ['gdb', 'int64', 'real64']):
91 raise InstallError('METIS@:4 does not support the following '
92 'variants: gdb, int64, real64.')
93
94 options = ['COPTIONS=-fPIC']
95 if '+debug' in spec:
96 options.append('OPTFLAGS=-g -O0')
97 make(*options)
98
99 # Compile and install library files
100 ccompile = Executable(self.compiler.cc)
101
102 mkdir(prefix.bin)
103 binfiles = ('pmetis', 'kmetis', 'oemetis', 'onmetis', 'partnmesh',
104 'partdmesh', 'mesh2nodal', 'mesh2dual', 'graphchk')
105 for binfile in binfiles:
106 install(binfile, prefix.bin)
107
108 mkdir(prefix.lib)
109 install('libmetis.a', prefix.lib)
110
111 mkdir(prefix.include)
112 for h in glob.glob(join_path('Lib', '*.h')):
113 install(h, prefix.include)
114
115 mkdir(prefix.share)
116 sharefiles = (('Graphs', '4elt.graph'), ('Graphs', 'metis.mesh'),
117 ('Graphs', 'test.mgraph'))
118 for sharefile in tuple(join_path(*sf) for sf in sharefiles):
119 install(sharefile, prefix.share)
120
121 if '+shared' in spec:
122 shared_flags = ['-fPIC', '-shared']
123 if sys.platform == 'darwin':
124 shared_suffix = 'dylib'
125 shared_flags.extend(['-Wl,-all_load', 'libmetis.a'])
126 else:
127 shared_suffix = 'so'
128 shared_flags.extend(['-Wl,-whole-archive', 'libmetis.a',
129 '-Wl,-no-whole-archive'])
130
131 shared_out = '%s/libmetis.%s' % (prefix.lib, shared_suffix)
132 shared_flags.extend(['-o', shared_out])
133
134 ccompile(*shared_flags)
135
136 # Set up and run tests on installation
137 ccompile('-I%s' % prefix.include, '-L%s' % prefix.lib,
138 '-Wl,-rpath=%s' % (prefix.lib if '+shared' in spec else ''),
139 join_path('Programs', 'io.o'), join_path('Test', 'mtest.c'),
140 '-o', '%s/mtest' % prefix.bin, '-lmetis', '-lm')
141
142 if self.run_tests:
143 test_bin = lambda testname: join_path(prefix.bin, testname)
144 test_graph = lambda graphname: join_path(prefix.share, graphname)
145
146 graph = test_graph('4elt.graph')
147 os.system('%s %s' % (test_bin('mtest'), graph))
148 os.system('%s %s 40' % (test_bin('kmetis'), graph))
149 os.system('%s %s' % (test_bin('onmetis'), graph))
150 graph = test_graph('test.mgraph')
151 os.system('%s %s 2' % (test_bin('pmetis'), graph))
152 os.system('%s %s 2' % (test_bin('kmetis'), graph))
153 os.system('%s %s 5' % (test_bin('kmetis'), graph))
154 graph = test_graph('metis.mesh')
155 os.system('%s %s 10' % (test_bin('partnmesh'), graph))
156 os.system('%s %s 10' % (test_bin('partdmesh'), graph))
157 os.system('%s %s' % (test_bin('mesh2dual'), graph))
158
159 # FIXME: The following code should replace the testing code in the
160 # block above since it causes installs to fail when one or more of
161 # the Metis tests fail, but it currently doesn't work because the
162 # 'mtest', 'onmetis', and 'partnmesh' tests return error codes that
163 # trigger false positives for failure.
164 """
165 Executable(test_bin('mtest'))(test_graph('4elt.graph'))
166 Executable(test_bin('kmetis'))(test_graph('4elt.graph'), '40')
167 Executable(test_bin('onmetis'))(test_graph('4elt.graph'))
168
169 Executable(test_bin('pmetis'))(test_graph('test.mgraph'), '2')
170 Executable(test_bin('kmetis'))(test_graph('test.mgraph'), '2')
171 Executable(test_bin('kmetis'))(test_graph('test.mgraph'), '5')
172
173 Executable(test_bin('partnmesh'))(test_graph('metis.mesh'), '10')
174 Executable(test_bin('partdmesh'))(test_graph('metis.mesh'), '10')
175 Executable(test_bin('mesh2dual'))(test_graph('metis.mesh'))
176 """
177
178 @when('@5:')
179 def install(self, spec, prefix):
180 source_directory = self.stage.source_path
181 build_directory = join_path(source_directory, 'build')
182
183 options = std_cmake_args[:]
184 options.append('-DGKLIB_PATH:PATH=%s/GKlib' % source_directory)
185 options.append('-DCMAKE_INSTALL_NAME_DIR:PATH=%s/lib' % prefix)
186
187 if '+shared' in spec:
188 options.append('-DSHARED:BOOL=ON')
189 else:
190 # Remove all RPATH options
191 # (RPATHxxx options somehow trigger cmake to link dynamically)
192 rpath_options = []
193 for o in options:
194 if o.find('RPATH') >= 0:
195 rpath_options.append(o)
196 for o in rpath_options:
197 options.remove(o)
198 if '+debug' in spec:
199 options.extend(['-DDEBUG:BOOL=ON',
200 '-DCMAKE_BUILD_TYPE:STRING=Debug'])
201 if '+gdb' in spec:
202 options.append('-DGDB:BOOL=ON')
203
204 with working_dir(build_directory, create=True):
205 cmake(source_directory, *options)
206 make()
207 make('install')
208
209 # install GKlib headers, which will be needed for ParMETIS
210 GKlib_dist = join_path(prefix.include, 'GKlib')
211 mkdirp(GKlib_dist)
212 hfiles = glob.glob(join_path(source_directory, 'GKlib', '*.h'))
213 for hfile in hfiles:
214 install(hfile, GKlib_dist)
215
216 if self.run_tests:
217 # FIXME: On some systems, the installed binaries for METIS cannot
218 # be executed without first being read.
219 ls = which('ls')
220 ls('-a', '-l', prefix.bin)
221
222 for f in ['4elt', 'copter2', 'mdual']:
223 graph = join_path(source_directory, 'graphs', '%s.graph' % f)
224 Executable(join_path(prefix.bin, 'graphchk'))(graph)
225 Executable(join_path(prefix.bin, 'gpmetis'))(graph, '2')
226 Executable(join_path(prefix.bin, 'ndmetis'))(graph)
227
228 graph = join_path(source_directory, 'graphs', 'test.mgraph')
229 Executable(join_path(prefix.bin, 'gpmetis'))(graph, '2')
230 graph = join_path(source_directory, 'graphs', 'metis.mesh')
231 Executable(join_path(prefix.bin, 'mpmetis'))(graph, '2')
232
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/var/spack/repos/builtin/packages/metis/package.py b/var/spack/repos/builtin/packages/metis/package.py
--- a/var/spack/repos/builtin/packages/metis/package.py
+++ b/var/spack/repos/builtin/packages/metis/package.py
@@ -135,7 +135,8 @@
# Set up and run tests on installation
ccompile('-I%s' % prefix.include, '-L%s' % prefix.lib,
- '-Wl,-rpath=%s' % (prefix.lib if '+shared' in spec else ''),
+ self.compiler.cc_rpath_arg +
+ '%s' % (prefix.lib if '+shared' in spec else ''),
join_path('Programs', 'io.o'), join_path('Test', 'mtest.c'),
'-o', '%s/mtest' % prefix.bin, '-lmetis', '-lm')
| {"golden_diff": "diff --git a/var/spack/repos/builtin/packages/metis/package.py b/var/spack/repos/builtin/packages/metis/package.py\n--- a/var/spack/repos/builtin/packages/metis/package.py\n+++ b/var/spack/repos/builtin/packages/metis/package.py\n@@ -135,7 +135,8 @@\n \n # Set up and run tests on installation\n ccompile('-I%s' % prefix.include, '-L%s' % prefix.lib,\n- '-Wl,-rpath=%s' % (prefix.lib if '+shared' in spec else ''),\n+ self.compiler.cc_rpath_arg +\n+ '%s' % (prefix.lib if '+shared' in spec else ''),\n join_path('Programs', 'io.o'), join_path('Test', 'mtest.c'),\n '-o', '%s/mtest' % prefix.bin, '-lmetis', '-lm')\n", "issue": "metis 4.0.3 doesn't install on macOS 10.11.6 with appleclang 7.0.3\nA gist of the installation log is at https://gist.github.com/goxberry/b9ca39f4d7c5cd6b0905aaaa42f53746\r\n\r\nIf I had to guess, the error at the end of the log re: the RPATH option `-rpath=$STUFF` is the problem; IIRC, that syntax works on GCC, but not with LLVM/clang. Replacing `-rpath=` with `-rpath,` (that is, replace the equals sign with a comma) should work for both GCC and LLVM/clang compilers.\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\n\nfrom spack import *\nimport glob\nimport sys\nimport os\n\n\nclass Metis(Package):\n \"\"\"METIS is a set of serial programs for partitioning graphs, partitioning\n finite element meshes, and producing fill reducing orderings for sparse\n matrices. 
The algorithms implemented in METIS are based on the\n multilevel recursive-bisection, multilevel k-way, and multi-constraint\n partitioning schemes.\"\"\"\n\n homepage = \"http://glaros.dtc.umn.edu/gkhome/metis/metis/overview\"\n url = \"http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis/metis-5.1.0.tar.gz\"\n list_url = \"http://glaros.dtc.umn.edu/gkhome/fsroot/sw/metis/OLD\"\n\n version('5.1.0', '5465e67079419a69e0116de24fce58fe')\n version('5.0.2', 'acb521a4e8c2e6dd559a7f9abd0468c5')\n version('4.0.3', 'd3848b454532ef18dc83e4fb160d1e10')\n\n variant('shared', default=True, description='Enables the build of shared libraries.')\n variant('debug', default=False, description='Builds the library in debug mode.')\n variant('gdb', default=False, description='Enables gdb support.')\n\n variant('int64', default=False, description='Sets the bit width of METIS\\'s index type to 64.')\n variant('real64', default=False, description='Sets the bit width of METIS\\'s real type to 64.')\n\n depends_on('[email protected]:', when='@5:', type='build')\n\n patch('install_gklib_defs_rename.patch', when='@5:')\n\n def url_for_version(self, version):\n url = \"http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis\"\n if version < Version('4.0.3'):\n url += \"/OLD\"\n url += \"/metis-{0}.tar.gz\".format(version)\n return url\n\n @when('@5:')\n def patch(self):\n source_path = self.stage.source_path\n metis_header = FileFilter(join_path(source_path, 'include', 'metis.h'))\n\n metis_header.filter(\n r'(\\b)(IDXTYPEWIDTH )(\\d+)(\\b)',\n r'\\1\\2{0}\\4'.format('64' if '+int64' in self.spec else '32'),\n )\n metis_header.filter(\n r'(\\b)(REALTYPEWIDTH )(\\d+)(\\b)',\n r'\\1\\2{0}\\4'.format('64' if '+real64' in self.spec else '32'),\n )\n\n # Make clang 7.3 happy.\n # Prevents \"ld: section __DATA/__thread_bss extends beyond end of file\"\n # See upstream LLVM issue https://llvm.org/bugs/show_bug.cgi?id=27059\n # and https://github.com/Homebrew/homebrew-science/blob/master/metis.rb\n if self.spec.satisfies('%[email protected]'):\n filter_file('#define MAX_JBUFS 128', '#define MAX_JBUFS 24',\n join_path(source_path, 'GKlib', 'error.c'))\n\n @when('@:4')\n def install(self, spec, prefix):\n # Process library spec and options\n if any('+{0}'.format(v) in spec for v in ['gdb', 'int64', 'real64']):\n raise InstallError('METIS@:4 does not support the following '\n 'variants: gdb, int64, real64.')\n\n options = ['COPTIONS=-fPIC']\n if '+debug' in spec:\n options.append('OPTFLAGS=-g -O0')\n make(*options)\n\n # Compile and install library files\n ccompile = Executable(self.compiler.cc)\n\n mkdir(prefix.bin)\n binfiles = ('pmetis', 'kmetis', 'oemetis', 'onmetis', 'partnmesh',\n 'partdmesh', 'mesh2nodal', 'mesh2dual', 'graphchk')\n for binfile in binfiles:\n install(binfile, prefix.bin)\n\n mkdir(prefix.lib)\n install('libmetis.a', prefix.lib)\n\n mkdir(prefix.include)\n for h in glob.glob(join_path('Lib', '*.h')):\n install(h, prefix.include)\n\n mkdir(prefix.share)\n sharefiles = (('Graphs', '4elt.graph'), ('Graphs', 'metis.mesh'),\n ('Graphs', 'test.mgraph'))\n for sharefile in tuple(join_path(*sf) for sf in sharefiles):\n install(sharefile, prefix.share)\n\n if '+shared' in spec:\n shared_flags = ['-fPIC', '-shared']\n if sys.platform == 'darwin':\n shared_suffix = 'dylib'\n shared_flags.extend(['-Wl,-all_load', 'libmetis.a'])\n else:\n shared_suffix = 'so'\n shared_flags.extend(['-Wl,-whole-archive', 'libmetis.a',\n '-Wl,-no-whole-archive'])\n\n shared_out = '%s/libmetis.%s' % (prefix.lib, shared_suffix)\n 
shared_flags.extend(['-o', shared_out])\n\n ccompile(*shared_flags)\n\n # Set up and run tests on installation\n ccompile('-I%s' % prefix.include, '-L%s' % prefix.lib,\n '-Wl,-rpath=%s' % (prefix.lib if '+shared' in spec else ''),\n join_path('Programs', 'io.o'), join_path('Test', 'mtest.c'),\n '-o', '%s/mtest' % prefix.bin, '-lmetis', '-lm')\n\n if self.run_tests:\n test_bin = lambda testname: join_path(prefix.bin, testname)\n test_graph = lambda graphname: join_path(prefix.share, graphname)\n\n graph = test_graph('4elt.graph')\n os.system('%s %s' % (test_bin('mtest'), graph))\n os.system('%s %s 40' % (test_bin('kmetis'), graph))\n os.system('%s %s' % (test_bin('onmetis'), graph))\n graph = test_graph('test.mgraph')\n os.system('%s %s 2' % (test_bin('pmetis'), graph))\n os.system('%s %s 2' % (test_bin('kmetis'), graph))\n os.system('%s %s 5' % (test_bin('kmetis'), graph))\n graph = test_graph('metis.mesh')\n os.system('%s %s 10' % (test_bin('partnmesh'), graph))\n os.system('%s %s 10' % (test_bin('partdmesh'), graph))\n os.system('%s %s' % (test_bin('mesh2dual'), graph))\n\n # FIXME: The following code should replace the testing code in the\n # block above since it causes installs to fail when one or more of\n # the Metis tests fail, but it currently doesn't work because the\n # 'mtest', 'onmetis', and 'partnmesh' tests return error codes that\n # trigger false positives for failure.\n \"\"\"\n Executable(test_bin('mtest'))(test_graph('4elt.graph'))\n Executable(test_bin('kmetis'))(test_graph('4elt.graph'), '40')\n Executable(test_bin('onmetis'))(test_graph('4elt.graph'))\n\n Executable(test_bin('pmetis'))(test_graph('test.mgraph'), '2')\n Executable(test_bin('kmetis'))(test_graph('test.mgraph'), '2')\n Executable(test_bin('kmetis'))(test_graph('test.mgraph'), '5')\n\n Executable(test_bin('partnmesh'))(test_graph('metis.mesh'), '10')\n Executable(test_bin('partdmesh'))(test_graph('metis.mesh'), '10')\n Executable(test_bin('mesh2dual'))(test_graph('metis.mesh'))\n \"\"\"\n\n @when('@5:')\n def install(self, spec, prefix):\n source_directory = self.stage.source_path\n build_directory = join_path(source_directory, 'build')\n\n options = std_cmake_args[:]\n options.append('-DGKLIB_PATH:PATH=%s/GKlib' % source_directory)\n options.append('-DCMAKE_INSTALL_NAME_DIR:PATH=%s/lib' % prefix)\n\n if '+shared' in spec:\n options.append('-DSHARED:BOOL=ON')\n else:\n # Remove all RPATH options\n # (RPATHxxx options somehow trigger cmake to link dynamically)\n rpath_options = []\n for o in options:\n if o.find('RPATH') >= 0:\n rpath_options.append(o)\n for o in rpath_options:\n options.remove(o)\n if '+debug' in spec:\n options.extend(['-DDEBUG:BOOL=ON',\n '-DCMAKE_BUILD_TYPE:STRING=Debug'])\n if '+gdb' in spec:\n options.append('-DGDB:BOOL=ON')\n\n with working_dir(build_directory, create=True):\n cmake(source_directory, *options)\n make()\n make('install')\n\n # install GKlib headers, which will be needed for ParMETIS\n GKlib_dist = join_path(prefix.include, 'GKlib')\n mkdirp(GKlib_dist)\n hfiles = glob.glob(join_path(source_directory, 'GKlib', '*.h'))\n for hfile in hfiles:\n install(hfile, GKlib_dist)\n\n if self.run_tests:\n # FIXME: On some systems, the installed binaries for METIS cannot\n # be executed without first being read.\n ls = which('ls')\n ls('-a', '-l', prefix.bin)\n\n for f in ['4elt', 'copter2', 'mdual']:\n graph = join_path(source_directory, 'graphs', '%s.graph' % f)\n Executable(join_path(prefix.bin, 'graphchk'))(graph)\n Executable(join_path(prefix.bin, 'gpmetis'))(graph, '2')\n 
Executable(join_path(prefix.bin, 'ndmetis'))(graph)\n\n graph = join_path(source_directory, 'graphs', 'test.mgraph')\n Executable(join_path(prefix.bin, 'gpmetis'))(graph, '2')\n graph = join_path(source_directory, 'graphs', 'metis.mesh')\n Executable(join_path(prefix.bin, 'mpmetis'))(graph, '2')\n", "path": "var/spack/repos/builtin/packages/metis/package.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\n\nfrom spack import *\nimport glob\nimport sys\nimport os\n\n\nclass Metis(Package):\n \"\"\"METIS is a set of serial programs for partitioning graphs, partitioning\n finite element meshes, and producing fill reducing orderings for sparse\n matrices. 
The algorithms implemented in METIS are based on the\n multilevel recursive-bisection, multilevel k-way, and multi-constraint\n partitioning schemes.\"\"\"\n\n homepage = \"http://glaros.dtc.umn.edu/gkhome/metis/metis/overview\"\n url = \"http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis/metis-5.1.0.tar.gz\"\n list_url = \"http://glaros.dtc.umn.edu/gkhome/fsroot/sw/metis/OLD\"\n\n version('5.1.0', '5465e67079419a69e0116de24fce58fe')\n version('5.0.2', 'acb521a4e8c2e6dd559a7f9abd0468c5')\n version('4.0.3', 'd3848b454532ef18dc83e4fb160d1e10')\n\n variant('shared', default=True, description='Enables the build of shared libraries.')\n variant('debug', default=False, description='Builds the library in debug mode.')\n variant('gdb', default=False, description='Enables gdb support.')\n\n variant('int64', default=False, description='Sets the bit width of METIS\\'s index type to 64.')\n variant('real64', default=False, description='Sets the bit width of METIS\\'s real type to 64.')\n\n depends_on('[email protected]:', when='@5:', type='build')\n\n patch('install_gklib_defs_rename.patch', when='@5:')\n\n def url_for_version(self, version):\n url = \"http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis\"\n if version < Version('4.0.3'):\n url += \"/OLD\"\n url += \"/metis-{0}.tar.gz\".format(version)\n return url\n\n @when('@5:')\n def patch(self):\n source_path = self.stage.source_path\n metis_header = FileFilter(join_path(source_path, 'include', 'metis.h'))\n\n metis_header.filter(\n r'(\\b)(IDXTYPEWIDTH )(\\d+)(\\b)',\n r'\\1\\2{0}\\4'.format('64' if '+int64' in self.spec else '32'),\n )\n metis_header.filter(\n r'(\\b)(REALTYPEWIDTH )(\\d+)(\\b)',\n r'\\1\\2{0}\\4'.format('64' if '+real64' in self.spec else '32'),\n )\n\n # Make clang 7.3 happy.\n # Prevents \"ld: section __DATA/__thread_bss extends beyond end of file\"\n # See upstream LLVM issue https://llvm.org/bugs/show_bug.cgi?id=27059\n # and https://github.com/Homebrew/homebrew-science/blob/master/metis.rb\n if self.spec.satisfies('%[email protected]'):\n filter_file('#define MAX_JBUFS 128', '#define MAX_JBUFS 24',\n join_path(source_path, 'GKlib', 'error.c'))\n\n @when('@:4')\n def install(self, spec, prefix):\n # Process library spec and options\n if any('+{0}'.format(v) in spec for v in ['gdb', 'int64', 'real64']):\n raise InstallError('METIS@:4 does not support the following '\n 'variants: gdb, int64, real64.')\n\n options = ['COPTIONS=-fPIC']\n if '+debug' in spec:\n options.append('OPTFLAGS=-g -O0')\n make(*options)\n\n # Compile and install library files\n ccompile = Executable(self.compiler.cc)\n\n mkdir(prefix.bin)\n binfiles = ('pmetis', 'kmetis', 'oemetis', 'onmetis', 'partnmesh',\n 'partdmesh', 'mesh2nodal', 'mesh2dual', 'graphchk')\n for binfile in binfiles:\n install(binfile, prefix.bin)\n\n mkdir(prefix.lib)\n install('libmetis.a', prefix.lib)\n\n mkdir(prefix.include)\n for h in glob.glob(join_path('Lib', '*.h')):\n install(h, prefix.include)\n\n mkdir(prefix.share)\n sharefiles = (('Graphs', '4elt.graph'), ('Graphs', 'metis.mesh'),\n ('Graphs', 'test.mgraph'))\n for sharefile in tuple(join_path(*sf) for sf in sharefiles):\n install(sharefile, prefix.share)\n\n if '+shared' in spec:\n shared_flags = ['-fPIC', '-shared']\n if sys.platform == 'darwin':\n shared_suffix = 'dylib'\n shared_flags.extend(['-Wl,-all_load', 'libmetis.a'])\n else:\n shared_suffix = 'so'\n shared_flags.extend(['-Wl,-whole-archive', 'libmetis.a',\n '-Wl,-no-whole-archive'])\n\n shared_out = '%s/libmetis.%s' % (prefix.lib, shared_suffix)\n 
shared_flags.extend(['-o', shared_out])\n\n ccompile(*shared_flags)\n\n # Set up and run tests on installation\n ccompile('-I%s' % prefix.include, '-L%s' % prefix.lib,\n self.compiler.cc_rpath_arg +\n '%s' % (prefix.lib if '+shared' in spec else ''),\n join_path('Programs', 'io.o'), join_path('Test', 'mtest.c'),\n '-o', '%s/mtest' % prefix.bin, '-lmetis', '-lm')\n\n if self.run_tests:\n test_bin = lambda testname: join_path(prefix.bin, testname)\n test_graph = lambda graphname: join_path(prefix.share, graphname)\n\n graph = test_graph('4elt.graph')\n os.system('%s %s' % (test_bin('mtest'), graph))\n os.system('%s %s 40' % (test_bin('kmetis'), graph))\n os.system('%s %s' % (test_bin('onmetis'), graph))\n graph = test_graph('test.mgraph')\n os.system('%s %s 2' % (test_bin('pmetis'), graph))\n os.system('%s %s 2' % (test_bin('kmetis'), graph))\n os.system('%s %s 5' % (test_bin('kmetis'), graph))\n graph = test_graph('metis.mesh')\n os.system('%s %s 10' % (test_bin('partnmesh'), graph))\n os.system('%s %s 10' % (test_bin('partdmesh'), graph))\n os.system('%s %s' % (test_bin('mesh2dual'), graph))\n\n # FIXME: The following code should replace the testing code in the\n # block above since it causes installs to fail when one or more of\n # the Metis tests fail, but it currently doesn't work because the\n # 'mtest', 'onmetis', and 'partnmesh' tests return error codes that\n # trigger false positives for failure.\n \"\"\"\n Executable(test_bin('mtest'))(test_graph('4elt.graph'))\n Executable(test_bin('kmetis'))(test_graph('4elt.graph'), '40')\n Executable(test_bin('onmetis'))(test_graph('4elt.graph'))\n\n Executable(test_bin('pmetis'))(test_graph('test.mgraph'), '2')\n Executable(test_bin('kmetis'))(test_graph('test.mgraph'), '2')\n Executable(test_bin('kmetis'))(test_graph('test.mgraph'), '5')\n\n Executable(test_bin('partnmesh'))(test_graph('metis.mesh'), '10')\n Executable(test_bin('partdmesh'))(test_graph('metis.mesh'), '10')\n Executable(test_bin('mesh2dual'))(test_graph('metis.mesh'))\n \"\"\"\n\n @when('@5:')\n def install(self, spec, prefix):\n source_directory = self.stage.source_path\n build_directory = join_path(source_directory, 'build')\n\n options = std_cmake_args[:]\n options.append('-DGKLIB_PATH:PATH=%s/GKlib' % source_directory)\n options.append('-DCMAKE_INSTALL_NAME_DIR:PATH=%s/lib' % prefix)\n\n if '+shared' in spec:\n options.append('-DSHARED:BOOL=ON')\n else:\n # Remove all RPATH options\n # (RPATHxxx options somehow trigger cmake to link dynamically)\n rpath_options = []\n for o in options:\n if o.find('RPATH') >= 0:\n rpath_options.append(o)\n for o in rpath_options:\n options.remove(o)\n if '+debug' in spec:\n options.extend(['-DDEBUG:BOOL=ON',\n '-DCMAKE_BUILD_TYPE:STRING=Debug'])\n if '+gdb' in spec:\n options.append('-DGDB:BOOL=ON')\n\n with working_dir(build_directory, create=True):\n cmake(source_directory, *options)\n make()\n make('install')\n\n # install GKlib headers, which will be needed for ParMETIS\n GKlib_dist = join_path(prefix.include, 'GKlib')\n mkdirp(GKlib_dist)\n hfiles = glob.glob(join_path(source_directory, 'GKlib', '*.h'))\n for hfile in hfiles:\n install(hfile, GKlib_dist)\n\n if self.run_tests:\n # FIXME: On some systems, the installed binaries for METIS cannot\n # be executed without first being read.\n ls = which('ls')\n ls('-a', '-l', prefix.bin)\n\n for f in ['4elt', 'copter2', 'mdual']:\n graph = join_path(source_directory, 'graphs', '%s.graph' % f)\n Executable(join_path(prefix.bin, 'graphchk'))(graph)\n Executable(join_path(prefix.bin, 
'gpmetis'))(graph, '2')\n Executable(join_path(prefix.bin, 'ndmetis'))(graph)\n\n graph = join_path(source_directory, 'graphs', 'test.mgraph')\n Executable(join_path(prefix.bin, 'gpmetis'))(graph, '2')\n graph = join_path(source_directory, 'graphs', 'metis.mesh')\n Executable(join_path(prefix.bin, 'mpmetis'))(graph, '2')\n", "path": "var/spack/repos/builtin/packages/metis/package.py"}]} | 3,665 | 190 |
gh_patches_debug_43914 | rasdani/github-patches | git_diff | google__TensorNetwork-580 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ncon should take a BaseBackend object
Since we are now supporting passing `BaseBackend` to `Node` constructors, we should also
allow it in `ncon`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensornetwork/ncon_interface.py`
Content:
```
1 # Copyright 2019 The TensorNetwork Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """NCON interface to TensorNetwork."""
15
16 import warnings
17 from typing import Any, Sequence, List, Optional, Union, Text, Tuple, Dict
18 from tensornetwork import network_components
19 from tensornetwork.backend_contextmanager import get_default_backend
20 from tensornetwork.backends import backend_factory
21 Tensor = Any
22
23
24 def ncon(
25 tensors: Sequence[Union[network_components.BaseNode, Tensor]],
26 network_structure: Sequence[Sequence],
27 con_order: Optional[Sequence] = None,
28 out_order: Optional[Sequence] = None,
29 backend: Optional[Text] = None
30 ) -> Union[network_components.BaseNode, Tensor]:
31 r"""Contracts a list of tensors or nodes according to a tensor network
32 specification.
33
34 The network is provided as a list of lists, one for each
35 tensor, specifying labels for the edges connected to that tensor.
36
37 If a contraction order `con_order` and an output order `out_order`
38 are both provided, the edge labels can be anything.
39 Otherwise (`con_order == None or out_order == None`), the edge labels
40 must be nonzero integers and edges will be contracted in ascending order.
41 Negative integers denote the (dangling) indices of the output tensor,
42 which will be in descending order, e.g. `[-1,-2,-3,...]`.
43
44 For example, matrix multiplication:
45
46 .. code-block:: python
47
48 A = np.array([[1.0, 2.0], [3.0, 4.0]])
49 B = np.array([[1.0, 1.0], [0.0, 1.0]])
50 ncon([A,B], [(-1, 1), (1, -2)])
51
52 Matrix trace:
53
54 .. code-block:: python
55
56 A = np.array([[1.0, 2.0], [3.0, 4.0]])
57 ncon([A], [(1, 1)]) # 5.0
58
59 Note:
60 The reason `0` is not allowed as an edge label without manually
61 specifying the contraction order is to maintain compatibility with the
62 `original NCON implementation`_. However, the use of `0` in `con_order`
63 to denote outer products is not (currently)
64 supported in this implementation.
65
66 .. _original NCON implementation:
67 https://arxiv.org/abs/1402.0939
68
69 Args:
70 tensors: List of `Tensors` or `BaseNodes`.
71 network_structure: List of lists specifying the tensor network structure.
72 con_order: List of edge labels specifying the contraction order.
73 out_order: List of edge labels specifying the output order.
74 backend: String specifying the backend to use. Defaults to
75 `tensornetwork.backend_contextmanager.get_default_backend`.
76
77 Returns:
78 The result of the contraction. The result is returned as a `Node`
79 if all elements of `tensors` are `BaseNode` objects, else
80 it is returned as a `Tensor` object.
81 """
82 if backend and (backend not in backend_factory._BACKENDS):
83 raise ValueError("Backend '{}' does not exist".format(backend))
84 if backend is None:
85 backend = get_default_backend()
86
87 are_nodes = [isinstance(t, network_components.BaseNode) for t in tensors]
88 nodes = {t for t in tensors if isinstance(t, network_components.BaseNode)}
89 if not all([n.backend.name == backend for n in nodes]):
90 raise ValueError(
91 "Some nodes have backends different from '{}'".format(backend))
92
93 _tensors = []
94 for t in tensors:
95 if isinstance(t, network_components.BaseNode):
96 _tensors.append(t.tensor)
97 else:
98 _tensors.append(t)
99
100 nodes, con_edges, out_edges = ncon_network(
101 _tensors,
102 network_structure,
103 con_order=con_order,
104 out_order=out_order,
105 backend=backend)
106
107 nodes = set(nodes) # we don't need the ordering here
108
109 # Reverse the list so we can pop from the end: O(1).
110 con_edges = con_edges[::-1]
111 while con_edges:
112 nodes_to_contract = con_edges[-1].get_nodes()
113 edges_to_contract = network_components.get_shared_edges(*nodes_to_contract)
114
115 # Eat up all parallel edges that are adjacent in the ordering.
116 adjacent_parallel_edges = set()
117 for edge in reversed(con_edges):
118 if edge in edges_to_contract:
119 adjacent_parallel_edges.add(edge)
120 else:
121 break
122 con_edges = con_edges[:-len(adjacent_parallel_edges)]
123
124 # In an optimal ordering, all edges connecting a given pair of nodes are
125 # adjacent in con_order. If this is not the case, warn the user.
126 leftovers = edges_to_contract - adjacent_parallel_edges
127 if leftovers:
128 warnings.warn(
129 "Suboptimal ordering detected. Edges {} are not adjacent in the "
130 "contraction order to edges {}, connecting nodes {}. Deviating from "
131 "the specified ordering!".format(
132 list(map(str, leftovers)),
133 list(map(str, adjacent_parallel_edges)),
134 list(map(str, nodes_to_contract))))
135 con_edges = [e for e in con_edges if e not in edges_to_contract]
136
137 if set(nodes_to_contract) == nodes:
138 # This contraction produces the final output, so order the edges
139 # here to avoid transposes in some cases.
140 contraction_output_order = out_edges
141 else:
142 contraction_output_order = None
143
144 nodes = nodes - set(nodes_to_contract)
145 nodes.add(
146 network_components.contract_between(
147 *nodes_to_contract,
148 name="con({},{})".format(*nodes_to_contract),
149 output_edge_order=contraction_output_order))
150
151 # TODO: More efficient ordering of products based on out_edges
152 res_node = network_components.outer_product_final_nodes(nodes, out_edges)
153 if all(are_nodes):
154 return res_node
155 return res_node.tensor
156
157
158 def ncon_network(
159 tensors: Sequence[Tensor],
160 network_structure: Sequence[Sequence],
161 con_order: Optional[Sequence] = None,
162 out_order: Optional[Sequence] = None,
163 backend: Optional[Text] = None
164 ) -> Tuple[List[network_components.BaseNode], List[network_components.Edge],
165 List[network_components.Edge]]:
166 r"""Creates a network from a list of tensors according to `tensors`.
167
168 The network is provided as a list of lists, one for each
169 tensor, specifying labels for the edges connected to that tensor.
170
171 If a contraction order `con_order` and an output order `out_order`
172 are both provided, the edge labels can be anything.
173 Otherwise (`con_order == None or out_order == None`), the edge labels
174 must be integers and edges will be contracted in ascending order.
175 Negative integers denote the (dangling) indices of the output tensor,
176 which will be in descending order, e.g. `[-1,-2,-3,...]`.
177
178 This is used internally by `ncon()`.
179
180 Args:
181 tensors: List of `Tensor`s.
182 network_structure: List of lists specifying the tensor network.
183 con_order: List of edge labels specifying the contraction order.
184 out_order: List of edge labels specifying the output order.
185 backend: String specifying the backend to use. Defaults to the default
186 TensorNetwork backend.
187
188 Returns:
189 nodes: List of constructed nodes in the same order as given in `tensors`.
190 con_edges: List of internal `Edge` objects in contraction order.
191 out_edges: List of dangling `Edge` objects in output order.
192 """
193 if len(tensors) != len(network_structure):
194 raise ValueError('len(tensors) != len(network_structure)')
195
196 nodes, edges = _build_network(tensors, network_structure, backend)
197
198 if con_order is None:
199 try:
200 con_order = sorted((k for k in edges if k >= 0))
201 if con_order and con_order[0] == 0:
202 raise ValueError("'0' is not a valid edge label when the "
203 "contraction order is not specified separately.")
204 except TypeError:
205 raise ValueError("Non-integer edge label(s): {}".format(
206 list(edges.keys())))
207 else:
208 if len(con_order) != len(set(con_order)):
209 raise ValueError("Duplicate labels in con_order: {}".format(con_order))
210
211 if out_order is None:
212 try:
213 out_order = sorted((k for k in edges if k < 0), reverse=True)
214 except TypeError:
215 raise ValueError("Non-integer edge label(s): {}".format(
216 list(edges.keys())))
217 else:
218 if len(out_order) != len(set(out_order)):
219 raise ValueError("Duplicate labels in out_order: {}".format(out_order))
220
221 try:
222 con_edges = [edges[k] for k in con_order]
223 out_edges = [edges[k] for k in out_order]
224 except KeyError as err:
225 raise ValueError("Order contained an unknown edge label: {}".format(
226 err.args[0]))
227
228 if len(con_edges) + len(out_edges) != len(edges):
229 raise ValueError(
230 "Edges {} were not included in the contraction and output "
231 "ordering.".format(
232 list(set(edges.keys()) - set(con_order) - set(out_order))))
233
234 for e in con_edges:
235 if e.is_dangling():
236 raise ValueError(
237 "Contraction edge {} appears only once in the network.".format(
238 str(e)))
239
240 for e in out_edges:
241 if not e.is_dangling():
242 raise ValueError(
243 "Output edge {} appears more than once in the network.".format(
244 str(e)))
245
246 return nodes, con_edges, out_edges
247
248
249 def _build_network(
250 tensors: Sequence[Tensor], network_structure: Sequence[Sequence],
251 backend: Text
252 ) -> Tuple[List[network_components.BaseNode], Dict[Any,
253 network_components.Edge]]:
254 nodes = []
255 edges = {}
256 for i, (tensor, edge_lbls) in enumerate(zip(tensors, network_structure)):
257 if len(tensor.shape) != len(edge_lbls):
258 raise ValueError(
259 "Incorrect number of edge labels specified tensor {}".format(i))
260 if isinstance(tensor, network_components.BaseNode):
261 node = tensor
262 else:
263 node = network_components.Node(
264 tensor, name="tensor_{}".format(i), backend=backend)
265
266 nodes.append(node)
267
268 for (axis_num, edge_lbl) in enumerate(edge_lbls):
269 if edge_lbl not in edges:
270 e = node[axis_num]
271 e.set_name(str(edge_lbl))
272 edges[edge_lbl] = e
273 else:
274 # This will raise an error if the edges are not dangling.
275 e = network_components.connect(
276 edges[edge_lbl], node[axis_num], name=str(edge_lbl))
277 edges[edge_lbl] = e
278 return nodes, edges
279
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tensornetwork/ncon_interface.py b/tensornetwork/ncon_interface.py
--- a/tensornetwork/ncon_interface.py
+++ b/tensornetwork/ncon_interface.py
@@ -18,6 +18,8 @@
from tensornetwork import network_components
from tensornetwork.backend_contextmanager import get_default_backend
from tensornetwork.backends import backend_factory
+from tensornetwork.backends.base_backend import BaseBackend
+
Tensor = Any
@@ -26,7 +28,7 @@
network_structure: Sequence[Sequence],
con_order: Optional[Sequence] = None,
out_order: Optional[Sequence] = None,
- backend: Optional[Text] = None
+ backend: Optional[Union[Text, BaseBackend]] = None
) -> Union[network_components.BaseNode, Tensor]:
r"""Contracts a list of tensors or nodes according to a tensor network
specification.
@@ -79,16 +81,18 @@
if all elements of `tensors` are `BaseNode` objects, else
it is returned as a `Tensor` object.
"""
- if backend and (backend not in backend_factory._BACKENDS):
- raise ValueError("Backend '{}' does not exist".format(backend))
if backend is None:
backend = get_default_backend()
+ if isinstance(backend, BaseBackend):
+ backend_obj = backend
+ else:
+ backend_obj = backend_factory.get_backend(backend)
are_nodes = [isinstance(t, network_components.BaseNode) for t in tensors]
nodes = {t for t in tensors if isinstance(t, network_components.BaseNode)}
- if not all([n.backend.name == backend for n in nodes]):
- raise ValueError(
- "Some nodes have backends different from '{}'".format(backend))
+ if not all([n.backend.name == backend_obj.name for n in nodes]):
+ raise ValueError("Some nodes have backends different from '{}'".format(
+ backend_obj.name))
_tensors = []
for t in tensors:
@@ -102,7 +106,7 @@
network_structure,
con_order=con_order,
out_order=out_order,
- backend=backend)
+ backend=backend_obj)
nodes = set(nodes) # we don't need the ordering here
@@ -160,7 +164,7 @@
network_structure: Sequence[Sequence],
con_order: Optional[Sequence] = None,
out_order: Optional[Sequence] = None,
- backend: Optional[Text] = None
+ backend: Optional[Union[Text, BaseBackend]] = None
) -> Tuple[List[network_components.BaseNode], List[network_components.Edge],
List[network_components.Edge]]:
r"""Creates a network from a list of tensors according to `tensors`.
@@ -182,9 +186,8 @@
network_structure: List of lists specifying the tensor network.
con_order: List of edge labels specifying the contraction order.
out_order: List of edge labels specifying the output order.
- backend: String specifying the backend to use. Defaults to the default
- TensorNetwork backend.
-
+ backend: String or BaseBackend object specifying the backend to use.
+ Defaults to the default TensorNetwork backend.
Returns:
nodes: List of constructed nodes in the same order as given in `tensors`.
con_edges: List of internal `Edge` objects in contraction order.
@@ -247,8 +250,9 @@
def _build_network(
- tensors: Sequence[Tensor], network_structure: Sequence[Sequence],
- backend: Text
+ tensors: Sequence[Tensor],
+ network_structure: Sequence[Sequence],
+ backend: Optional[Union[BaseBackend, Text]] = None,
) -> Tuple[List[network_components.BaseNode], Dict[Any,
network_components.Edge]]:
nodes = []
| {"golden_diff": "diff --git a/tensornetwork/ncon_interface.py b/tensornetwork/ncon_interface.py\n--- a/tensornetwork/ncon_interface.py\n+++ b/tensornetwork/ncon_interface.py\n@@ -18,6 +18,8 @@\n from tensornetwork import network_components\n from tensornetwork.backend_contextmanager import get_default_backend\n from tensornetwork.backends import backend_factory\n+from tensornetwork.backends.base_backend import BaseBackend\n+\n Tensor = Any\n \n \n@@ -26,7 +28,7 @@\n network_structure: Sequence[Sequence],\n con_order: Optional[Sequence] = None,\n out_order: Optional[Sequence] = None,\n- backend: Optional[Text] = None\n+ backend: Optional[Union[Text, BaseBackend]] = None\n ) -> Union[network_components.BaseNode, Tensor]:\n r\"\"\"Contracts a list of tensors or nodes according to a tensor network \n specification.\n@@ -79,16 +81,18 @@\n if all elements of `tensors` are `BaseNode` objects, else\n it is returned as a `Tensor` object.\n \"\"\"\n- if backend and (backend not in backend_factory._BACKENDS):\n- raise ValueError(\"Backend '{}' does not exist\".format(backend))\n if backend is None:\n backend = get_default_backend()\n+ if isinstance(backend, BaseBackend):\n+ backend_obj = backend\n+ else:\n+ backend_obj = backend_factory.get_backend(backend)\n \n are_nodes = [isinstance(t, network_components.BaseNode) for t in tensors]\n nodes = {t for t in tensors if isinstance(t, network_components.BaseNode)}\n- if not all([n.backend.name == backend for n in nodes]):\n- raise ValueError(\n- \"Some nodes have backends different from '{}'\".format(backend))\n+ if not all([n.backend.name == backend_obj.name for n in nodes]):\n+ raise ValueError(\"Some nodes have backends different from '{}'\".format(\n+ backend_obj.name))\n \n _tensors = []\n for t in tensors:\n@@ -102,7 +106,7 @@\n network_structure,\n con_order=con_order,\n out_order=out_order,\n- backend=backend)\n+ backend=backend_obj)\n \n nodes = set(nodes) # we don't need the ordering here\n \n@@ -160,7 +164,7 @@\n network_structure: Sequence[Sequence],\n con_order: Optional[Sequence] = None,\n out_order: Optional[Sequence] = None,\n- backend: Optional[Text] = None\n+ backend: Optional[Union[Text, BaseBackend]] = None\n ) -> Tuple[List[network_components.BaseNode], List[network_components.Edge],\n List[network_components.Edge]]:\n r\"\"\"Creates a network from a list of tensors according to `tensors`.\n@@ -182,9 +186,8 @@\n network_structure: List of lists specifying the tensor network.\n con_order: List of edge labels specifying the contraction order.\n out_order: List of edge labels specifying the output order.\n- backend: String specifying the backend to use. Defaults to the default\n- TensorNetwork backend.\n-\n+ backend: String or BaseBackend object specifying the backend to use. 
\n+ Defaults to the default TensorNetwork backend.\n Returns:\n nodes: List of constructed nodes in the same order as given in `tensors`.\n con_edges: List of internal `Edge` objects in contraction order.\n@@ -247,8 +250,9 @@\n \n \n def _build_network(\n- tensors: Sequence[Tensor], network_structure: Sequence[Sequence],\n- backend: Text\n+ tensors: Sequence[Tensor],\n+ network_structure: Sequence[Sequence],\n+ backend: Optional[Union[BaseBackend, Text]] = None,\n ) -> Tuple[List[network_components.BaseNode], Dict[Any,\n network_components.Edge]]:\n nodes = []\n", "issue": "ncon should take a BaseBackend object\nSince we are now supporting passing `BaseBackend` to `Node` constructors, we should also\r\nallow it in `ncon`\n", "before_files": [{"content": "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"NCON interface to TensorNetwork.\"\"\"\n\nimport warnings\nfrom typing import Any, Sequence, List, Optional, Union, Text, Tuple, Dict\nfrom tensornetwork import network_components\nfrom tensornetwork.backend_contextmanager import get_default_backend\nfrom tensornetwork.backends import backend_factory\nTensor = Any\n\n\ndef ncon(\n tensors: Sequence[Union[network_components.BaseNode, Tensor]],\n network_structure: Sequence[Sequence],\n con_order: Optional[Sequence] = None,\n out_order: Optional[Sequence] = None,\n backend: Optional[Text] = None\n) -> Union[network_components.BaseNode, Tensor]:\n r\"\"\"Contracts a list of tensors or nodes according to a tensor network \n specification.\n\n The network is provided as a list of lists, one for each\n tensor, specifying labels for the edges connected to that tensor.\n\n If a contraction order `con_order` and an output order `out_order`\n are both provided, the edge labels can be anything.\n Otherwise (`con_order == None or out_order == None`), the edge labels \n must be nonzero integers and edges will be contracted in ascending order.\n Negative integers denote the (dangling) indices of the output tensor,\n which will be in descending order, e.g. `[-1,-2,-3,...]`.\n\n For example, matrix multiplication:\n\n .. code-block:: python\n\n A = np.array([[1.0, 2.0], [3.0, 4.0]])\n B = np.array([[1.0, 1.0], [0.0, 1.0]])\n ncon([A,B], [(-1, 1), (1, -2)])\n\n Matrix trace:\n\n .. code-block:: python\n\n A = np.array([[1.0, 2.0], [3.0, 4.0]])\n ncon([A], [(1, 1)]) # 5.0\n\n Note: \n The reason `0` is not allowed as an edge label without manually\n specifying the contraction order is to maintain compatibility with the\n `original NCON implementation`_. However, the use of `0` in `con_order` \n to denote outer products is not (currently) \n supported in this implementation.\n \n .. 
_original NCON implementation:\n https://arxiv.org/abs/1402.0939\n\n Args:\n tensors: List of `Tensors` or `BaseNodes`.\n network_structure: List of lists specifying the tensor network structure.\n con_order: List of edge labels specifying the contraction order.\n out_order: List of edge labels specifying the output order.\n backend: String specifying the backend to use. Defaults to\n `tensornetwork.backend_contextmanager.get_default_backend`.\n\n Returns:\n The result of the contraction. The result is returned as a `Node`\n if all elements of `tensors` are `BaseNode` objects, else\n it is returned as a `Tensor` object.\n \"\"\"\n if backend and (backend not in backend_factory._BACKENDS):\n raise ValueError(\"Backend '{}' does not exist\".format(backend))\n if backend is None:\n backend = get_default_backend()\n\n are_nodes = [isinstance(t, network_components.BaseNode) for t in tensors]\n nodes = {t for t in tensors if isinstance(t, network_components.BaseNode)}\n if not all([n.backend.name == backend for n in nodes]):\n raise ValueError(\n \"Some nodes have backends different from '{}'\".format(backend))\n\n _tensors = []\n for t in tensors:\n if isinstance(t, network_components.BaseNode):\n _tensors.append(t.tensor)\n else:\n _tensors.append(t)\n\n nodes, con_edges, out_edges = ncon_network(\n _tensors,\n network_structure,\n con_order=con_order,\n out_order=out_order,\n backend=backend)\n\n nodes = set(nodes) # we don't need the ordering here\n\n # Reverse the list so we can pop from the end: O(1).\n con_edges = con_edges[::-1]\n while con_edges:\n nodes_to_contract = con_edges[-1].get_nodes()\n edges_to_contract = network_components.get_shared_edges(*nodes_to_contract)\n\n # Eat up all parallel edges that are adjacent in the ordering.\n adjacent_parallel_edges = set()\n for edge in reversed(con_edges):\n if edge in edges_to_contract:\n adjacent_parallel_edges.add(edge)\n else:\n break\n con_edges = con_edges[:-len(adjacent_parallel_edges)]\n\n # In an optimal ordering, all edges connecting a given pair of nodes are\n # adjacent in con_order. If this is not the case, warn the user.\n leftovers = edges_to_contract - adjacent_parallel_edges\n if leftovers:\n warnings.warn(\n \"Suboptimal ordering detected. Edges {} are not adjacent in the \"\n \"contraction order to edges {}, connecting nodes {}. 
Deviating from \"\n \"the specified ordering!\".format(\n list(map(str, leftovers)),\n list(map(str, adjacent_parallel_edges)),\n list(map(str, nodes_to_contract))))\n con_edges = [e for e in con_edges if e not in edges_to_contract]\n\n if set(nodes_to_contract) == nodes:\n # This contraction produces the final output, so order the edges\n # here to avoid transposes in some cases.\n contraction_output_order = out_edges\n else:\n contraction_output_order = None\n\n nodes = nodes - set(nodes_to_contract)\n nodes.add(\n network_components.contract_between(\n *nodes_to_contract,\n name=\"con({},{})\".format(*nodes_to_contract),\n output_edge_order=contraction_output_order))\n\n # TODO: More efficient ordering of products based on out_edges\n res_node = network_components.outer_product_final_nodes(nodes, out_edges)\n if all(are_nodes):\n return res_node\n return res_node.tensor\n\n\ndef ncon_network(\n tensors: Sequence[Tensor],\n network_structure: Sequence[Sequence],\n con_order: Optional[Sequence] = None,\n out_order: Optional[Sequence] = None,\n backend: Optional[Text] = None\n) -> Tuple[List[network_components.BaseNode], List[network_components.Edge],\n List[network_components.Edge]]:\n r\"\"\"Creates a network from a list of tensors according to `tensors`.\n\n The network is provided as a list of lists, one for each\n tensor, specifying labels for the edges connected to that tensor.\n\n If a contraction order `con_order` and an output order `out_order`\n are both provided, the edge labels can be anything.\n Otherwise (`con_order == None or out_order == None`), the edge labels \n must be integers and edges will be contracted in ascending order.\n Negative integers denote the (dangling) indices of the output tensor,\n which will be in descending order, e.g. `[-1,-2,-3,...]`.\n\n This is used internally by `ncon()`.\n\n Args:\n tensors: List of `Tensor`s.\n network_structure: List of lists specifying the tensor network.\n con_order: List of edge labels specifying the contraction order.\n out_order: List of edge labels specifying the output order.\n backend: String specifying the backend to use. 
Defaults to the default\n TensorNetwork backend.\n\n Returns:\n nodes: List of constructed nodes in the same order as given in `tensors`.\n con_edges: List of internal `Edge` objects in contraction order.\n out_edges: List of dangling `Edge` objects in output order.\n \"\"\"\n if len(tensors) != len(network_structure):\n raise ValueError('len(tensors) != len(network_structure)')\n\n nodes, edges = _build_network(tensors, network_structure, backend)\n\n if con_order is None:\n try:\n con_order = sorted((k for k in edges if k >= 0))\n if con_order and con_order[0] == 0:\n raise ValueError(\"'0' is not a valid edge label when the \"\n \"contraction order is not specified separately.\")\n except TypeError:\n raise ValueError(\"Non-integer edge label(s): {}\".format(\n list(edges.keys())))\n else:\n if len(con_order) != len(set(con_order)):\n raise ValueError(\"Duplicate labels in con_order: {}\".format(con_order))\n\n if out_order is None:\n try:\n out_order = sorted((k for k in edges if k < 0), reverse=True)\n except TypeError:\n raise ValueError(\"Non-integer edge label(s): {}\".format(\n list(edges.keys())))\n else:\n if len(out_order) != len(set(out_order)):\n raise ValueError(\"Duplicate labels in out_order: {}\".format(out_order))\n\n try:\n con_edges = [edges[k] for k in con_order]\n out_edges = [edges[k] for k in out_order]\n except KeyError as err:\n raise ValueError(\"Order contained an unknown edge label: {}\".format(\n err.args[0]))\n\n if len(con_edges) + len(out_edges) != len(edges):\n raise ValueError(\n \"Edges {} were not included in the contraction and output \"\n \"ordering.\".format(\n list(set(edges.keys()) - set(con_order) - set(out_order))))\n\n for e in con_edges:\n if e.is_dangling():\n raise ValueError(\n \"Contraction edge {} appears only once in the network.\".format(\n str(e)))\n\n for e in out_edges:\n if not e.is_dangling():\n raise ValueError(\n \"Output edge {} appears more than once in the network.\".format(\n str(e)))\n\n return nodes, con_edges, out_edges\n\n\ndef _build_network(\n tensors: Sequence[Tensor], network_structure: Sequence[Sequence],\n backend: Text\n) -> Tuple[List[network_components.BaseNode], Dict[Any,\n network_components.Edge]]:\n nodes = []\n edges = {}\n for i, (tensor, edge_lbls) in enumerate(zip(tensors, network_structure)):\n if len(tensor.shape) != len(edge_lbls):\n raise ValueError(\n \"Incorrect number of edge labels specified tensor {}\".format(i))\n if isinstance(tensor, network_components.BaseNode):\n node = tensor\n else:\n node = network_components.Node(\n tensor, name=\"tensor_{}\".format(i), backend=backend)\n\n nodes.append(node)\n\n for (axis_num, edge_lbl) in enumerate(edge_lbls):\n if edge_lbl not in edges:\n e = node[axis_num]\n e.set_name(str(edge_lbl))\n edges[edge_lbl] = e\n else:\n # This will raise an error if the edges are not dangling.\n e = network_components.connect(\n edges[edge_lbl], node[axis_num], name=str(edge_lbl))\n edges[edge_lbl] = e\n return nodes, edges\n", "path": "tensornetwork/ncon_interface.py"}], "after_files": [{"content": "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 
express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"NCON interface to TensorNetwork.\"\"\"\n\nimport warnings\nfrom typing import Any, Sequence, List, Optional, Union, Text, Tuple, Dict\nfrom tensornetwork import network_components\nfrom tensornetwork.backend_contextmanager import get_default_backend\nfrom tensornetwork.backends import backend_factory\nfrom tensornetwork.backends.base_backend import BaseBackend\n\nTensor = Any\n\n\ndef ncon(\n tensors: Sequence[Union[network_components.BaseNode, Tensor]],\n network_structure: Sequence[Sequence],\n con_order: Optional[Sequence] = None,\n out_order: Optional[Sequence] = None,\n backend: Optional[Union[Text, BaseBackend]] = None\n) -> Union[network_components.BaseNode, Tensor]:\n r\"\"\"Contracts a list of tensors or nodes according to a tensor network \n specification.\n\n The network is provided as a list of lists, one for each\n tensor, specifying labels for the edges connected to that tensor.\n\n If a contraction order `con_order` and an output order `out_order`\n are both provided, the edge labels can be anything.\n Otherwise (`con_order == None or out_order == None`), the edge labels \n must be nonzero integers and edges will be contracted in ascending order.\n Negative integers denote the (dangling) indices of the output tensor,\n which will be in descending order, e.g. `[-1,-2,-3,...]`.\n\n For example, matrix multiplication:\n\n .. code-block:: python\n\n A = np.array([[1.0, 2.0], [3.0, 4.0]])\n B = np.array([[1.0, 1.0], [0.0, 1.0]])\n ncon([A,B], [(-1, 1), (1, -2)])\n\n Matrix trace:\n\n .. code-block:: python\n\n A = np.array([[1.0, 2.0], [3.0, 4.0]])\n ncon([A], [(1, 1)]) # 5.0\n\n Note: \n The reason `0` is not allowed as an edge label without manually\n specifying the contraction order is to maintain compatibility with the\n `original NCON implementation`_. However, the use of `0` in `con_order` \n to denote outer products is not (currently) \n supported in this implementation.\n \n .. _original NCON implementation:\n https://arxiv.org/abs/1402.0939\n\n Args:\n tensors: List of `Tensors` or `BaseNodes`.\n network_structure: List of lists specifying the tensor network structure.\n con_order: List of edge labels specifying the contraction order.\n out_order: List of edge labels specifying the output order.\n backend: String specifying the backend to use. Defaults to\n `tensornetwork.backend_contextmanager.get_default_backend`.\n\n Returns:\n The result of the contraction. 
The result is returned as a `Node`\n if all elements of `tensors` are `BaseNode` objects, else\n it is returned as a `Tensor` object.\n \"\"\"\n if backend is None:\n backend = get_default_backend()\n if isinstance(backend, BaseBackend):\n backend_obj = backend\n else:\n backend_obj = backend_factory.get_backend(backend)\n\n are_nodes = [isinstance(t, network_components.BaseNode) for t in tensors]\n nodes = {t for t in tensors if isinstance(t, network_components.BaseNode)}\n if not all([n.backend.name == backend_obj.name for n in nodes]):\n raise ValueError(\"Some nodes have backends different from '{}'\".format(\n backend_obj.name))\n\n _tensors = []\n for t in tensors:\n if isinstance(t, network_components.BaseNode):\n _tensors.append(t.tensor)\n else:\n _tensors.append(t)\n\n nodes, con_edges, out_edges = ncon_network(\n _tensors,\n network_structure,\n con_order=con_order,\n out_order=out_order,\n backend=backend_obj)\n\n nodes = set(nodes) # we don't need the ordering here\n\n # Reverse the list so we can pop from the end: O(1).\n con_edges = con_edges[::-1]\n while con_edges:\n nodes_to_contract = con_edges[-1].get_nodes()\n edges_to_contract = network_components.get_shared_edges(*nodes_to_contract)\n\n # Eat up all parallel edges that are adjacent in the ordering.\n adjacent_parallel_edges = set()\n for edge in reversed(con_edges):\n if edge in edges_to_contract:\n adjacent_parallel_edges.add(edge)\n else:\n break\n con_edges = con_edges[:-len(adjacent_parallel_edges)]\n\n # In an optimal ordering, all edges connecting a given pair of nodes are\n # adjacent in con_order. If this is not the case, warn the user.\n leftovers = edges_to_contract - adjacent_parallel_edges\n if leftovers:\n warnings.warn(\n \"Suboptimal ordering detected. Edges {} are not adjacent in the \"\n \"contraction order to edges {}, connecting nodes {}. 
Deviating from \"\n \"the specified ordering!\".format(\n list(map(str, leftovers)),\n list(map(str, adjacent_parallel_edges)),\n list(map(str, nodes_to_contract))))\n con_edges = [e for e in con_edges if e not in edges_to_contract]\n\n if set(nodes_to_contract) == nodes:\n # This contraction produces the final output, so order the edges\n # here to avoid transposes in some cases.\n contraction_output_order = out_edges\n else:\n contraction_output_order = None\n\n nodes = nodes - set(nodes_to_contract)\n nodes.add(\n network_components.contract_between(\n *nodes_to_contract,\n name=\"con({},{})\".format(*nodes_to_contract),\n output_edge_order=contraction_output_order))\n\n # TODO: More efficient ordering of products based on out_edges\n res_node = network_components.outer_product_final_nodes(nodes, out_edges)\n if all(are_nodes):\n return res_node\n return res_node.tensor\n\n\ndef ncon_network(\n tensors: Sequence[Tensor],\n network_structure: Sequence[Sequence],\n con_order: Optional[Sequence] = None,\n out_order: Optional[Sequence] = None,\n backend: Optional[Union[Text, BaseBackend]] = None\n) -> Tuple[List[network_components.BaseNode], List[network_components.Edge],\n List[network_components.Edge]]:\n r\"\"\"Creates a network from a list of tensors according to `tensors`.\n\n The network is provided as a list of lists, one for each\n tensor, specifying labels for the edges connected to that tensor.\n\n If a contraction order `con_order` and an output order `out_order`\n are both provided, the edge labels can be anything.\n Otherwise (`con_order == None or out_order == None`), the edge labels \n must be integers and edges will be contracted in ascending order.\n Negative integers denote the (dangling) indices of the output tensor,\n which will be in descending order, e.g. `[-1,-2,-3,...]`.\n\n This is used internally by `ncon()`.\n\n Args:\n tensors: List of `Tensor`s.\n network_structure: List of lists specifying the tensor network.\n con_order: List of edge labels specifying the contraction order.\n out_order: List of edge labels specifying the output order.\n backend: String or BaseBackend object specifying the backend to use. 
\n Defaults to the default TensorNetwork backend.\n Returns:\n nodes: List of constructed nodes in the same order as given in `tensors`.\n con_edges: List of internal `Edge` objects in contraction order.\n out_edges: List of dangling `Edge` objects in output order.\n \"\"\"\n if len(tensors) != len(network_structure):\n raise ValueError('len(tensors) != len(network_structure)')\n\n nodes, edges = _build_network(tensors, network_structure, backend)\n\n if con_order is None:\n try:\n con_order = sorted((k for k in edges if k >= 0))\n if con_order and con_order[0] == 0:\n raise ValueError(\"'0' is not a valid edge label when the \"\n \"contraction order is not specified separately.\")\n except TypeError:\n raise ValueError(\"Non-integer edge label(s): {}\".format(\n list(edges.keys())))\n else:\n if len(con_order) != len(set(con_order)):\n raise ValueError(\"Duplicate labels in con_order: {}\".format(con_order))\n\n if out_order is None:\n try:\n out_order = sorted((k for k in edges if k < 0), reverse=True)\n except TypeError:\n raise ValueError(\"Non-integer edge label(s): {}\".format(\n list(edges.keys())))\n else:\n if len(out_order) != len(set(out_order)):\n raise ValueError(\"Duplicate labels in out_order: {}\".format(out_order))\n\n try:\n con_edges = [edges[k] for k in con_order]\n out_edges = [edges[k] for k in out_order]\n except KeyError as err:\n raise ValueError(\"Order contained an unknown edge label: {}\".format(\n err.args[0]))\n\n if len(con_edges) + len(out_edges) != len(edges):\n raise ValueError(\n \"Edges {} were not included in the contraction and output \"\n \"ordering.\".format(\n list(set(edges.keys()) - set(con_order) - set(out_order))))\n\n for e in con_edges:\n if e.is_dangling():\n raise ValueError(\n \"Contraction edge {} appears only once in the network.\".format(\n str(e)))\n\n for e in out_edges:\n if not e.is_dangling():\n raise ValueError(\n \"Output edge {} appears more than once in the network.\".format(\n str(e)))\n\n return nodes, con_edges, out_edges\n\n\ndef _build_network(\n tensors: Sequence[Tensor],\n network_structure: Sequence[Sequence],\n backend: Optional[Union[BaseBackend, Text]] = None,\n) -> Tuple[List[network_components.BaseNode], Dict[Any,\n network_components.Edge]]:\n nodes = []\n edges = {}\n for i, (tensor, edge_lbls) in enumerate(zip(tensors, network_structure)):\n if len(tensor.shape) != len(edge_lbls):\n raise ValueError(\n \"Incorrect number of edge labels specified tensor {}\".format(i))\n if isinstance(tensor, network_components.BaseNode):\n node = tensor\n else:\n node = network_components.Node(\n tensor, name=\"tensor_{}\".format(i), backend=backend)\n\n nodes.append(node)\n\n for (axis_num, edge_lbl) in enumerate(edge_lbls):\n if edge_lbl not in edges:\n e = node[axis_num]\n e.set_name(str(edge_lbl))\n edges[edge_lbl] = e\n else:\n # This will raise an error if the edges are not dangling.\n e = network_components.connect(\n edges[edge_lbl], node[axis_num], name=str(edge_lbl))\n edges[edge_lbl] = e\n return nodes, edges\n", "path": "tensornetwork/ncon_interface.py"}]} | 3,499 | 860 |
gh_patches_debug_13901 | rasdani/github-patches | git_diff | fossasia__open-event-server-1396 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Category/type hashtags are not clickable
Make the category/top hashtags under each event element in home page and search page clickable.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `open_event/views/admin/home.py`
Content:
```
1 """Copyright 2015 Rafal Kowalski"""
2 import logging
3 import os
4 from urllib2 import urlopen
5
6 from flask import url_for, redirect, request, session, send_from_directory
7 from flask.ext import login
8 from flask_admin import expose
9 from flask_admin.base import AdminIndexView
10 from flask.ext.scrypt import generate_password_hash
11 from wtforms import ValidationError
12
13 from open_event.helpers.flask_helpers import get_real_ip, slugify
14 from ...helpers.data import DataManager, save_to_db, get_google_auth, get_facebook_auth, create_user_password, \
15 user_logged_in, record_activity
16 from ...helpers.data_getter import DataGetter
17 from ...helpers.helpers import send_email_with_reset_password_hash, send_email_confirmation, \
18 get_serializer, get_request_stats
19 from open_event.helpers.oauth import OAuth, FbOAuth
20 from open_event.models.user import User
21 import geoip2.database
22
23 def intended_url():
24 return request.args.get('next') or url_for('.index')
25
26 def record_user_login_logout(template, user):
27 req_stats = get_request_stats()
28 record_activity(
29 template,
30 user=user,
31 **req_stats
32 )
33
34 class MyHomeView(AdminIndexView):
35
36 @expose('/')
37 def index(self):
38 call_for_speakers_events = DataGetter.get_call_for_speakers_events().limit(12).all()
39 upcoming_events = DataGetter.get_all_published_events().limit(12).all()
40 return self.render('gentelella/index.html',
41 call_for_speakers_events=call_for_speakers_events,
42 upcoming_events=upcoming_events)
43
44 @expose('/login/', methods=('GET', 'POST'))
45 def login_view(self):
46 if request.method == 'GET':
47 google = get_google_auth()
48 auth_url, state = google.authorization_url(OAuth.get_auth_uri(), access_type='offline')
49 session['oauth_state'] = state
50
51 # Add Facebook Oauth 2.0 login
52 facebook = get_facebook_auth()
53 fb_auth_url, state = facebook.authorization_url(FbOAuth.get_auth_uri(), access_type='offline')
54 session['fb_oauth_state'] = state
55 return self.render('/gentelella/admin/login/login.html', auth_url=auth_url, fb_auth_url=fb_auth_url)
56 if request.method == 'POST':
57 email = request.form['email']
58 user = DataGetter.get_user_by_email(email)
59 if user is None:
60 logging.info('No such user')
61 return redirect(url_for('admin.login_view'))
62 if user.password != generate_password_hash(request.form['password'], user.salt):
63 logging.info('Password Incorrect')
64 return redirect(url_for('admin.login_view'))
65 login.login_user(user)
66 record_user_login_logout('user_login', user)
67 logging.info('logged successfully')
68 user_logged_in(user)
69 return redirect(intended_url())
70
71 @expose('/register/', methods=('GET', 'POST'))
72 def register_view(self):
73 """Register view page"""
74 if request.method == 'GET':
75 return self.render('/gentelella/admin/login/register.html')
76 if request.method == 'POST':
77 users = DataGetter.get_all_users()
78 for user in users:
79 if user.email == request.form['email']:
80 raise ValidationError('Email already exists')
81 logging.info("Registration under process")
82 s = get_serializer()
83 data = [request.form['email'], request.form['password']]
84 user = DataManager.create_user(data)
85 form_hash = s.dumps(data)
86 link = url_for('.create_account_after_confirmation_view', hash=form_hash, _external=True)
87 send_email_confirmation(request.form, link)
88 login.login_user(user)
89 record_user_login_logout('user_login', user)
90 logging.info('logged successfully')
91 user_logged_in(user)
92 return redirect(intended_url())
93
94 @expose('/account/create/<hash>', methods=('GET',))
95 def create_account_after_confirmation_view(self, hash):
96 s = get_serializer()
97 data = s.loads(hash)
98 user = User.query.filter_by(email=data[0]).first()
99 user.is_verified = True
100 save_to_db(user, 'User updated')
101 login.login_user(user)
102 record_user_login_logout('user_login', user)
103 user_logged_in(user)
104 return redirect(intended_url())
105
106 @expose('/password/new/<email>', methods=('GET', 'POST'))
107 def create_password_after_oauth_login(self, email):
108 s = get_serializer()
109 email = s.loads(email)
110 user = DataGetter.get_user_by_email(email)
111 if request.method == 'GET':
112 return self.render('/gentelella/admin/login/create_password.html')
113 if request.method == 'POST':
114 user = create_user_password(request.form, user)
115 if user is not None:
116 login.login_user(user)
117 record_user_login_logout('user_login', user)
118 user_logged_in(user)
119 return redirect(intended_url())
120
121 @expose('/password/reset', methods=('GET', 'POST'))
122 def password_reset_view(self):
123 """Password reset view"""
124 if request.method == 'GET':
125 return self.render('/gentelella/admin/login/password_reminder.html')
126 if request.method == 'POST':
127 email = request.form['email']
128 user = DataGetter.get_user_by_email(email)
129 if user:
130 link = request.host + url_for(".change_password_view", hash=user.reset_password)
131 send_email_with_reset_password_hash(email, link)
132 return redirect(intended_url())
133
134 @expose('/reset_password/<hash>', methods=('GET', 'POST'))
135 def change_password_view(self, hash):
136 """Change password view"""
137 if request.method == 'GET':
138 return self.render('/gentelella/admin/login/change_password.html')
139 if request.method == 'POST':
140 DataManager.reset_password(request.form, hash)
141 return redirect(url_for('.index'))
142
143 @expose('/logout/')
144 def logout_view(self):
145 """Logout method which redirect to index"""
146 record_user_login_logout('user_logout', login.current_user)
147 login.logout_user()
148 return redirect(url_for('.index'))
149
150 @expose('/set_role', methods=('GET', 'POST'))
151 def set_role(self):
152 """Set user role method"""
153 id = request.args['id']
154 role = request.args['roles']
155 user = DataGetter.get_user(id)
156 user.role = role
157 save_to_db(user, "User Role updated")
158 return redirect(url_for('.roles_manager'))
159
160 @expose('/manage_roles')
161 def roles_manager(self):
162 """Roles manager view"""
163 users = DataGetter.get_all_users()
164 events = DataGetter.get_all_events()
165 return self.render('admin/role_manager.html',
166 users=users,
167 events=events)
168
169 @expose('/sessions/', methods=('GET',))
170 def view_user_sessions(self):
171 sessions = DataGetter.get_user_sessions()
172 return self.render('/gentelella/admin/session/user_sessions.html',
173 sessions=sessions)
174
175 @expose('/forbidden/', methods=('GET',))
176 def forbidden_view(self):
177 return self.render('/gentelella/admin/forbidden.html')
178
179 @expose('/browse/', methods=('GET',))
180 def browse_view(self):
181 try:
182 reader = geoip2.database.Reader(os.path.realpath('.') + '/static/data/GeoLite2-Country.mmdb')
183 ip = get_real_ip()
184 if ip == '127.0.0.1' or ip == '0.0.0.0':
185 ip = urlopen('http://ip.42.pl/raw').read() # On local test environments
186 response = reader.country(ip)
187 country = response.country.name
188 except:
189 country = "United States"
190 return redirect(url_for('explore.explore_view', location=slugify(country)))
191
192
193
194
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/open_event/views/admin/home.py b/open_event/views/admin/home.py
--- a/open_event/views/admin/home.py
+++ b/open_event/views/admin/home.py
@@ -1,6 +1,7 @@
"""Copyright 2015 Rafal Kowalski"""
import logging
import os
+import urllib
from urllib2 import urlopen
from flask import url_for, redirect, request, session, send_from_directory
@@ -187,8 +188,5 @@
country = response.country.name
except:
country = "United States"
- return redirect(url_for('explore.explore_view', location=slugify(country)))
-
-
-
-
+ return redirect(url_for('explore.explore_view', location=slugify(country)) + '?' +
+ urllib.urlencode(request.args))
| {"golden_diff": "diff --git a/open_event/views/admin/home.py b/open_event/views/admin/home.py\n--- a/open_event/views/admin/home.py\n+++ b/open_event/views/admin/home.py\n@@ -1,6 +1,7 @@\n \"\"\"Copyright 2015 Rafal Kowalski\"\"\"\n import logging\n import os\n+import urllib\n from urllib2 import urlopen\n \n from flask import url_for, redirect, request, session, send_from_directory\n@@ -187,8 +188,5 @@\n country = response.country.name\n except:\n country = \"United States\"\n- return redirect(url_for('explore.explore_view', location=slugify(country)))\n-\n-\n-\n-\n+ return redirect(url_for('explore.explore_view', location=slugify(country)) + '?' +\n+ urllib.urlencode(request.args))\n", "issue": "Category/type hashtags are not clickable \nMake the category/top hashtags under each event element in home page and search page clickable.\n\n", "before_files": [{"content": "\"\"\"Copyright 2015 Rafal Kowalski\"\"\"\nimport logging\nimport os\nfrom urllib2 import urlopen\n\nfrom flask import url_for, redirect, request, session, send_from_directory\nfrom flask.ext import login\nfrom flask_admin import expose\nfrom flask_admin.base import AdminIndexView\nfrom flask.ext.scrypt import generate_password_hash\nfrom wtforms import ValidationError\n\nfrom open_event.helpers.flask_helpers import get_real_ip, slugify\nfrom ...helpers.data import DataManager, save_to_db, get_google_auth, get_facebook_auth, create_user_password, \\\n user_logged_in, record_activity\nfrom ...helpers.data_getter import DataGetter\nfrom ...helpers.helpers import send_email_with_reset_password_hash, send_email_confirmation, \\\n get_serializer, get_request_stats\nfrom open_event.helpers.oauth import OAuth, FbOAuth\nfrom open_event.models.user import User\nimport geoip2.database\n\ndef intended_url():\n return request.args.get('next') or url_for('.index')\n\ndef record_user_login_logout(template, user):\n req_stats = get_request_stats()\n record_activity(\n template,\n user=user,\n **req_stats\n )\n\nclass MyHomeView(AdminIndexView):\n\n @expose('/')\n def index(self):\n call_for_speakers_events = DataGetter.get_call_for_speakers_events().limit(12).all()\n upcoming_events = DataGetter.get_all_published_events().limit(12).all()\n return self.render('gentelella/index.html',\n call_for_speakers_events=call_for_speakers_events,\n upcoming_events=upcoming_events)\n\n @expose('/login/', methods=('GET', 'POST'))\n def login_view(self):\n if request.method == 'GET':\n google = get_google_auth()\n auth_url, state = google.authorization_url(OAuth.get_auth_uri(), access_type='offline')\n session['oauth_state'] = state\n\n # Add Facebook Oauth 2.0 login\n facebook = get_facebook_auth()\n fb_auth_url, state = facebook.authorization_url(FbOAuth.get_auth_uri(), access_type='offline')\n session['fb_oauth_state'] = state\n return self.render('/gentelella/admin/login/login.html', auth_url=auth_url, fb_auth_url=fb_auth_url)\n if request.method == 'POST':\n email = request.form['email']\n user = DataGetter.get_user_by_email(email)\n if user is None:\n logging.info('No such user')\n return redirect(url_for('admin.login_view'))\n if user.password != generate_password_hash(request.form['password'], user.salt):\n logging.info('Password Incorrect')\n return redirect(url_for('admin.login_view'))\n login.login_user(user)\n record_user_login_logout('user_login', user)\n logging.info('logged successfully')\n user_logged_in(user)\n return redirect(intended_url())\n\n @expose('/register/', methods=('GET', 'POST'))\n def register_view(self):\n \"\"\"Register 
view page\"\"\"\n if request.method == 'GET':\n return self.render('/gentelella/admin/login/register.html')\n if request.method == 'POST':\n users = DataGetter.get_all_users()\n for user in users:\n if user.email == request.form['email']:\n raise ValidationError('Email already exists')\n logging.info(\"Registration under process\")\n s = get_serializer()\n data = [request.form['email'], request.form['password']]\n user = DataManager.create_user(data)\n form_hash = s.dumps(data)\n link = url_for('.create_account_after_confirmation_view', hash=form_hash, _external=True)\n send_email_confirmation(request.form, link)\n login.login_user(user)\n record_user_login_logout('user_login', user)\n logging.info('logged successfully')\n user_logged_in(user)\n return redirect(intended_url())\n\n @expose('/account/create/<hash>', methods=('GET',))\n def create_account_after_confirmation_view(self, hash):\n s = get_serializer()\n data = s.loads(hash)\n user = User.query.filter_by(email=data[0]).first()\n user.is_verified = True\n save_to_db(user, 'User updated')\n login.login_user(user)\n record_user_login_logout('user_login', user)\n user_logged_in(user)\n return redirect(intended_url())\n\n @expose('/password/new/<email>', methods=('GET', 'POST'))\n def create_password_after_oauth_login(self, email):\n s = get_serializer()\n email = s.loads(email)\n user = DataGetter.get_user_by_email(email)\n if request.method == 'GET':\n return self.render('/gentelella/admin/login/create_password.html')\n if request.method == 'POST':\n user = create_user_password(request.form, user)\n if user is not None:\n login.login_user(user)\n record_user_login_logout('user_login', user)\n user_logged_in(user)\n return redirect(intended_url())\n\n @expose('/password/reset', methods=('GET', 'POST'))\n def password_reset_view(self):\n \"\"\"Password reset view\"\"\"\n if request.method == 'GET':\n return self.render('/gentelella/admin/login/password_reminder.html')\n if request.method == 'POST':\n email = request.form['email']\n user = DataGetter.get_user_by_email(email)\n if user:\n link = request.host + url_for(\".change_password_view\", hash=user.reset_password)\n send_email_with_reset_password_hash(email, link)\n return redirect(intended_url())\n\n @expose('/reset_password/<hash>', methods=('GET', 'POST'))\n def change_password_view(self, hash):\n \"\"\"Change password view\"\"\"\n if request.method == 'GET':\n return self.render('/gentelella/admin/login/change_password.html')\n if request.method == 'POST':\n DataManager.reset_password(request.form, hash)\n return redirect(url_for('.index'))\n\n @expose('/logout/')\n def logout_view(self):\n \"\"\"Logout method which redirect to index\"\"\"\n record_user_login_logout('user_logout', login.current_user)\n login.logout_user()\n return redirect(url_for('.index'))\n\n @expose('/set_role', methods=('GET', 'POST'))\n def set_role(self):\n \"\"\"Set user role method\"\"\"\n id = request.args['id']\n role = request.args['roles']\n user = DataGetter.get_user(id)\n user.role = role\n save_to_db(user, \"User Role updated\")\n return redirect(url_for('.roles_manager'))\n\n @expose('/manage_roles')\n def roles_manager(self):\n \"\"\"Roles manager view\"\"\"\n users = DataGetter.get_all_users()\n events = DataGetter.get_all_events()\n return self.render('admin/role_manager.html',\n users=users,\n events=events)\n\n @expose('/sessions/', methods=('GET',))\n def view_user_sessions(self):\n sessions = DataGetter.get_user_sessions()\n return 
self.render('/gentelella/admin/session/user_sessions.html',\n sessions=sessions)\n\n @expose('/forbidden/', methods=('GET',))\n def forbidden_view(self):\n return self.render('/gentelella/admin/forbidden.html')\n\n @expose('/browse/', methods=('GET',))\n def browse_view(self):\n try:\n reader = geoip2.database.Reader(os.path.realpath('.') + '/static/data/GeoLite2-Country.mmdb')\n ip = get_real_ip()\n if ip == '127.0.0.1' or ip == '0.0.0.0':\n ip = urlopen('http://ip.42.pl/raw').read() # On local test environments\n response = reader.country(ip)\n country = response.country.name\n except:\n country = \"United States\"\n return redirect(url_for('explore.explore_view', location=slugify(country)))\n\n\n\n\n", "path": "open_event/views/admin/home.py"}], "after_files": [{"content": "\"\"\"Copyright 2015 Rafal Kowalski\"\"\"\nimport logging\nimport os\nimport urllib\nfrom urllib2 import urlopen\n\nfrom flask import url_for, redirect, request, session, send_from_directory\nfrom flask.ext import login\nfrom flask_admin import expose\nfrom flask_admin.base import AdminIndexView\nfrom flask.ext.scrypt import generate_password_hash\nfrom wtforms import ValidationError\n\nfrom open_event.helpers.flask_helpers import get_real_ip, slugify\nfrom ...helpers.data import DataManager, save_to_db, get_google_auth, get_facebook_auth, create_user_password, \\\n user_logged_in, record_activity\nfrom ...helpers.data_getter import DataGetter\nfrom ...helpers.helpers import send_email_with_reset_password_hash, send_email_confirmation, \\\n get_serializer, get_request_stats\nfrom open_event.helpers.oauth import OAuth, FbOAuth\nfrom open_event.models.user import User\nimport geoip2.database\n\ndef intended_url():\n return request.args.get('next') or url_for('.index')\n\ndef record_user_login_logout(template, user):\n req_stats = get_request_stats()\n record_activity(\n template,\n user=user,\n **req_stats\n )\n\nclass MyHomeView(AdminIndexView):\n\n @expose('/')\n def index(self):\n call_for_speakers_events = DataGetter.get_call_for_speakers_events().limit(12).all()\n upcoming_events = DataGetter.get_all_published_events().limit(12).all()\n return self.render('gentelella/index.html',\n call_for_speakers_events=call_for_speakers_events,\n upcoming_events=upcoming_events)\n\n @expose('/login/', methods=('GET', 'POST'))\n def login_view(self):\n if request.method == 'GET':\n google = get_google_auth()\n auth_url, state = google.authorization_url(OAuth.get_auth_uri(), access_type='offline')\n session['oauth_state'] = state\n\n # Add Facebook Oauth 2.0 login\n facebook = get_facebook_auth()\n fb_auth_url, state = facebook.authorization_url(FbOAuth.get_auth_uri(), access_type='offline')\n session['fb_oauth_state'] = state\n return self.render('/gentelella/admin/login/login.html', auth_url=auth_url, fb_auth_url=fb_auth_url)\n if request.method == 'POST':\n email = request.form['email']\n user = DataGetter.get_user_by_email(email)\n if user is None:\n logging.info('No such user')\n return redirect(url_for('admin.login_view'))\n if user.password != generate_password_hash(request.form['password'], user.salt):\n logging.info('Password Incorrect')\n return redirect(url_for('admin.login_view'))\n login.login_user(user)\n record_user_login_logout('user_login', user)\n logging.info('logged successfully')\n user_logged_in(user)\n return redirect(intended_url())\n\n @expose('/register/', methods=('GET', 'POST'))\n def register_view(self):\n \"\"\"Register view page\"\"\"\n if request.method == 'GET':\n return 
self.render('/gentelella/admin/login/register.html')\n if request.method == 'POST':\n users = DataGetter.get_all_users()\n for user in users:\n if user.email == request.form['email']:\n raise ValidationError('Email already exists')\n logging.info(\"Registration under process\")\n s = get_serializer()\n data = [request.form['email'], request.form['password']]\n user = DataManager.create_user(data)\n form_hash = s.dumps(data)\n link = url_for('.create_account_after_confirmation_view', hash=form_hash, _external=True)\n send_email_confirmation(request.form, link)\n login.login_user(user)\n record_user_login_logout('user_login', user)\n logging.info('logged successfully')\n user_logged_in(user)\n return redirect(intended_url())\n\n @expose('/account/create/<hash>', methods=('GET',))\n def create_account_after_confirmation_view(self, hash):\n s = get_serializer()\n data = s.loads(hash)\n user = User.query.filter_by(email=data[0]).first()\n user.is_verified = True\n save_to_db(user, 'User updated')\n login.login_user(user)\n record_user_login_logout('user_login', user)\n user_logged_in(user)\n return redirect(intended_url())\n\n @expose('/password/new/<email>', methods=('GET', 'POST'))\n def create_password_after_oauth_login(self, email):\n s = get_serializer()\n email = s.loads(email)\n user = DataGetter.get_user_by_email(email)\n if request.method == 'GET':\n return self.render('/gentelella/admin/login/create_password.html')\n if request.method == 'POST':\n user = create_user_password(request.form, user)\n if user is not None:\n login.login_user(user)\n record_user_login_logout('user_login', user)\n user_logged_in(user)\n return redirect(intended_url())\n\n @expose('/password/reset', methods=('GET', 'POST'))\n def password_reset_view(self):\n \"\"\"Password reset view\"\"\"\n if request.method == 'GET':\n return self.render('/gentelella/admin/login/password_reminder.html')\n if request.method == 'POST':\n email = request.form['email']\n user = DataGetter.get_user_by_email(email)\n if user:\n link = request.host + url_for(\".change_password_view\", hash=user.reset_password)\n send_email_with_reset_password_hash(email, link)\n return redirect(intended_url())\n\n @expose('/reset_password/<hash>', methods=('GET', 'POST'))\n def change_password_view(self, hash):\n \"\"\"Change password view\"\"\"\n if request.method == 'GET':\n return self.render('/gentelella/admin/login/change_password.html')\n if request.method == 'POST':\n DataManager.reset_password(request.form, hash)\n return redirect(url_for('.index'))\n\n @expose('/logout/')\n def logout_view(self):\n \"\"\"Logout method which redirect to index\"\"\"\n record_user_login_logout('user_logout', login.current_user)\n login.logout_user()\n return redirect(url_for('.index'))\n\n @expose('/set_role', methods=('GET', 'POST'))\n def set_role(self):\n \"\"\"Set user role method\"\"\"\n id = request.args['id']\n role = request.args['roles']\n user = DataGetter.get_user(id)\n user.role = role\n save_to_db(user, \"User Role updated\")\n return redirect(url_for('.roles_manager'))\n\n @expose('/manage_roles')\n def roles_manager(self):\n \"\"\"Roles manager view\"\"\"\n users = DataGetter.get_all_users()\n events = DataGetter.get_all_events()\n return self.render('admin/role_manager.html',\n users=users,\n events=events)\n\n @expose('/sessions/', methods=('GET',))\n def view_user_sessions(self):\n sessions = DataGetter.get_user_sessions()\n return self.render('/gentelella/admin/session/user_sessions.html',\n sessions=sessions)\n\n @expose('/forbidden/', 
methods=('GET',))\n def forbidden_view(self):\n return self.render('/gentelella/admin/forbidden.html')\n\n @expose('/browse/', methods=('GET',))\n def browse_view(self):\n try:\n reader = geoip2.database.Reader(os.path.realpath('.') + '/static/data/GeoLite2-Country.mmdb')\n ip = get_real_ip()\n if ip == '127.0.0.1' or ip == '0.0.0.0':\n ip = urlopen('http://ip.42.pl/raw').read() # On local test environments\n response = reader.country(ip)\n country = response.country.name\n except:\n country = \"United States\"\n return redirect(url_for('explore.explore_view', location=slugify(country)) + '?' +\n urllib.urlencode(request.args))\n", "path": "open_event/views/admin/home.py"}]} | 2,430 | 177 |
gh_patches_debug_23933 | rasdani/github-patches | git_diff | ietf-tools__datatracker-5109 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
secr.middleware.dbquery.QueryCountDebugMiddleware is unused.
Is this still needed in the source tree? Can it be removed?
cc @rpcross
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ietf/secr/middleware/dbquery.py`
Content:
```
1 # Copyright The IETF Trust 2014-2019, All Rights Reserved
2 #import logging
3
4 from django.db import connection
5 from django.utils.log import getLogger # type: ignore
6
7
8 logger = getLogger(__name__)
9 #logger.setLevel(logging.DEBUG)
10 #logger.addHandler(logging.FileHandler(settings.SECR_LOG_FILE))
11
12 class QueryCountDebugMiddleware(object):
13 """
14 This middleware will log the number of queries run
15 and the total time taken for each request (with a
16 status code of 200). It does not currently support
17 multi-db setups.
18 """
19 def process_response(self, request, response):
20 #assert False, request.path
21 logger.debug('called middleware. %s:%s' % (request.path,len(connection.queries)))
22 if response.status_code == 200:
23 total_time = 0
24 #for query in connection.queries:
25 # query_time = query.get('time')
26 # if query_time is None:
27 # django-debug-toolbar monkeypatches the connection
28 # cursor wrapper and adds extra information in each
29 # item in connection.queries. The query time is stored
30 # under the key "duration" rather than "time" and is
31 # in milliseconds, not seconds.
32 # query_time = query.get('duration', 0) / 1000
33 # total_time += float(query_time)
34 logger.debug('%s: %s queries run, total %s seconds' % (request.path,len(connection.queries), total_time))
35 return response
36
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ietf/secr/middleware/dbquery.py b/ietf/secr/middleware/dbquery.py
deleted file mode 100644
--- a/ietf/secr/middleware/dbquery.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright The IETF Trust 2014-2019, All Rights Reserved
-#import logging
-
-from django.db import connection
-from django.utils.log import getLogger # type: ignore
-
-
-logger = getLogger(__name__)
-#logger.setLevel(logging.DEBUG)
-#logger.addHandler(logging.FileHandler(settings.SECR_LOG_FILE))
-
-class QueryCountDebugMiddleware(object):
- """
- This middleware will log the number of queries run
- and the total time taken for each request (with a
- status code of 200). It does not currently support
- multi-db setups.
- """
- def process_response(self, request, response):
- #assert False, request.path
- logger.debug('called middleware. %s:%s' % (request.path,len(connection.queries)))
- if response.status_code == 200:
- total_time = 0
- #for query in connection.queries:
- # query_time = query.get('time')
- # if query_time is None:
- # django-debug-toolbar monkeypatches the connection
- # cursor wrapper and adds extra information in each
- # item in connection.queries. The query time is stored
- # under the key "duration" rather than "time" and is
- # in milliseconds, not seconds.
- # query_time = query.get('duration', 0) / 1000
- # total_time += float(query_time)
- logger.debug('%s: %s queries run, total %s seconds' % (request.path,len(connection.queries), total_time))
- return response
| {"golden_diff": "diff --git a/ietf/secr/middleware/dbquery.py b/ietf/secr/middleware/dbquery.py\ndeleted file mode 100644\n--- a/ietf/secr/middleware/dbquery.py\n+++ /dev/null\n@@ -1,35 +0,0 @@\n-# Copyright The IETF Trust 2014-2019, All Rights Reserved\n-#import logging\n-\n-from django.db import connection\n-from django.utils.log import getLogger # type: ignore\n-\n-\n-logger = getLogger(__name__)\n-#logger.setLevel(logging.DEBUG)\n-#logger.addHandler(logging.FileHandler(settings.SECR_LOG_FILE))\n-\n-class QueryCountDebugMiddleware(object):\n- \"\"\"\n- This middleware will log the number of queries run\n- and the total time taken for each request (with a\n- status code of 200). It does not currently support\n- multi-db setups.\n- \"\"\"\n- def process_response(self, request, response):\n- #assert False, request.path\n- logger.debug('called middleware. %s:%s' % (request.path,len(connection.queries)))\n- if response.status_code == 200:\n- total_time = 0\n- #for query in connection.queries:\n- # query_time = query.get('time')\n- # if query_time is None:\n- # django-debug-toolbar monkeypatches the connection\n- # cursor wrapper and adds extra information in each\n- # item in connection.queries. The query time is stored\n- # under the key \"duration\" rather than \"time\" and is\n- # in milliseconds, not seconds.\n- # query_time = query.get('duration', 0) / 1000\n- # total_time += float(query_time)\n- logger.debug('%s: %s queries run, total %s seconds' % (request.path,len(connection.queries), total_time))\n- return response\n", "issue": "secr.middleware.dbquery.QueryCountDebugMiddleware is unused.\nIs this still needed in the source tree? Can it be removed?\r\ncc @rpcross\n", "before_files": [{"content": "# Copyright The IETF Trust 2014-2019, All Rights Reserved\n#import logging\n\nfrom django.db import connection\nfrom django.utils.log import getLogger # type: ignore\n\n\nlogger = getLogger(__name__)\n#logger.setLevel(logging.DEBUG)\n#logger.addHandler(logging.FileHandler(settings.SECR_LOG_FILE))\n\nclass QueryCountDebugMiddleware(object):\n \"\"\"\n This middleware will log the number of queries run\n and the total time taken for each request (with a\n status code of 200). It does not currently support\n multi-db setups.\n \"\"\"\n def process_response(self, request, response):\n #assert False, request.path\n logger.debug('called middleware. %s:%s' % (request.path,len(connection.queries)))\n if response.status_code == 200:\n total_time = 0\n #for query in connection.queries:\n # query_time = query.get('time')\n # if query_time is None:\n # django-debug-toolbar monkeypatches the connection\n # cursor wrapper and adds extra information in each\n # item in connection.queries. The query time is stored\n # under the key \"duration\" rather than \"time\" and is\n # in milliseconds, not seconds.\n # query_time = query.get('duration', 0) / 1000\n # total_time += float(query_time)\n logger.debug('%s: %s queries run, total %s seconds' % (request.path,len(connection.queries), total_time))\n return response\n", "path": "ietf/secr/middleware/dbquery.py"}], "after_files": [{"content": null, "path": "ietf/secr/middleware/dbquery.py"}]} | 691 | 426 |
gh_patches_debug_8071 | rasdani/github-patches | git_diff | cal-itp__benefits-2023 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`Eligibility index`: update copy to be more consistent and provide context for user
| Desktop (showing MST) | Mobile (showing SBMTD) |
| --- | --- |
|  |  |
## Figma links
- [Desktop_Eligibility](https://www.figma.com/file/SeSd3LaLd6WkbEYhmtKpO3/Benefits-(Full-Application)?type=design&node-id=14387-12551&mode=design&t=11PIPi0dRtWNuQmK-4)
- [Mobile_Eligibility](https://www.figma.com/file/SeSd3LaLd6WkbEYhmtKpO3/Benefits-(Full-Application)?type=design&node-id=14646-18505&mode=design&t=11PIPi0dRtWNuQmK-4)
## Acceptance Criteria
<!-- Remember to consider edge cases -->
- [x] Body copy: first body sentence was updated
- [x] Remove agency name in the copy for the radio buttons subheaders
- [x] “MST Courtesy Card” is now “Courtesy Card”
- [x] "SBMTD Reduced Fare Mobility ID" is now "Reduced Fare Mobility ID"
- [x] Radio button descriptions were updated and for SBMTD, mentions the previous name
- [x] CTA copy is updated
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/eligibility/forms.py`
Content:
```
1 """
2 The eligibility application: Form definition for the eligibility verification flow.
3 """
4
5 import logging
6
7 from django import forms
8 from django.utils.translation import gettext_lazy as _
9
10 from benefits.core import models, recaptcha, widgets
11
12 logger = logging.getLogger(__name__)
13
14
15 class EligibilityVerifierSelectionForm(forms.Form):
16 """Form to capture eligibility verifier selection."""
17
18 action_url = "eligibility:index"
19 id = "form-verifier-selection"
20 method = "POST"
21
22 verifier = forms.ChoiceField(label="", widget=widgets.VerifierRadioSelect)
23 # sets label to empty string so the radio_select template can override the label style
24 submit_value = _("Choose this Benefit")
25
26 def __init__(self, agency: models.TransitAgency, *args, **kwargs):
27 super().__init__(*args, **kwargs)
28 verifiers = agency.active_verifiers
29
30 self.classes = "col-lg-8"
31 # second element is not used since we render the whole label using selection_label_template,
32 # therefore set to None
33 self.fields["verifier"].choices = [(v.id, None) for v in verifiers]
34 self.fields["verifier"].widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers}
35
36 def clean(self):
37 if not recaptcha.verify(self.data):
38 raise forms.ValidationError("reCAPTCHA failed")
39
40
41 class EligibilityVerificationForm(forms.Form):
42 """Form to collect eligibility verification details."""
43
44 action_url = "eligibility:confirm"
45 id = "form-eligibility-verification"
46 method = "POST"
47
48 submit_value = _("Check eligibility")
49 submitting_value = _("Checking")
50
51 _error_messages = {
52 "invalid": _("Check your input. The format looks wrong."),
53 "missing": _("This field is required."),
54 }
55
56 def __init__(
57 self,
58 title,
59 headline,
60 blurb,
61 name_label,
62 name_placeholder,
63 name_help_text,
64 sub_label,
65 sub_placeholder,
66 sub_help_text,
67 name_max_length=None,
68 sub_input_mode=None,
69 sub_max_length=None,
70 sub_pattern=None,
71 *args,
72 **kwargs,
73 ):
74 """Initialize a new EligibilityVerifier form.
75
76 Args:
77 title (str): The page (i.e. tab) title for the form's page.
78
79 headline (str): The <h1> on the form's page.
80
81 blurb (str): Intro <p> on the form's page.
82
83 name_label (str): Label for the name form field.
84
85 name_placeholder (str): Field placeholder for the name form field.
86
87 name_help_text (str): Extra help text for the name form field.
88
89 sub_label (str): Label for the sub form field.
90
91 sub_placeholder (str): Field placeholder for the sub form field.
92
93 sub_help_text (str): Extra help text for the sub form field.
94
95 name_max_length (int): The maximum length accepted for the 'name' API field before sending to this verifier
96
97 sub_input_mode (str): Input mode can be "numeric", "tel", "search", etc. to override default "text" keyboard on
98 mobile devices
99
100 sub_max_length (int): The maximum length accepted for the 'sub' API field before sending to this verifier
101
102 sub_pattern (str): A regular expression used to validate the 'sub' API field before sending to this verifier
103
104 Extra args and kwargs are passed through to the underlying django.forms.Form.
105 """
106 super().__init__(auto_id=True, label_suffix="", *args, **kwargs)
107
108 self.title = title
109 self.headline = headline
110 self.blurb = blurb
111
112 self.classes = "col-lg-6"
113 sub_widget = widgets.FormControlTextInput(placeholder=sub_placeholder)
114 if sub_pattern:
115 sub_widget.attrs.update({"pattern": sub_pattern})
116 if sub_input_mode:
117 sub_widget.attrs.update({"inputmode": sub_input_mode})
118 if sub_max_length:
119 sub_widget.attrs.update({"maxlength": sub_max_length})
120
121 self.fields["sub"] = forms.CharField(
122 label=sub_label,
123 widget=sub_widget,
124 help_text=sub_help_text,
125 )
126
127 name_widget = widgets.FormControlTextInput(placeholder=name_placeholder)
128 if name_max_length:
129 name_widget.attrs.update({"maxlength": name_max_length})
130
131 self.fields["name"] = forms.CharField(label=name_label, widget=name_widget, help_text=name_help_text)
132
133 def clean(self):
134 if not recaptcha.verify(self.data):
135 raise forms.ValidationError("reCAPTCHA failed")
136
137
138 class MSTCourtesyCard(EligibilityVerificationForm):
139 """EligibilityVerification form for the MST Courtesy Card."""
140
141 def __init__(self, *args, **kwargs):
142 super().__init__(
143 title=_("Agency card information"),
144             headline=_("Let’s see if we can confirm your eligibility."),
145 blurb=_("Please input your Courtesy Card number and last name below to confirm your eligibility."),
146 name_label=_("Last name (as it appears on Courtesy Card)"),
147 name_placeholder="Garcia",
148 name_help_text=_("We use this to help confirm your Courtesy Card."),
149 sub_label=_("MST Courtesy Card number"),
150 sub_help_text=_("This is a 5-digit number on the front and back of your card."),
151 sub_placeholder="12345",
152 name_max_length=255,
153 sub_input_mode="numeric",
154 sub_max_length=5,
155 sub_pattern=r"\d{5}",
156 *args,
157 **kwargs,
158 )
159
160
161 class SBMTDMobilityPass(EligibilityVerificationForm):
162 """EligibilityVerification form for the SBMTD Reduced Fare Mobility ID."""
163
164 def __init__(self, *args, **kwargs):
165 super().__init__(
166 title=_("Agency card information"),
167             headline=_("Let’s see if we can confirm your eligibility."),
168 blurb=_("Please input your Reduced Fare Mobility ID number and last name below to confirm your eligibility."),
169 name_label=_("Last name (as it appears on Reduced Fare Mobility ID card)"),
170 name_placeholder="Garcia",
171 name_help_text=_("We use this to help confirm your Reduced Fare Mobility ID."),
172 sub_label=_("SBMTD Reduced Fare Mobility ID number"),
173 sub_help_text=_("This is a 4-digit number on the back of your card."),
174 sub_placeholder="1234",
175 name_max_length=255,
176 sub_input_mode="numeric",
177 sub_max_length=4,
178 sub_pattern=r"\d{4}",
179 *args,
180 **kwargs,
181 )
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/benefits/eligibility/forms.py b/benefits/eligibility/forms.py
--- a/benefits/eligibility/forms.py
+++ b/benefits/eligibility/forms.py
@@ -21,7 +21,7 @@
verifier = forms.ChoiceField(label="", widget=widgets.VerifierRadioSelect)
# sets label to empty string so the radio_select template can override the label style
- submit_value = _("Choose this Benefit")
+ submit_value = _("Choose this benefit")
def __init__(self, agency: models.TransitAgency, *args, **kwargs):
super().__init__(*args, **kwargs)
| {"golden_diff": "diff --git a/benefits/eligibility/forms.py b/benefits/eligibility/forms.py\n--- a/benefits/eligibility/forms.py\n+++ b/benefits/eligibility/forms.py\n@@ -21,7 +21,7 @@\n \n verifier = forms.ChoiceField(label=\"\", widget=widgets.VerifierRadioSelect)\n # sets label to empty string so the radio_select template can override the label style\n- submit_value = _(\"Choose this Benefit\")\n+ submit_value = _(\"Choose this benefit\")\n \n def __init__(self, agency: models.TransitAgency, *args, **kwargs):\n super().__init__(*args, **kwargs)\n", "issue": "`Eligibility index`: update copy to be more consistent and provide context for user\n| Desktop (showing MST) | Mobile (showing SBMTD) |\r\n| --- | --- |\r\n|  |  |\r\n\r\n\r\n## Figma links\r\n- [Desktop_Eligibility](https://www.figma.com/file/SeSd3LaLd6WkbEYhmtKpO3/Benefits-(Full-Application)?type=design&node-id=14387-12551&mode=design&t=11PIPi0dRtWNuQmK-4)\r\n- [Mobile_Eligibility](https://www.figma.com/file/SeSd3LaLd6WkbEYhmtKpO3/Benefits-(Full-Application)?type=design&node-id=14646-18505&mode=design&t=11PIPi0dRtWNuQmK-4)\r\n\r\n\r\n## Acceptance Criteria\r\n\r\n<!-- Remember to consider edge cases -->\r\n\r\n- [x] Body copy: first body sentence was updated\r\n- [x] Remove agency name in the copy for the radio buttons subheaders\r\n - [x] \u201cMST Courtesy Card\u201d is now \u201cCourtesy Card\u201d\r\n - [x] \"SBMTD Reduced Fare Mobility ID\" is now \"Reduced Fare Mobility ID\"\r\n- [x] Radio button descriptions were updated and for SBMTD, mentions the previous name\r\n- [x] CTA copy is updated\r\n\n", "before_files": [{"content": "\"\"\"\nThe eligibility application: Form definition for the eligibility verification flow.\n\"\"\"\n\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom benefits.core import models, recaptcha, widgets\n\nlogger = logging.getLogger(__name__)\n\n\nclass EligibilityVerifierSelectionForm(forms.Form):\n \"\"\"Form to capture eligibility verifier selection.\"\"\"\n\n action_url = \"eligibility:index\"\n id = \"form-verifier-selection\"\n method = \"POST\"\n\n verifier = forms.ChoiceField(label=\"\", widget=widgets.VerifierRadioSelect)\n # sets label to empty string so the radio_select template can override the label style\n submit_value = _(\"Choose this Benefit\")\n\n def __init__(self, agency: models.TransitAgency, *args, **kwargs):\n super().__init__(*args, **kwargs)\n verifiers = agency.active_verifiers\n\n self.classes = \"col-lg-8\"\n # second element is not used since we render the whole label using selection_label_template,\n # therefore set to None\n self.fields[\"verifier\"].choices = [(v.id, None) for v in verifiers]\n self.fields[\"verifier\"].widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers}\n\n def clean(self):\n if not recaptcha.verify(self.data):\n raise forms.ValidationError(\"reCAPTCHA failed\")\n\n\nclass EligibilityVerificationForm(forms.Form):\n \"\"\"Form to collect eligibility verification details.\"\"\"\n\n action_url = \"eligibility:confirm\"\n id = \"form-eligibility-verification\"\n method = \"POST\"\n\n submit_value = _(\"Check eligibility\")\n submitting_value = _(\"Checking\")\n\n _error_messages = {\n \"invalid\": _(\"Check your input. 
The format looks wrong.\"),\n \"missing\": _(\"This field is required.\"),\n }\n\n def __init__(\n self,\n title,\n headline,\n blurb,\n name_label,\n name_placeholder,\n name_help_text,\n sub_label,\n sub_placeholder,\n sub_help_text,\n name_max_length=None,\n sub_input_mode=None,\n sub_max_length=None,\n sub_pattern=None,\n *args,\n **kwargs,\n ):\n \"\"\"Initialize a new EligibilityVerifier form.\n\n Args:\n title (str): The page (i.e. tab) title for the form's page.\n\n headline (str): The <h1> on the form's page.\n\n blurb (str): Intro <p> on the form's page.\n\n name_label (str): Label for the name form field.\n\n name_placeholder (str): Field placeholder for the name form field.\n\n name_help_text (str): Extra help text for the name form field.\n\n sub_label (str): Label for the sub form field.\n\n sub_placeholder (str): Field placeholder for the sub form field.\n\n sub_help_text (str): Extra help text for the sub form field.\n\n name_max_length (int): The maximum length accepted for the 'name' API field before sending to this verifier\n\n sub_input_mode (str): Input mode can be \"numeric\", \"tel\", \"search\", etc. to override default \"text\" keyboard on\n mobile devices\n\n sub_max_length (int): The maximum length accepted for the 'sub' API field before sending to this verifier\n\n sub_pattern (str): A regular expression used to validate the 'sub' API field before sending to this verifier\n\n Extra args and kwargs are passed through to the underlying django.forms.Form.\n \"\"\"\n super().__init__(auto_id=True, label_suffix=\"\", *args, **kwargs)\n\n self.title = title\n self.headline = headline\n self.blurb = blurb\n\n self.classes = \"col-lg-6\"\n sub_widget = widgets.FormControlTextInput(placeholder=sub_placeholder)\n if sub_pattern:\n sub_widget.attrs.update({\"pattern\": sub_pattern})\n if sub_input_mode:\n sub_widget.attrs.update({\"inputmode\": sub_input_mode})\n if sub_max_length:\n sub_widget.attrs.update({\"maxlength\": sub_max_length})\n\n self.fields[\"sub\"] = forms.CharField(\n label=sub_label,\n widget=sub_widget,\n help_text=sub_help_text,\n )\n\n name_widget = widgets.FormControlTextInput(placeholder=name_placeholder)\n if name_max_length:\n name_widget.attrs.update({\"maxlength\": name_max_length})\n\n self.fields[\"name\"] = forms.CharField(label=name_label, widget=name_widget, help_text=name_help_text)\n\n def clean(self):\n if not recaptcha.verify(self.data):\n raise forms.ValidationError(\"reCAPTCHA failed\")\n\n\nclass MSTCourtesyCard(EligibilityVerificationForm):\n \"\"\"EligibilityVerification form for the MST Courtesy Card.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(\n title=_(\"Agency card information\"),\n headline=_(\"Let\u2019s see if we can confirm your eligibility.\"),\n blurb=_(\"Please input your Courtesy Card number and last name below to confirm your eligibility.\"),\n name_label=_(\"Last name (as it appears on Courtesy Card)\"),\n name_placeholder=\"Garcia\",\n name_help_text=_(\"We use this to help confirm your Courtesy Card.\"),\n sub_label=_(\"MST Courtesy Card number\"),\n sub_help_text=_(\"This is a 5-digit number on the front and back of your card.\"),\n sub_placeholder=\"12345\",\n name_max_length=255,\n sub_input_mode=\"numeric\",\n sub_max_length=5,\n sub_pattern=r\"\\d{5}\",\n *args,\n **kwargs,\n )\n\n\nclass SBMTDMobilityPass(EligibilityVerificationForm):\n \"\"\"EligibilityVerification form for the SBMTD Reduced Fare Mobility ID.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(\n 
title=_(\"Agency card information\"),\n headline=_(\"Let\u2019s see if we can confirm your eligibility.\"),\n blurb=_(\"Please input your Reduced Fare Mobility ID number and last name below to confirm your eligibility.\"),\n name_label=_(\"Last name (as it appears on Reduced Fare Mobility ID card)\"),\n name_placeholder=\"Garcia\",\n name_help_text=_(\"We use this to help confirm your Reduced Fare Mobility ID.\"),\n sub_label=_(\"SBMTD Reduced Fare Mobility ID number\"),\n sub_help_text=_(\"This is a 4-digit number on the back of your card.\"),\n sub_placeholder=\"1234\",\n name_max_length=255,\n sub_input_mode=\"numeric\",\n sub_max_length=4,\n sub_pattern=r\"\\d{4}\",\n *args,\n **kwargs,\n )\n", "path": "benefits/eligibility/forms.py"}], "after_files": [{"content": "\"\"\"\nThe eligibility application: Form definition for the eligibility verification flow.\n\"\"\"\n\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom benefits.core import models, recaptcha, widgets\n\nlogger = logging.getLogger(__name__)\n\n\nclass EligibilityVerifierSelectionForm(forms.Form):\n \"\"\"Form to capture eligibility verifier selection.\"\"\"\n\n action_url = \"eligibility:index\"\n id = \"form-verifier-selection\"\n method = \"POST\"\n\n verifier = forms.ChoiceField(label=\"\", widget=widgets.VerifierRadioSelect)\n # sets label to empty string so the radio_select template can override the label style\n submit_value = _(\"Choose this benefit\")\n\n def __init__(self, agency: models.TransitAgency, *args, **kwargs):\n super().__init__(*args, **kwargs)\n verifiers = agency.active_verifiers\n\n self.classes = \"col-lg-8\"\n # second element is not used since we render the whole label using selection_label_template,\n # therefore set to None\n self.fields[\"verifier\"].choices = [(v.id, None) for v in verifiers]\n self.fields[\"verifier\"].widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers}\n\n def clean(self):\n if not recaptcha.verify(self.data):\n raise forms.ValidationError(\"reCAPTCHA failed\")\n\n\nclass EligibilityVerificationForm(forms.Form):\n \"\"\"Form to collect eligibility verification details.\"\"\"\n\n action_url = \"eligibility:confirm\"\n id = \"form-eligibility-verification\"\n method = \"POST\"\n\n submit_value = _(\"Check eligibility\")\n submitting_value = _(\"Checking\")\n\n _error_messages = {\n \"invalid\": _(\"Check your input. The format looks wrong.\"),\n \"missing\": _(\"This field is required.\"),\n }\n\n def __init__(\n self,\n title,\n headline,\n blurb,\n name_label,\n name_placeholder,\n name_help_text,\n sub_label,\n sub_placeholder,\n sub_help_text,\n name_max_length=None,\n sub_input_mode=None,\n sub_max_length=None,\n sub_pattern=None,\n *args,\n **kwargs,\n ):\n \"\"\"Initialize a new EligibilityVerifier form.\n\n Args:\n title (str): The page (i.e. 
tab) title for the form's page.\n\n headline (str): The <h1> on the form's page.\n\n blurb (str): Intro <p> on the form's page.\n\n name_label (str): Label for the name form field.\n\n name_placeholder (str): Field placeholder for the name form field.\n\n name_help_text (str): Extra help text for the name form field.\n\n sub_label (str): Label for the sub form field.\n\n sub_placeholder (str): Field placeholder for the sub form field.\n\n sub_help_text (str): Extra help text for the sub form field.\n\n name_max_length (int): The maximum length accepted for the 'name' API field before sending to this verifier\n\n sub_input_mode (str): Input mode can be \"numeric\", \"tel\", \"search\", etc. to override default \"text\" keyboard on\n mobile devices\n\n sub_max_length (int): The maximum length accepted for the 'sub' API field before sending to this verifier\n\n sub_pattern (str): A regular expression used to validate the 'sub' API field before sending to this verifier\n\n Extra args and kwargs are passed through to the underlying django.forms.Form.\n \"\"\"\n super().__init__(auto_id=True, label_suffix=\"\", *args, **kwargs)\n\n self.title = title\n self.headline = headline\n self.blurb = blurb\n\n self.classes = \"col-lg-6\"\n sub_widget = widgets.FormControlTextInput(placeholder=sub_placeholder)\n if sub_pattern:\n sub_widget.attrs.update({\"pattern\": sub_pattern})\n if sub_input_mode:\n sub_widget.attrs.update({\"inputmode\": sub_input_mode})\n if sub_max_length:\n sub_widget.attrs.update({\"maxlength\": sub_max_length})\n\n self.fields[\"sub\"] = forms.CharField(\n label=sub_label,\n widget=sub_widget,\n help_text=sub_help_text,\n )\n\n name_widget = widgets.FormControlTextInput(placeholder=name_placeholder)\n if name_max_length:\n name_widget.attrs.update({\"maxlength\": name_max_length})\n\n self.fields[\"name\"] = forms.CharField(label=name_label, widget=name_widget, help_text=name_help_text)\n\n def clean(self):\n if not recaptcha.verify(self.data):\n raise forms.ValidationError(\"reCAPTCHA failed\")\n\n\nclass MSTCourtesyCard(EligibilityVerificationForm):\n \"\"\"EligibilityVerification form for the MST Courtesy Card.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(\n title=_(\"Agency card information\"),\n headline=_(\"Let\u2019s see if we can confirm your eligibility.\"),\n blurb=_(\"Please input your Courtesy Card number and last name below to confirm your eligibility.\"),\n name_label=_(\"Last name (as it appears on Courtesy Card)\"),\n name_placeholder=\"Garcia\",\n name_help_text=_(\"We use this to help confirm your Courtesy Card.\"),\n sub_label=_(\"MST Courtesy Card number\"),\n sub_help_text=_(\"This is a 5-digit number on the front and back of your card.\"),\n sub_placeholder=\"12345\",\n name_max_length=255,\n sub_input_mode=\"numeric\",\n sub_max_length=5,\n sub_pattern=r\"\\d{5}\",\n *args,\n **kwargs,\n )\n\n\nclass SBMTDMobilityPass(EligibilityVerificationForm):\n \"\"\"EligibilityVerification form for the SBMTD Reduced Fare Mobility ID.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(\n title=_(\"Agency card information\"),\n headline=_(\"Let\u2019s see if we can confirm your eligibility.\"),\n blurb=_(\"Please input your Reduced Fare Mobility ID number and last name below to confirm your eligibility.\"),\n name_label=_(\"Last name (as it appears on Reduced Fare Mobility ID card)\"),\n name_placeholder=\"Garcia\",\n name_help_text=_(\"We use this to help confirm your Reduced Fare Mobility ID.\"),\n sub_label=_(\"SBMTD Reduced Fare 
Mobility ID number\"),\n sub_help_text=_(\"This is a 4-digit number on the back of your card.\"),\n sub_placeholder=\"1234\",\n name_max_length=255,\n sub_input_mode=\"numeric\",\n sub_max_length=4,\n sub_pattern=r\"\\d{4}\",\n *args,\n **kwargs,\n )\n", "path": "benefits/eligibility/forms.py"}]} | 2,546 | 143 |
gh_patches_debug_16179 | rasdani/github-patches | git_diff | quantumlib__Cirq-6026 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`MatrixGate` names don't survive serialization
**Description of the issue**
`_name` is excluded from `MatrixGate._json_dict_()` and so is reset to `None` by serialization and deserialization.
**How to reproduce the issue**
```python
gate = cirq.MatrixGate(np.eye(2), name="name")
gate_after_serialization = cirq.read_json(json_text=cirq.to_json(gate))
print(gate._name) # "name"
print(gate_after_serialization._name) # None
```
**Cirq version**
```
1.2.0.dev20230105212249
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq-core/cirq/ops/matrix_gates.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Quantum gates defined by a matrix."""
16
17 from typing import Any, Dict, Iterable, Optional, Tuple, TYPE_CHECKING
18
19 import numpy as np
20
21 from cirq import linalg, protocols, _import
22 from cirq._compat import proper_repr
23 from cirq.ops import raw_types, phased_x_z_gate
24
25 if TYPE_CHECKING:
26 import cirq
27
28 single_qubit_decompositions = _import.LazyLoader(
29 'single_qubit_decompositions', globals(), 'cirq.transformers.analytical_decompositions'
30 )
31 two_qubit_to_cz = _import.LazyLoader(
32 'two_qubit_to_cz', globals(), 'cirq.transformers.analytical_decompositions'
33 )
34 three_qubit_decomposition = _import.LazyLoader(
35 'three_qubit_decomposition', globals(), 'cirq.transformers.analytical_decompositions'
36 )
37
38
39 class MatrixGate(raw_types.Gate):
40 r"""A unitary qubit or qudit gate defined entirely by its numpy matrix.
41
42 For example `cirq.MatrixGate(np.array([[0, 1j], [1, 0]]))` has the unitary matrix:
43
44 $$
45 \begin{bmatrix}
46 0 & i \\
47 1 & 0
48 \end{bmatrix}
49 $$
50 """
51
52 def __init__(
53 self,
54 matrix: np.ndarray,
55 *,
56 name: str = None,
57 qid_shape: Optional[Iterable[int]] = None,
58 unitary_check: bool = True,
59 unitary_check_rtol: float = 1e-5,
60 unitary_check_atol: float = 1e-8,
61 ) -> None:
62 """Initializes a matrix gate.
63
64 Args:
65 matrix: The matrix that defines the gate.
66 name: The optional name of the gate to be displayed.
67 qid_shape: The shape of state tensor that the matrix applies to.
68 If not specified, this value is inferred by assuming that the
69 matrix is supposed to apply to qubits.
70 unitary_check: If True, check that the supplied matrix is unitary up to the
71 given tolerances. This should only be disabled if the matrix has already been
72 checked for unitarity, in which case we get a slight performance improvement by
73 not checking again.
74 unitary_check_rtol: The relative tolerance for checking whether the supplied matrix
75 is unitary. See `cirq.is_unitary`.
76 unitary_check_atol: The absolute tolerance for checking whether the supplied matrix
77 is unitary. See `cirq.is_unitary`.
78
79 Raises:
80 ValueError: If the matrix is not a square numpy array, if the matrix does not match
81 the `qid_shape`, if `qid_shape` is not supplied and the matrix dimension is
82 not a power of 2, or if the matrix not unitary (to the supplied precisions).
83 """
84 if len(matrix.shape) != 2 or matrix.shape[0] != matrix.shape[1]:
85 raise ValueError('`matrix` must be a square 2d numpy array.')
86
87 if qid_shape is None:
88 n = int(np.round(np.log2(matrix.shape[0] or 1)))
89 if 2**n != matrix.shape[0]:
90 raise ValueError(
91 f'Matrix width ({matrix.shape[0]}) is not a power of 2 and '
92 f'qid_shape is not specified.'
93 )
94 qid_shape = (2,) * n
95
96 self._matrix = matrix
97 self._qid_shape = tuple(qid_shape)
98 self._name = name
99 m = int(np.prod(self._qid_shape, dtype=np.int64))
100 if self._matrix.shape != (m, m):
101 raise ValueError(
102 'Wrong matrix shape for qid_shape.\n'
103 f'Matrix shape: {self._matrix.shape}\n'
104 f'qid_shape: {self._qid_shape}\n'
105 )
106
107 if unitary_check and not linalg.is_unitary(
108 matrix, rtol=unitary_check_rtol, atol=unitary_check_atol
109 ):
110 raise ValueError(f'Not a unitary matrix: {matrix}')
111
112 def with_name(self, name: str) -> 'MatrixGate':
113 """Creates a new MatrixGate with the same matrix and a new name."""
114 return MatrixGate(self._matrix, name=name, qid_shape=self._qid_shape, unitary_check=False)
115
116 def _json_dict_(self) -> Dict[str, Any]:
117 return {'matrix': self._matrix.tolist(), 'qid_shape': self._qid_shape}
118
119 @classmethod
120 def _from_json_dict_(cls, matrix, qid_shape, **kwargs):
121 return cls(matrix=np.array(matrix), qid_shape=qid_shape)
122
123 def _qid_shape_(self) -> Tuple[int, ...]:
124 return self._qid_shape
125
126 def __pow__(self, exponent: Any) -> 'MatrixGate':
127 if not isinstance(exponent, (int, float)):
128 return NotImplemented
129 new_mat = linalg.map_eigenvalues(self._matrix, lambda b: b**exponent)
130 return MatrixGate(new_mat, qid_shape=self._qid_shape)
131
132 def _phase_by_(self, phase_turns: float, qubit_index: int) -> 'MatrixGate':
133 if not isinstance(phase_turns, (int, float)):
134 return NotImplemented
135 if self._qid_shape[qubit_index] != 2:
136 return NotImplemented
137 result = np.copy(self._matrix).reshape(self._qid_shape * 2)
138
139 p = np.exp(2j * np.pi * phase_turns)
140 i = qubit_index
141 j = qubit_index + len(self._qid_shape)
142 result[linalg.slice_for_qubits_equal_to([i], 1)] *= p
143 result[linalg.slice_for_qubits_equal_to([j], 1)] *= np.conj(p)
144 return MatrixGate(matrix=result.reshape(self._matrix.shape), qid_shape=self._qid_shape)
145
146 def _decompose_(self, qubits: Tuple['cirq.Qid', ...]) -> 'cirq.OP_TREE':
147 if self._qid_shape == (2,):
148 return [
149 g.on(qubits[0])
150 for g in single_qubit_decompositions.single_qubit_matrix_to_gates(self._matrix)
151 ]
152 if self._qid_shape == (2,) * 2:
153 return two_qubit_to_cz.two_qubit_matrix_to_cz_operations(
154 *qubits, self._matrix, allow_partial_czs=True
155 )
156 if self._qid_shape == (2,) * 3:
157 return three_qubit_decomposition.three_qubit_matrix_to_operations(*qubits, self._matrix)
158 return NotImplemented
159
160 def _has_unitary_(self) -> bool:
161 return True
162
163 def _unitary_(self) -> np.ndarray:
164 return np.copy(self._matrix)
165
166 def _circuit_diagram_info_(
167 self, args: 'cirq.CircuitDiagramInfoArgs'
168 ) -> 'cirq.CircuitDiagramInfo':
169 n_qubits = len(self._qid_shape)
170 if self._name is not None:
171 symbols = (
172 [self._name] if n_qubits == 1 else [f'{self._name}[{i+1}]' for i in range(n_qubits)]
173 )
174 return protocols.CircuitDiagramInfo(wire_symbols=symbols)
175 main = _matrix_to_diagram_symbol(self._matrix, args)
176 rest = [f'#{i+1}' for i in range(1, n_qubits)]
177 return protocols.CircuitDiagramInfo(wire_symbols=[main, *rest])
178
179 def _qasm_(self, args: 'cirq.QasmArgs', qubits: Tuple['cirq.Qid', ...]) -> Optional[str]:
180 args.validate_version('2.0')
181 if self._qid_shape == (2,):
182 return protocols.qasm(
183 phased_x_z_gate.PhasedXZGate.from_matrix(self._matrix), args=args, qubits=qubits
184 )
185 return None
186
187 def __hash__(self) -> int:
188 vals = tuple(v for _, v in np.ndenumerate(self._matrix))
189 return hash((MatrixGate, vals))
190
191 def _approx_eq_(self, other: Any, atol) -> bool:
192 if not isinstance(other, type(self)):
193 return NotImplemented
194 return np.allclose(self._matrix, other._matrix, rtol=0, atol=atol)
195
196 def __eq__(self, other):
197 if not isinstance(other, type(self)):
198 return NotImplemented
199 return self._qid_shape == other._qid_shape and np.array_equal(self._matrix, other._matrix)
200
201 def __ne__(self, other):
202 return not self == other
203
204 def __repr__(self) -> str:
205 if all(e == 2 for e in self._qid_shape):
206 return f'cirq.MatrixGate({proper_repr(self._matrix)})'
207 return f'cirq.MatrixGate({proper_repr(self._matrix)}, qid_shape={self._qid_shape})'
208
209 def __str__(self) -> str:
210 return str(self._matrix.round(3))
211
212
213 def _matrix_to_diagram_symbol(matrix: np.ndarray, args: 'protocols.CircuitDiagramInfoArgs') -> str:
214 if args.precision is not None:
215 matrix = matrix.round(args.precision)
216 result = str(matrix)
217 if args.use_unicode_characters:
218 lines = result.split('\n')
219 for i in range(len(lines)):
220 lines[i] = lines[i].replace('[[', '')
221 lines[i] = lines[i].replace(' [', '')
222 lines[i] = lines[i].replace(']', '')
223 w = max(len(line) for line in lines)
224 for i in range(len(lines)):
225 lines[i] = '│' + lines[i].ljust(w) + '│'
226 lines.insert(0, '┌' + ' ' * w + '┐')
227 lines.append('└' + ' ' * w + '┘')
228 result = '\n'.join(lines)
229 return result
230
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq-core/cirq/ops/matrix_gates.py b/cirq-core/cirq/ops/matrix_gates.py
--- a/cirq-core/cirq/ops/matrix_gates.py
+++ b/cirq-core/cirq/ops/matrix_gates.py
@@ -114,11 +114,15 @@
return MatrixGate(self._matrix, name=name, qid_shape=self._qid_shape, unitary_check=False)
def _json_dict_(self) -> Dict[str, Any]:
- return {'matrix': self._matrix.tolist(), 'qid_shape': self._qid_shape}
+ return {
+ 'matrix': self._matrix.tolist(),
+ 'qid_shape': self._qid_shape,
+ **({'name': self._name} if self._name is not None else {}),
+ }
@classmethod
- def _from_json_dict_(cls, matrix, qid_shape, **kwargs):
- return cls(matrix=np.array(matrix), qid_shape=qid_shape)
+ def _from_json_dict_(cls, matrix, qid_shape, name=None, **kwargs):
+ return cls(matrix=np.array(matrix), qid_shape=qid_shape, name=name)
def _qid_shape_(self) -> Tuple[int, ...]:
return self._qid_shape
| {"golden_diff": "diff --git a/cirq-core/cirq/ops/matrix_gates.py b/cirq-core/cirq/ops/matrix_gates.py\n--- a/cirq-core/cirq/ops/matrix_gates.py\n+++ b/cirq-core/cirq/ops/matrix_gates.py\n@@ -114,11 +114,15 @@\n return MatrixGate(self._matrix, name=name, qid_shape=self._qid_shape, unitary_check=False)\n \n def _json_dict_(self) -> Dict[str, Any]:\n- return {'matrix': self._matrix.tolist(), 'qid_shape': self._qid_shape}\n+ return {\n+ 'matrix': self._matrix.tolist(),\n+ 'qid_shape': self._qid_shape,\n+ **({'name': self._name} if self._name is not None else {}),\n+ }\n \n @classmethod\n- def _from_json_dict_(cls, matrix, qid_shape, **kwargs):\n- return cls(matrix=np.array(matrix), qid_shape=qid_shape)\n+ def _from_json_dict_(cls, matrix, qid_shape, name=None, **kwargs):\n+ return cls(matrix=np.array(matrix), qid_shape=qid_shape, name=name)\n \n def _qid_shape_(self) -> Tuple[int, ...]:\n return self._qid_shape\n", "issue": "`MatrixGate` names don't survive serialization\n**Description of the issue**\r\n\r\n`_name` is excluded from `MatrixGate._json_dict_()` and so is reset to `None` by serialization+deserialization\r\n\r\n**How to reproduce the issue**\r\n\r\n```python\r\ngate = cirq.MatrixGate(np.eye(2), name=\"name\")\r\ngate_after_serialization = cirq.read_json(json_text=cirq.to_json(gate))\r\n\r\nprint(gate._name) # \"name\"\r\nprint(gate_after_serialization._name) # None\r\n```\r\n\r\n**Cirq version**\r\n\r\n```\r\n1.2.0.dev20230105212249\r\n```\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Quantum gates defined by a matrix.\"\"\"\n\nfrom typing import Any, Dict, Iterable, Optional, Tuple, TYPE_CHECKING\n\nimport numpy as np\n\nfrom cirq import linalg, protocols, _import\nfrom cirq._compat import proper_repr\nfrom cirq.ops import raw_types, phased_x_z_gate\n\nif TYPE_CHECKING:\n import cirq\n\nsingle_qubit_decompositions = _import.LazyLoader(\n 'single_qubit_decompositions', globals(), 'cirq.transformers.analytical_decompositions'\n)\ntwo_qubit_to_cz = _import.LazyLoader(\n 'two_qubit_to_cz', globals(), 'cirq.transformers.analytical_decompositions'\n)\nthree_qubit_decomposition = _import.LazyLoader(\n 'three_qubit_decomposition', globals(), 'cirq.transformers.analytical_decompositions'\n)\n\n\nclass MatrixGate(raw_types.Gate):\n r\"\"\"A unitary qubit or qudit gate defined entirely by its numpy matrix.\n\n For example `cirq.MatrixGate(np.array([[0, 1j], [1, 0]]))` has the unitary matrix:\n\n $$\n \\begin{bmatrix}\n 0 & i \\\\\n 1 & 0\n \\end{bmatrix}\n $$\n \"\"\"\n\n def __init__(\n self,\n matrix: np.ndarray,\n *,\n name: str = None,\n qid_shape: Optional[Iterable[int]] = None,\n unitary_check: bool = True,\n unitary_check_rtol: float = 1e-5,\n unitary_check_atol: float = 1e-8,\n ) -> None:\n \"\"\"Initializes a matrix gate.\n\n Args:\n matrix: The matrix that defines the gate.\n name: The optional name of the gate to be displayed.\n qid_shape: The shape of state tensor that the matrix applies 
to.\n If not specified, this value is inferred by assuming that the\n matrix is supposed to apply to qubits.\n unitary_check: If True, check that the supplied matrix is unitary up to the\n given tolerances. This should only be disabled if the matrix has already been\n checked for unitarity, in which case we get a slight performance improvement by\n not checking again.\n unitary_check_rtol: The relative tolerance for checking whether the supplied matrix\n is unitary. See `cirq.is_unitary`.\n unitary_check_atol: The absolute tolerance for checking whether the supplied matrix\n is unitary. See `cirq.is_unitary`.\n\n Raises:\n ValueError: If the matrix is not a square numpy array, if the matrix does not match\n the `qid_shape`, if `qid_shape` is not supplied and the matrix dimension is\n not a power of 2, or if the matrix not unitary (to the supplied precisions).\n \"\"\"\n if len(matrix.shape) != 2 or matrix.shape[0] != matrix.shape[1]:\n raise ValueError('`matrix` must be a square 2d numpy array.')\n\n if qid_shape is None:\n n = int(np.round(np.log2(matrix.shape[0] or 1)))\n if 2**n != matrix.shape[0]:\n raise ValueError(\n f'Matrix width ({matrix.shape[0]}) is not a power of 2 and '\n f'qid_shape is not specified.'\n )\n qid_shape = (2,) * n\n\n self._matrix = matrix\n self._qid_shape = tuple(qid_shape)\n self._name = name\n m = int(np.prod(self._qid_shape, dtype=np.int64))\n if self._matrix.shape != (m, m):\n raise ValueError(\n 'Wrong matrix shape for qid_shape.\\n'\n f'Matrix shape: {self._matrix.shape}\\n'\n f'qid_shape: {self._qid_shape}\\n'\n )\n\n if unitary_check and not linalg.is_unitary(\n matrix, rtol=unitary_check_rtol, atol=unitary_check_atol\n ):\n raise ValueError(f'Not a unitary matrix: {matrix}')\n\n def with_name(self, name: str) -> 'MatrixGate':\n \"\"\"Creates a new MatrixGate with the same matrix and a new name.\"\"\"\n return MatrixGate(self._matrix, name=name, qid_shape=self._qid_shape, unitary_check=False)\n\n def _json_dict_(self) -> Dict[str, Any]:\n return {'matrix': self._matrix.tolist(), 'qid_shape': self._qid_shape}\n\n @classmethod\n def _from_json_dict_(cls, matrix, qid_shape, **kwargs):\n return cls(matrix=np.array(matrix), qid_shape=qid_shape)\n\n def _qid_shape_(self) -> Tuple[int, ...]:\n return self._qid_shape\n\n def __pow__(self, exponent: Any) -> 'MatrixGate':\n if not isinstance(exponent, (int, float)):\n return NotImplemented\n new_mat = linalg.map_eigenvalues(self._matrix, lambda b: b**exponent)\n return MatrixGate(new_mat, qid_shape=self._qid_shape)\n\n def _phase_by_(self, phase_turns: float, qubit_index: int) -> 'MatrixGate':\n if not isinstance(phase_turns, (int, float)):\n return NotImplemented\n if self._qid_shape[qubit_index] != 2:\n return NotImplemented\n result = np.copy(self._matrix).reshape(self._qid_shape * 2)\n\n p = np.exp(2j * np.pi * phase_turns)\n i = qubit_index\n j = qubit_index + len(self._qid_shape)\n result[linalg.slice_for_qubits_equal_to([i], 1)] *= p\n result[linalg.slice_for_qubits_equal_to([j], 1)] *= np.conj(p)\n return MatrixGate(matrix=result.reshape(self._matrix.shape), qid_shape=self._qid_shape)\n\n def _decompose_(self, qubits: Tuple['cirq.Qid', ...]) -> 'cirq.OP_TREE':\n if self._qid_shape == (2,):\n return [\n g.on(qubits[0])\n for g in single_qubit_decompositions.single_qubit_matrix_to_gates(self._matrix)\n ]\n if self._qid_shape == (2,) * 2:\n return two_qubit_to_cz.two_qubit_matrix_to_cz_operations(\n *qubits, self._matrix, allow_partial_czs=True\n )\n if self._qid_shape == (2,) * 3:\n return 
three_qubit_decomposition.three_qubit_matrix_to_operations(*qubits, self._matrix)\n return NotImplemented\n\n def _has_unitary_(self) -> bool:\n return True\n\n def _unitary_(self) -> np.ndarray:\n return np.copy(self._matrix)\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> 'cirq.CircuitDiagramInfo':\n n_qubits = len(self._qid_shape)\n if self._name is not None:\n symbols = (\n [self._name] if n_qubits == 1 else [f'{self._name}[{i+1}]' for i in range(n_qubits)]\n )\n return protocols.CircuitDiagramInfo(wire_symbols=symbols)\n main = _matrix_to_diagram_symbol(self._matrix, args)\n rest = [f'#{i+1}' for i in range(1, n_qubits)]\n return protocols.CircuitDiagramInfo(wire_symbols=[main, *rest])\n\n def _qasm_(self, args: 'cirq.QasmArgs', qubits: Tuple['cirq.Qid', ...]) -> Optional[str]:\n args.validate_version('2.0')\n if self._qid_shape == (2,):\n return protocols.qasm(\n phased_x_z_gate.PhasedXZGate.from_matrix(self._matrix), args=args, qubits=qubits\n )\n return None\n\n def __hash__(self) -> int:\n vals = tuple(v for _, v in np.ndenumerate(self._matrix))\n return hash((MatrixGate, vals))\n\n def _approx_eq_(self, other: Any, atol) -> bool:\n if not isinstance(other, type(self)):\n return NotImplemented\n return np.allclose(self._matrix, other._matrix, rtol=0, atol=atol)\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return NotImplemented\n return self._qid_shape == other._qid_shape and np.array_equal(self._matrix, other._matrix)\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self) -> str:\n if all(e == 2 for e in self._qid_shape):\n return f'cirq.MatrixGate({proper_repr(self._matrix)})'\n return f'cirq.MatrixGate({proper_repr(self._matrix)}, qid_shape={self._qid_shape})'\n\n def __str__(self) -> str:\n return str(self._matrix.round(3))\n\n\ndef _matrix_to_diagram_symbol(matrix: np.ndarray, args: 'protocols.CircuitDiagramInfoArgs') -> str:\n if args.precision is not None:\n matrix = matrix.round(args.precision)\n result = str(matrix)\n if args.use_unicode_characters:\n lines = result.split('\\n')\n for i in range(len(lines)):\n lines[i] = lines[i].replace('[[', '')\n lines[i] = lines[i].replace(' [', '')\n lines[i] = lines[i].replace(']', '')\n w = max(len(line) for line in lines)\n for i in range(len(lines)):\n lines[i] = '\u2502' + lines[i].ljust(w) + '\u2502'\n lines.insert(0, '\u250c' + ' ' * w + '\u2510')\n lines.append('\u2514' + ' ' * w + '\u2518')\n result = '\\n'.join(lines)\n return result\n", "path": "cirq-core/cirq/ops/matrix_gates.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Quantum gates defined by a matrix.\"\"\"\n\nfrom typing import Any, Dict, Iterable, Optional, Tuple, TYPE_CHECKING\n\nimport numpy as np\n\nfrom cirq import linalg, protocols, _import\nfrom cirq._compat import proper_repr\nfrom cirq.ops import raw_types, phased_x_z_gate\n\nif TYPE_CHECKING:\n import cirq\n\nsingle_qubit_decompositions = 
_import.LazyLoader(\n 'single_qubit_decompositions', globals(), 'cirq.transformers.analytical_decompositions'\n)\ntwo_qubit_to_cz = _import.LazyLoader(\n 'two_qubit_to_cz', globals(), 'cirq.transformers.analytical_decompositions'\n)\nthree_qubit_decomposition = _import.LazyLoader(\n 'three_qubit_decomposition', globals(), 'cirq.transformers.analytical_decompositions'\n)\n\n\nclass MatrixGate(raw_types.Gate):\n r\"\"\"A unitary qubit or qudit gate defined entirely by its numpy matrix.\n\n For example `cirq.MatrixGate(np.array([[0, 1j], [1, 0]]))` has the unitary matrix:\n\n $$\n \\begin{bmatrix}\n 0 & i \\\\\n 1 & 0\n \\end{bmatrix}\n $$\n \"\"\"\n\n def __init__(\n self,\n matrix: np.ndarray,\n *,\n name: str = None,\n qid_shape: Optional[Iterable[int]] = None,\n unitary_check: bool = True,\n unitary_check_rtol: float = 1e-5,\n unitary_check_atol: float = 1e-8,\n ) -> None:\n \"\"\"Initializes a matrix gate.\n\n Args:\n matrix: The matrix that defines the gate.\n name: The optional name of the gate to be displayed.\n qid_shape: The shape of state tensor that the matrix applies to.\n If not specified, this value is inferred by assuming that the\n matrix is supposed to apply to qubits.\n unitary_check: If True, check that the supplied matrix is unitary up to the\n given tolerances. This should only be disabled if the matrix has already been\n checked for unitarity, in which case we get a slight performance improvement by\n not checking again.\n unitary_check_rtol: The relative tolerance for checking whether the supplied matrix\n is unitary. See `cirq.is_unitary`.\n unitary_check_atol: The absolute tolerance for checking whether the supplied matrix\n is unitary. See `cirq.is_unitary`.\n\n Raises:\n ValueError: If the matrix is not a square numpy array, if the matrix does not match\n the `qid_shape`, if `qid_shape` is not supplied and the matrix dimension is\n not a power of 2, or if the matrix not unitary (to the supplied precisions).\n \"\"\"\n if len(matrix.shape) != 2 or matrix.shape[0] != matrix.shape[1]:\n raise ValueError('`matrix` must be a square 2d numpy array.')\n\n if qid_shape is None:\n n = int(np.round(np.log2(matrix.shape[0] or 1)))\n if 2**n != matrix.shape[0]:\n raise ValueError(\n f'Matrix width ({matrix.shape[0]}) is not a power of 2 and '\n f'qid_shape is not specified.'\n )\n qid_shape = (2,) * n\n\n self._matrix = matrix\n self._qid_shape = tuple(qid_shape)\n self._name = name\n m = int(np.prod(self._qid_shape, dtype=np.int64))\n if self._matrix.shape != (m, m):\n raise ValueError(\n 'Wrong matrix shape for qid_shape.\\n'\n f'Matrix shape: {self._matrix.shape}\\n'\n f'qid_shape: {self._qid_shape}\\n'\n )\n\n if unitary_check and not linalg.is_unitary(\n matrix, rtol=unitary_check_rtol, atol=unitary_check_atol\n ):\n raise ValueError(f'Not a unitary matrix: {matrix}')\n\n def with_name(self, name: str) -> 'MatrixGate':\n \"\"\"Creates a new MatrixGate with the same matrix and a new name.\"\"\"\n return MatrixGate(self._matrix, name=name, qid_shape=self._qid_shape, unitary_check=False)\n\n def _json_dict_(self) -> Dict[str, Any]:\n return {\n 'matrix': self._matrix.tolist(),\n 'qid_shape': self._qid_shape,\n **({'name': self._name} if self._name is not None else {}),\n }\n\n @classmethod\n def _from_json_dict_(cls, matrix, qid_shape, name=None, **kwargs):\n return cls(matrix=np.array(matrix), qid_shape=qid_shape, name=name)\n\n def _qid_shape_(self) -> Tuple[int, ...]:\n return self._qid_shape\n\n def __pow__(self, exponent: Any) -> 'MatrixGate':\n if not 
isinstance(exponent, (int, float)):\n return NotImplemented\n new_mat = linalg.map_eigenvalues(self._matrix, lambda b: b**exponent)\n return MatrixGate(new_mat, qid_shape=self._qid_shape)\n\n def _phase_by_(self, phase_turns: float, qubit_index: int) -> 'MatrixGate':\n if not isinstance(phase_turns, (int, float)):\n return NotImplemented\n if self._qid_shape[qubit_index] != 2:\n return NotImplemented\n result = np.copy(self._matrix).reshape(self._qid_shape * 2)\n\n p = np.exp(2j * np.pi * phase_turns)\n i = qubit_index\n j = qubit_index + len(self._qid_shape)\n result[linalg.slice_for_qubits_equal_to([i], 1)] *= p\n result[linalg.slice_for_qubits_equal_to([j], 1)] *= np.conj(p)\n return MatrixGate(matrix=result.reshape(self._matrix.shape), qid_shape=self._qid_shape)\n\n def _decompose_(self, qubits: Tuple['cirq.Qid', ...]) -> 'cirq.OP_TREE':\n if self._qid_shape == (2,):\n return [\n g.on(qubits[0])\n for g in single_qubit_decompositions.single_qubit_matrix_to_gates(self._matrix)\n ]\n if self._qid_shape == (2,) * 2:\n return two_qubit_to_cz.two_qubit_matrix_to_cz_operations(\n *qubits, self._matrix, allow_partial_czs=True\n )\n if self._qid_shape == (2,) * 3:\n return three_qubit_decomposition.three_qubit_matrix_to_operations(*qubits, self._matrix)\n return NotImplemented\n\n def _has_unitary_(self) -> bool:\n return True\n\n def _unitary_(self) -> np.ndarray:\n return np.copy(self._matrix)\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> 'cirq.CircuitDiagramInfo':\n n_qubits = len(self._qid_shape)\n if self._name is not None:\n symbols = (\n [self._name] if n_qubits == 1 else [f'{self._name}[{i+1}]' for i in range(n_qubits)]\n )\n return protocols.CircuitDiagramInfo(wire_symbols=symbols)\n main = _matrix_to_diagram_symbol(self._matrix, args)\n rest = [f'#{i+1}' for i in range(1, n_qubits)]\n return protocols.CircuitDiagramInfo(wire_symbols=[main, *rest])\n\n def _qasm_(self, args: 'cirq.QasmArgs', qubits: Tuple['cirq.Qid', ...]) -> Optional[str]:\n args.validate_version('2.0')\n if self._qid_shape == (2,):\n return protocols.qasm(\n phased_x_z_gate.PhasedXZGate.from_matrix(self._matrix), args=args, qubits=qubits\n )\n return None\n\n def __hash__(self) -> int:\n vals = tuple(v for _, v in np.ndenumerate(self._matrix))\n return hash((MatrixGate, vals))\n\n def _approx_eq_(self, other: Any, atol) -> bool:\n if not isinstance(other, type(self)):\n return NotImplemented\n return np.allclose(self._matrix, other._matrix, rtol=0, atol=atol)\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return NotImplemented\n return self._qid_shape == other._qid_shape and np.array_equal(self._matrix, other._matrix)\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self) -> str:\n if all(e == 2 for e in self._qid_shape):\n return f'cirq.MatrixGate({proper_repr(self._matrix)})'\n return f'cirq.MatrixGate({proper_repr(self._matrix)}, qid_shape={self._qid_shape})'\n\n def __str__(self) -> str:\n return str(self._matrix.round(3))\n\n\ndef _matrix_to_diagram_symbol(matrix: np.ndarray, args: 'protocols.CircuitDiagramInfoArgs') -> str:\n if args.precision is not None:\n matrix = matrix.round(args.precision)\n result = str(matrix)\n if args.use_unicode_characters:\n lines = result.split('\\n')\n for i in range(len(lines)):\n lines[i] = lines[i].replace('[[', '')\n lines[i] = lines[i].replace(' [', '')\n lines[i] = lines[i].replace(']', '')\n w = max(len(line) for line in lines)\n for i in range(len(lines)):\n lines[i] = '\u2502' + 
lines[i].ljust(w) + '\u2502'\n lines.insert(0, '\u250c' + ' ' * w + '\u2510')\n lines.append('\u2514' + ' ' * w + '\u2518')\n result = '\\n'.join(lines)\n return result\n", "path": "cirq-core/cirq/ops/matrix_gates.py"}]} | 3,320 | 285 |
gh_patches_debug_50452 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-364 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
404 on single user notebook server anytime I restart jupyterhub
I just deployed jupyterhub from scratch using my ansible scripts this morning. The scripts are unchanged from 2-3 weeks ago when I did it previously and it was all working. I am running from latest master of jupyterhub.
Anytime I restart jupyterhub (hub+proxy) I see the following behavior:
- I can log in (using GitHub OAuth)
- Trying to start my server gives me a 404 on `/hub/users/ellisonbg`. I can tell from the server log that the single user server isn't being started.
- I can go to the Admin page and start my server from there.
- But then, I get a redirect loop when I try to go to my single user server.
- If I clear all my cookies while my single user server (started through the Admin page) is still running, it starts to work as expected. I can start and stop the single user server on the control panel page just fine.
- If I restart jupyterhub, the problems start all over again.
I am using a fixed proxy_auth_token and cookie secret.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jupyterhub/handlers/pages.py`
Content:
```
1 """Basic html-rendering handlers."""
2
3 # Copyright (c) Jupyter Development Team.
4 # Distributed under the terms of the Modified BSD License.
5
6 from tornado import web, gen
7
8 from .. import orm
9 from ..utils import admin_only, url_path_join
10 from .base import BaseHandler
11 from .login import LoginHandler
12
13
14 class RootHandler(BaseHandler):
15 """Render the Hub root page.
16
17 If logged in, redirects to:
18
19 - single-user server if running
20 - hub home, otherwise
21
22 Otherwise, renders login page.
23 """
24 def get(self):
25 user = self.get_current_user()
26 if user:
27 if user.running:
28 url = user.server.base_url
29 self.log.debug("User is running: %s", url)
30 else:
31 url = url_path_join(self.hub.server.base_url, 'home')
32 self.log.debug("User is not running: %s", url)
33 self.redirect(url)
34 return
35 url = url_path_join(self.hub.server.base_url, 'login')
36 self.redirect(url)
37
38
39 class HomeHandler(BaseHandler):
40 """Render the user's home page."""
41
42 @web.authenticated
43 def get(self):
44 html = self.render_template('home.html',
45 user=self.get_current_user(),
46 )
47 self.finish(html)
48
49
50 class SpawnHandler(BaseHandler):
51 """Handle spawning of single-user servers via form.
52
53 GET renders the form, POST handles form submission.
54
55 Only enabled when Spawner.options_form is defined.
56 """
57 @web.authenticated
58 def get(self):
59 """GET renders form for spawning with user-specified options"""
60 user = self.get_current_user()
61 if user.running:
62 url = user.server.base_url
63 self.log.debug("User is running: %s", url)
64 self.redirect(url)
65 return
66 if user.spawner.options_form:
67 html = self.render_template('spawn.html',
68 user=self.get_current_user(),
69 spawner_options_form=user.spawner.options_form,
70 )
71 self.finish(html)
72 else:
73 # not running, no form. Trigger spawn.
74 url = url_path_join(self.base_url, 'users', user.name)
75 self.redirect(url)
76
77 @web.authenticated
78 @gen.coroutine
79 def post(self):
80 """POST spawns with user-specified options"""
81 user = self.get_current_user()
82 if user.running:
83 url = user.server.base_url
84 self.log.warning("User is already running: %s", url)
85 self.redirect(url)
86 return
87 form_options = {}
88 for key, byte_list in self.request.body_arguments.items():
89 form_options[key] = [ bs.decode('utf8') for bs in byte_list ]
90 options = user.spawner.options_from_form(form_options)
91 yield self.spawn_single_user(user, options=options)
92 self.set_login_cookie(user)
93 url = user.server.base_url
94 self.redirect(url)
95
96 class AdminHandler(BaseHandler):
97 """Render the admin page."""
98
99 @admin_only
100 def get(self):
101 available = {'name', 'admin', 'running', 'last_activity'}
102 default_sort = ['admin', 'name']
103 mapping = {
104 'running': '_server_id'
105 }
106 default_order = {
107 'name': 'asc',
108 'last_activity': 'desc',
109 'admin': 'desc',
110 'running': 'desc',
111 }
112 sorts = self.get_arguments('sort') or default_sort
113 orders = self.get_arguments('order')
114
115 for bad in set(sorts).difference(available):
116 self.log.warn("ignoring invalid sort: %r", bad)
117 sorts.remove(bad)
118 for bad in set(orders).difference({'asc', 'desc'}):
119 self.log.warn("ignoring invalid order: %r", bad)
120 orders.remove(bad)
121
122 # add default sort as secondary
123 for s in default_sort:
124 if s not in sorts:
125 sorts.append(s)
126 if len(orders) < len(sorts):
127 for col in sorts[len(orders):]:
128 orders.append(default_order[col])
129 else:
130 orders = orders[:len(sorts)]
131
132 # this could be one incomprehensible nested list comprehension
133 # get User columns
134 cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ]
135 # get User.col.desc() order objects
136 ordered = [ getattr(c, o)() for c, o in zip(cols, orders) ]
137
138 users = self.db.query(orm.User).order_by(*ordered)
139 users = [ self._user_from_orm(u) for u in users ]
140 running = [ u for u in users if u.running ]
141
142 html = self.render_template('admin.html',
143 user=self.get_current_user(),
144 admin_access=self.settings.get('admin_access', False),
145 users=users,
146 running=running,
147 sort={s:o for s,o in zip(sorts, orders)},
148 )
149 self.finish(html)
150
151
152 default_handlers = [
153 (r'/', RootHandler),
154 (r'/home', HomeHandler),
155 (r'/admin', AdminHandler),
156 (r'/spawn', SpawnHandler),
157 ]
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/jupyterhub/handlers/pages.py b/jupyterhub/handlers/pages.py
--- a/jupyterhub/handlers/pages.py
+++ b/jupyterhub/handlers/pages.py
@@ -71,7 +71,7 @@
self.finish(html)
else:
# not running, no form. Trigger spawn.
- url = url_path_join(self.base_url, 'users', user.name)
+ url = url_path_join(self.base_url, 'user', user.name)
self.redirect(url)
@web.authenticated
| {"golden_diff": "diff --git a/jupyterhub/handlers/pages.py b/jupyterhub/handlers/pages.py\n--- a/jupyterhub/handlers/pages.py\n+++ b/jupyterhub/handlers/pages.py\n@@ -71,7 +71,7 @@\n self.finish(html)\n else:\n # not running, no form. Trigger spawn.\n- url = url_path_join(self.base_url, 'users', user.name)\n+ url = url_path_join(self.base_url, 'user', user.name)\n self.redirect(url)\n \n @web.authenticated\n", "issue": "404 on single user notebook server anytime I restart jupyterhub\nI just deployed jupyterhub from scratch using my ansible scripts this morning. The script are unchanged from 2-3 weeks ago when I did it previously and it was all working. I am running from latest master of jupyterhub.\n\nAnytime I restart jupyterhub (hub+proxy) I see the following behavior:\n- I can log in (using GitHub OAuth)\n- Trying to start my server gives me a 404 on `/hub/users/ellisonbg`. I can tell from the server log that the single user server isn't being started.\n- I can go to the Admin page and start my server from there.\n- But then, I get a redirect loop when I try to go to my single user server.\n- If I clear all my cookies while my single user server (started through the Admin page) is still running, it starts to work as expected. I can start and stop the single user server on the control panel page just fine.\n- If a restart jupyterhub, the problems start all over again.\n\nI am using a fixed proxy_auth_token and cookie secret.\n\n", "before_files": [{"content": "\"\"\"Basic html-rendering handlers.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom tornado import web, gen\n\nfrom .. import orm\nfrom ..utils import admin_only, url_path_join\nfrom .base import BaseHandler\nfrom .login import LoginHandler\n\n\nclass RootHandler(BaseHandler):\n \"\"\"Render the Hub root page.\n \n If logged in, redirects to:\n \n - single-user server if running\n - hub home, otherwise\n \n Otherwise, renders login page.\n \"\"\"\n def get(self):\n user = self.get_current_user()\n if user:\n if user.running:\n url = user.server.base_url\n self.log.debug(\"User is running: %s\", url)\n else:\n url = url_path_join(self.hub.server.base_url, 'home')\n self.log.debug(\"User is not running: %s\", url)\n self.redirect(url)\n return\n url = url_path_join(self.hub.server.base_url, 'login')\n self.redirect(url)\n\n\nclass HomeHandler(BaseHandler):\n \"\"\"Render the user's home page.\"\"\"\n\n @web.authenticated\n def get(self):\n html = self.render_template('home.html',\n user=self.get_current_user(),\n )\n self.finish(html)\n\n\nclass SpawnHandler(BaseHandler):\n \"\"\"Handle spawning of single-user servers via form.\n \n GET renders the form, POST handles form submission.\n \n Only enabled when Spawner.options_form is defined.\n \"\"\"\n @web.authenticated\n def get(self):\n \"\"\"GET renders form for spawning with user-specified options\"\"\"\n user = self.get_current_user()\n if user.running:\n url = user.server.base_url\n self.log.debug(\"User is running: %s\", url)\n self.redirect(url)\n return\n if user.spawner.options_form:\n html = self.render_template('spawn.html',\n user=self.get_current_user(),\n spawner_options_form=user.spawner.options_form,\n )\n self.finish(html)\n else:\n # not running, no form. 
Trigger spawn.\n url = url_path_join(self.base_url, 'users', user.name)\n self.redirect(url)\n \n @web.authenticated\n @gen.coroutine\n def post(self):\n \"\"\"POST spawns with user-specified options\"\"\"\n user = self.get_current_user()\n if user.running:\n url = user.server.base_url\n self.log.warning(\"User is already running: %s\", url)\n self.redirect(url)\n return\n form_options = {}\n for key, byte_list in self.request.body_arguments.items():\n form_options[key] = [ bs.decode('utf8') for bs in byte_list ]\n options = user.spawner.options_from_form(form_options)\n yield self.spawn_single_user(user, options=options)\n self.set_login_cookie(user)\n url = user.server.base_url\n self.redirect(url)\n\nclass AdminHandler(BaseHandler):\n \"\"\"Render the admin page.\"\"\"\n\n @admin_only\n def get(self):\n available = {'name', 'admin', 'running', 'last_activity'}\n default_sort = ['admin', 'name']\n mapping = {\n 'running': '_server_id'\n }\n default_order = {\n 'name': 'asc',\n 'last_activity': 'desc',\n 'admin': 'desc',\n 'running': 'desc',\n }\n sorts = self.get_arguments('sort') or default_sort\n orders = self.get_arguments('order')\n \n for bad in set(sorts).difference(available):\n self.log.warn(\"ignoring invalid sort: %r\", bad)\n sorts.remove(bad)\n for bad in set(orders).difference({'asc', 'desc'}):\n self.log.warn(\"ignoring invalid order: %r\", bad)\n orders.remove(bad)\n \n # add default sort as secondary\n for s in default_sort:\n if s not in sorts:\n sorts.append(s)\n if len(orders) < len(sorts):\n for col in sorts[len(orders):]:\n orders.append(default_order[col])\n else:\n orders = orders[:len(sorts)]\n \n # this could be one incomprehensible nested list comprehension\n # get User columns\n cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ]\n # get User.col.desc() order objects\n ordered = [ getattr(c, o)() for c, o in zip(cols, orders) ]\n \n users = self.db.query(orm.User).order_by(*ordered)\n users = [ self._user_from_orm(u) for u in users ]\n running = [ u for u in users if u.running ]\n \n html = self.render_template('admin.html',\n user=self.get_current_user(),\n admin_access=self.settings.get('admin_access', False),\n users=users,\n running=running,\n sort={s:o for s,o in zip(sorts, orders)},\n )\n self.finish(html)\n\n\ndefault_handlers = [\n (r'/', RootHandler),\n (r'/home', HomeHandler),\n (r'/admin', AdminHandler),\n (r'/spawn', SpawnHandler),\n]\n", "path": "jupyterhub/handlers/pages.py"}], "after_files": [{"content": "\"\"\"Basic html-rendering handlers.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nfrom tornado import web, gen\n\nfrom .. 
import orm\nfrom ..utils import admin_only, url_path_join\nfrom .base import BaseHandler\nfrom .login import LoginHandler\n\n\nclass RootHandler(BaseHandler):\n \"\"\"Render the Hub root page.\n \n If logged in, redirects to:\n \n - single-user server if running\n - hub home, otherwise\n \n Otherwise, renders login page.\n \"\"\"\n def get(self):\n user = self.get_current_user()\n if user:\n if user.running:\n url = user.server.base_url\n self.log.debug(\"User is running: %s\", url)\n else:\n url = url_path_join(self.hub.server.base_url, 'home')\n self.log.debug(\"User is not running: %s\", url)\n self.redirect(url)\n return\n url = url_path_join(self.hub.server.base_url, 'login')\n self.redirect(url)\n\n\nclass HomeHandler(BaseHandler):\n \"\"\"Render the user's home page.\"\"\"\n\n @web.authenticated\n def get(self):\n html = self.render_template('home.html',\n user=self.get_current_user(),\n )\n self.finish(html)\n\n\nclass SpawnHandler(BaseHandler):\n \"\"\"Handle spawning of single-user servers via form.\n \n GET renders the form, POST handles form submission.\n \n Only enabled when Spawner.options_form is defined.\n \"\"\"\n @web.authenticated\n def get(self):\n \"\"\"GET renders form for spawning with user-specified options\"\"\"\n user = self.get_current_user()\n if user.running:\n url = user.server.base_url\n self.log.debug(\"User is running: %s\", url)\n self.redirect(url)\n return\n if user.spawner.options_form:\n html = self.render_template('spawn.html',\n user=self.get_current_user(),\n spawner_options_form=user.spawner.options_form,\n )\n self.finish(html)\n else:\n # not running, no form. Trigger spawn.\n url = url_path_join(self.base_url, 'user', user.name)\n self.redirect(url)\n \n @web.authenticated\n @gen.coroutine\n def post(self):\n \"\"\"POST spawns with user-specified options\"\"\"\n user = self.get_current_user()\n if user.running:\n url = user.server.base_url\n self.log.warning(\"User is already running: %s\", url)\n self.redirect(url)\n return\n form_options = {}\n for key, byte_list in self.request.body_arguments.items():\n form_options[key] = [ bs.decode('utf8') for bs in byte_list ]\n options = user.spawner.options_from_form(form_options)\n yield self.spawn_single_user(user, options=options)\n self.set_login_cookie(user)\n url = user.server.base_url\n self.redirect(url)\n\nclass AdminHandler(BaseHandler):\n \"\"\"Render the admin page.\"\"\"\n\n @admin_only\n def get(self):\n available = {'name', 'admin', 'running', 'last_activity'}\n default_sort = ['admin', 'name']\n mapping = {\n 'running': '_server_id'\n }\n default_order = {\n 'name': 'asc',\n 'last_activity': 'desc',\n 'admin': 'desc',\n 'running': 'desc',\n }\n sorts = self.get_arguments('sort') or default_sort\n orders = self.get_arguments('order')\n \n for bad in set(sorts).difference(available):\n self.log.warn(\"ignoring invalid sort: %r\", bad)\n sorts.remove(bad)\n for bad in set(orders).difference({'asc', 'desc'}):\n self.log.warn(\"ignoring invalid order: %r\", bad)\n orders.remove(bad)\n \n # add default sort as secondary\n for s in default_sort:\n if s not in sorts:\n sorts.append(s)\n if len(orders) < len(sorts):\n for col in sorts[len(orders):]:\n orders.append(default_order[col])\n else:\n orders = orders[:len(sorts)]\n \n # this could be one incomprehensible nested list comprehension\n # get User columns\n cols = [ getattr(orm.User, mapping.get(c, c)) for c in sorts ]\n # get User.col.desc() order objects\n ordered = [ getattr(c, o)() for c, o in zip(cols, orders) ]\n \n users = 
self.db.query(orm.User).order_by(*ordered)\n users = [ self._user_from_orm(u) for u in users ]\n running = [ u for u in users if u.running ]\n \n html = self.render_template('admin.html',\n user=self.get_current_user(),\n admin_access=self.settings.get('admin_access', False),\n users=users,\n running=running,\n sort={s:o for s,o in zip(sorts, orders)},\n )\n self.finish(html)\n\n\ndefault_handlers = [\n (r'/', RootHandler),\n (r'/home', HomeHandler),\n (r'/admin', AdminHandler),\n (r'/spawn', SpawnHandler),\n]\n", "path": "jupyterhub/handlers/pages.py"}]} | 1,971 | 121 |
gh_patches_debug_25460 | rasdani/github-patches | git_diff | DataDog__integrations-extras-662 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Importing ABC directly from collections will be removed in Python 3.10
Use collections for Python 2 and collections.abc for Python 3
https://github.com/DataDog/integrations-extras/blob/7e676a2d256e142ddf4cb44ac11a6e590a502779/filebeat/datadog_checks/filebeat/filebeat.py#L148
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `filebeat/datadog_checks/filebeat/filebeat.py`
Content:
```
1 # (C) Datadog, Inc. 2010-2016
2 # All rights reserved
3 # Licensed under Simplified BSD License (see LICENSE)
4
5 # stdlib
6 import collections
7 import errno
8 import json
9 import numbers
10 import os
11 import re
12 import sre_constants
13
14 import requests
15 from six import iteritems
16
17 from datadog_checks.base import AgentCheck, is_affirmative
18 from datadog_checks.base.utils.containers import hash_mutable
19
20 EVENT_TYPE = SOURCE_TYPE_NAME = "filebeat"
21
22
23 class FilebeatCheckHttpProfiler:
24 """
25 Filebeat's HTTP profiler gives a bunch of counter variables; their value holds little interest,
26 what we really want is the delta in between runs. This class is responsible for caching the
27 values from the previous run
28 """
29
30 INCREMENT_METRIC_NAMES = [
31 "filebeat.events.done",
32 "filebeat.harvester.closed",
33 "filebeat.harvester.files.truncated",
34 "filebeat.harvester.open_files",
35 "filebeat.harvester.skipped",
36 "filebeat.harvester.started",
37 "filebeat.prospector.log.files.renamed",
38 "filebeat.prospector.log.files.truncated",
39 "libbeat.config.module.running",
40 "libbeat.config.module.starts",
41 "libbeat.config.module.stops",
42 "libbeat.config.reloads",
43 "libbeat.es.call_count.PublishEvents",
44 "libbeat.es.publish.read_bytes",
45 "libbeat.es.publish.read_errors",
46 "libbeat.es.publish.write_bytes",
47 "libbeat.es.publish.write_errors",
48 "libbeat.es.published_and_acked_events",
49 "libbeat.es.published_but_not_acked_events",
50 "libbeat.kafka.call_count.PublishEvents",
51 "libbeat.kafka.published_and_acked_events",
52 "libbeat.kafka.published_but_not_acked_events",
53 "libbeat.logstash.call_count.PublishEvents",
54 "libbeat.logstash.publish.read_bytes",
55 "libbeat.logstash.publish.read_errors",
56 "libbeat.logstash.publish.write_bytes",
57 "libbeat.logstash.publish.write_errors",
58 "libbeat.logstash.published_and_acked_events",
59 "libbeat.logstash.published_but_not_acked_events",
60 "libbeat.output.events.acked",
61 "libbeat.output.events.dropped",
62 "libbeat.output.events.failed",
63 "libbeat.output.events.total",
64 "libbeat.pipeline.events.dropped",
65 "libbeat.pipeline.events.failed",
66 "libbeat.pipeline.events.filtered",
67 "libbeat.pipeline.events.published",
68 "libbeat.pipeline.events.total",
69 "libbeat.publisher.messages_in_worker_queues",
70 "libbeat.publisher.published_events",
71 "libbeat.redis.publish.read_bytes",
72 "libbeat.redis.publish.read_errors",
73 "libbeat.redis.publish.write_bytes",
74 "libbeat.redis.publish.write_errors",
75 "publish.events",
76 "registrar.states.cleanup",
77 "registrar.states.current",
78 "registrar.states.update",
79 "registrar.writes",
80 ]
81
82 GAUGE_METRIC_NAMES = ["filebeat.harvester.running"]
83
84 VARS_ROUTE = "debug/vars"
85
86 def __init__(self, config):
87 self._config = config
88 self._previous_increment_values = {}
89 # regex matching ain't free, let's cache this
90 self._should_keep_metrics = {}
91
92 def gather_metrics(self):
93 if not self._config.stats_endpoint:
94 return {}
95
96 response = self._make_request()
97
98 return {"increment": self._gather_increment_metrics(response), "gauge": self._gather_gauge_metrics(response)}
99
100 def _make_request(self):
101
102 response = requests.get(self._config.stats_endpoint, timeout=self._config.timeout)
103 response.raise_for_status()
104
105 return self.flatten(response.json())
106
107 def _gather_increment_metrics(self, response):
108 new_values = {
109 name: response[name]
110 for name in self.INCREMENT_METRIC_NAMES
111 if self._should_keep_metric(name) and name in response
112 }
113
114 deltas = self._compute_increment_deltas(new_values)
115
116 self._previous_increment_values = new_values
117
118 return deltas
119
120 def _compute_increment_deltas(self, new_values):
121 deltas = {}
122
123 for name, new_value in iteritems(new_values):
124 if name not in self._previous_increment_values or self._previous_increment_values[name] > new_value:
125 # either the agent or filebeat got restarted, we're not
126 # reporting anything this time around
127 return {}
128 deltas[name] = new_value - self._previous_increment_values[name]
129
130 return deltas
131
132 def _gather_gauge_metrics(self, response):
133 return {
134 name: response[name]
135 for name in self.GAUGE_METRIC_NAMES
136 if self._should_keep_metric(name) and name in response
137 }
138
139 def _should_keep_metric(self, name):
140 if name not in self._should_keep_metrics:
141 self._should_keep_metrics[name] = self._config.should_keep_metric(name)
142 return self._should_keep_metrics[name]
143
144 def flatten(self, d, parent_key="", sep="."):
145 items = []
146 for k, v in d.items():
147 new_key = parent_key + sep + k if parent_key else k
148 if isinstance(v, collections.MutableMapping):
149 items.extend(self.flatten(v, new_key, sep=sep).items())
150 else:
151 items.append((new_key, v))
152 return dict(items)
153
154
155 class FilebeatCheckInstanceConfig:
156
157 _only_metrics_regexes = None
158
159 def __init__(self, instance):
160 self._registry_file_path = instance.get("registry_file_path")
161 if self._registry_file_path is None:
162 raise Exception("An absolute path to a filebeat registry path must be specified")
163
164 self._stats_endpoint = instance.get("stats_endpoint")
165
166 self._only_metrics = instance.get("only_metrics", [])
167
168 if not isinstance(self._only_metrics, list):
169 raise Exception(
170 "If given, filebeat's only_metrics must be a list of regexes, got %s" % (self._only_metrics,)
171 )
172
173 self._timeout = instance.get("timeout", 2)
174 if not isinstance(self._timeout, numbers.Real) or self._timeout <= 0:
175 raise Exception("If given, filebeats timeout must be a positive number, got %s" % (self._timeout,))
176
177 @property
178 def registry_file_path(self):
179 return self._registry_file_path
180
181 @property
182 def stats_endpoint(self):
183 return self._stats_endpoint
184
185 @property
186 def timeout(self):
187 return self._timeout
188
189 def should_keep_metric(self, metric_name):
190
191 if not self._only_metrics:
192 return True
193
194 return any(re.search(regex, metric_name) for regex in self._compiled_regexes())
195
196 def _compiled_regexes(self):
197 if self._only_metrics_regexes is None:
198 self._only_metrics_regexes = self._compile_regexes()
199 return self._only_metrics_regexes
200
201 def _compile_regexes(self):
202 compiled_regexes = []
203
204 for regex in self._only_metrics:
205 try:
206 compiled_regexes.append(re.compile(regex))
207 except sre_constants.error as ex:
208 raise Exception('Invalid only_metric regex for filebeat: "%s", error: %s' % (regex, ex))
209
210 return compiled_regexes
211
212
213 class FilebeatCheck(AgentCheck):
214
215 METRIC_PREFIX = "filebeat."
216
217 def __init__(self, *args, **kwargs):
218 AgentCheck.__init__(self, *args, **kwargs)
219 self.instance_cache = {}
220
221 def check(self, instance):
222 normalize_metrics = is_affirmative(instance.get("normalize_metrics", False))
223
224 instance_key = hash_mutable(instance)
225 if instance_key in self.instance_cache:
226 config = self.instance_cache[instance_key]["config"]
227 profiler = self.instance_cache[instance_key]["profiler"]
228 else:
229 config = FilebeatCheckInstanceConfig(instance)
230 profiler = FilebeatCheckHttpProfiler(config)
231 self.instance_cache[instance_key] = {"config": config, "profiler": profiler}
232
233 self._process_registry(config)
234 self._gather_http_profiler_metrics(config, profiler, normalize_metrics)
235
236 def _process_registry(self, config):
237 registry_contents = self._parse_registry_file(config.registry_file_path)
238
239 if isinstance(registry_contents, dict):
240 # filebeat version < 5
241 registry_contents = registry_contents.values()
242
243 for item in registry_contents:
244 self._process_registry_item(item)
245
246 def _parse_registry_file(self, registry_file_path):
247 try:
248 with open(registry_file_path) as registry_file:
249 return json.load(registry_file)
250 except IOError as ex:
251 self.log.error("Cannot read the registry log file at %s: %s", registry_file_path, ex)
252
253 if ex.errno == errno.EACCES:
254 self.log.error(
255 "You might be interesting in having a look at " "https://github.com/elastic/beats/pull/6455"
256 )
257
258 return []
259
260 def _process_registry_item(self, item):
261 source = item["source"]
262 offset = item["offset"]
263
264 try:
265 stats = os.stat(source)
266
267 if self._is_same_file(stats, item["FileStateOS"]):
268 unprocessed_bytes = stats.st_size - offset
269
270 self.gauge("filebeat.registry.unprocessed_bytes", unprocessed_bytes, tags=["source:{0}".format(source)])
271 else:
272 self.log.debug("Filebeat source %s appears to have changed", source)
273 except OSError:
274 self.log.debug("Unable to get stats on filebeat source %s", source)
275
276 def _is_same_file(self, stats, file_state_os):
277 return stats.st_dev == file_state_os["device"] and stats.st_ino == file_state_os["inode"]
278
279 def _gather_http_profiler_metrics(self, config, profiler, normalize_metrics):
280 try:
281 all_metrics = profiler.gather_metrics()
282 except Exception as ex:
283 self.log.error("Error when fetching metrics from %s: %s", config.stats_endpoint, ex)
284 return
285
286 tags = ["stats_endpoint:{0}".format(config.stats_endpoint)]
287
288 for action, metrics in iteritems(all_metrics):
289 method = getattr(self, action)
290
291 for name, value in iteritems(metrics):
292 if not name.startswith(self.METRIC_PREFIX) and normalize_metrics:
293 name = self.METRIC_PREFIX + name
294 method(name, value, tags)
295
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/filebeat/datadog_checks/filebeat/filebeat.py b/filebeat/datadog_checks/filebeat/filebeat.py
--- a/filebeat/datadog_checks/filebeat/filebeat.py
+++ b/filebeat/datadog_checks/filebeat/filebeat.py
@@ -3,7 +3,6 @@
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
-import collections
import errno
import json
import numbers
@@ -12,11 +11,17 @@
import sre_constants
import requests
+import six
from six import iteritems
from datadog_checks.base import AgentCheck, is_affirmative
from datadog_checks.base.utils.containers import hash_mutable
+if six.PY3:
+ from collections.abc import MutableMapping
+else:
+ from collections import MutableMapping
+
EVENT_TYPE = SOURCE_TYPE_NAME = "filebeat"
@@ -145,7 +150,7 @@
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
- if isinstance(v, collections.MutableMapping):
+ if isinstance(v, MutableMapping):
items.extend(self.flatten(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
| {"golden_diff": "diff --git a/filebeat/datadog_checks/filebeat/filebeat.py b/filebeat/datadog_checks/filebeat/filebeat.py\n--- a/filebeat/datadog_checks/filebeat/filebeat.py\n+++ b/filebeat/datadog_checks/filebeat/filebeat.py\n@@ -3,7 +3,6 @@\n # Licensed under Simplified BSD License (see LICENSE)\n \n # stdlib\n-import collections\n import errno\n import json\n import numbers\n@@ -12,11 +11,17 @@\n import sre_constants\n \n import requests\n+import six\n from six import iteritems\n \n from datadog_checks.base import AgentCheck, is_affirmative\n from datadog_checks.base.utils.containers import hash_mutable\n \n+if six.PY3:\n+ from collections.abc import MutableMapping\n+else:\n+ from collections import MutableMapping\n+\n EVENT_TYPE = SOURCE_TYPE_NAME = \"filebeat\"\n \n \n@@ -145,7 +150,7 @@\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n- if isinstance(v, collections.MutableMapping):\n+ if isinstance(v, MutableMapping):\n items.extend(self.flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n", "issue": " Importing ABC directly from collections will be removed in Python 3.10\nUse collections for Python 2 and collections.abc for Python 3\r\n\r\nhttps://github.com/DataDog/integrations-extras/blob/7e676a2d256e142ddf4cb44ac11a6e590a502779/filebeat/datadog_checks/filebeat/filebeat.py#L148\n", "before_files": [{"content": "# (C) Datadog, Inc. 2010-2016\n# All rights reserved\n# Licensed under Simplified BSD License (see LICENSE)\n\n# stdlib\nimport collections\nimport errno\nimport json\nimport numbers\nimport os\nimport re\nimport sre_constants\n\nimport requests\nfrom six import iteritems\n\nfrom datadog_checks.base import AgentCheck, is_affirmative\nfrom datadog_checks.base.utils.containers import hash_mutable\n\nEVENT_TYPE = SOURCE_TYPE_NAME = \"filebeat\"\n\n\nclass FilebeatCheckHttpProfiler:\n \"\"\"\n Filebeat's HTTP profiler gives a bunch of counter variables; their value holds little interest,\n what we really want is the delta in between runs. 
This class is responsible for caching the\n values from the previous run\n \"\"\"\n\n INCREMENT_METRIC_NAMES = [\n \"filebeat.events.done\",\n \"filebeat.harvester.closed\",\n \"filebeat.harvester.files.truncated\",\n \"filebeat.harvester.open_files\",\n \"filebeat.harvester.skipped\",\n \"filebeat.harvester.started\",\n \"filebeat.prospector.log.files.renamed\",\n \"filebeat.prospector.log.files.truncated\",\n \"libbeat.config.module.running\",\n \"libbeat.config.module.starts\",\n \"libbeat.config.module.stops\",\n \"libbeat.config.reloads\",\n \"libbeat.es.call_count.PublishEvents\",\n \"libbeat.es.publish.read_bytes\",\n \"libbeat.es.publish.read_errors\",\n \"libbeat.es.publish.write_bytes\",\n \"libbeat.es.publish.write_errors\",\n \"libbeat.es.published_and_acked_events\",\n \"libbeat.es.published_but_not_acked_events\",\n \"libbeat.kafka.call_count.PublishEvents\",\n \"libbeat.kafka.published_and_acked_events\",\n \"libbeat.kafka.published_but_not_acked_events\",\n \"libbeat.logstash.call_count.PublishEvents\",\n \"libbeat.logstash.publish.read_bytes\",\n \"libbeat.logstash.publish.read_errors\",\n \"libbeat.logstash.publish.write_bytes\",\n \"libbeat.logstash.publish.write_errors\",\n \"libbeat.logstash.published_and_acked_events\",\n \"libbeat.logstash.published_but_not_acked_events\",\n \"libbeat.output.events.acked\",\n \"libbeat.output.events.dropped\",\n \"libbeat.output.events.failed\",\n \"libbeat.output.events.total\",\n \"libbeat.pipeline.events.dropped\",\n \"libbeat.pipeline.events.failed\",\n \"libbeat.pipeline.events.filtered\",\n \"libbeat.pipeline.events.published\",\n \"libbeat.pipeline.events.total\",\n \"libbeat.publisher.messages_in_worker_queues\",\n \"libbeat.publisher.published_events\",\n \"libbeat.redis.publish.read_bytes\",\n \"libbeat.redis.publish.read_errors\",\n \"libbeat.redis.publish.write_bytes\",\n \"libbeat.redis.publish.write_errors\",\n \"publish.events\",\n \"registrar.states.cleanup\",\n \"registrar.states.current\",\n \"registrar.states.update\",\n \"registrar.writes\",\n ]\n\n GAUGE_METRIC_NAMES = [\"filebeat.harvester.running\"]\n\n VARS_ROUTE = \"debug/vars\"\n\n def __init__(self, config):\n self._config = config\n self._previous_increment_values = {}\n # regex matching ain't free, let's cache this\n self._should_keep_metrics = {}\n\n def gather_metrics(self):\n if not self._config.stats_endpoint:\n return {}\n\n response = self._make_request()\n\n return {\"increment\": self._gather_increment_metrics(response), \"gauge\": self._gather_gauge_metrics(response)}\n\n def _make_request(self):\n\n response = requests.get(self._config.stats_endpoint, timeout=self._config.timeout)\n response.raise_for_status()\n\n return self.flatten(response.json())\n\n def _gather_increment_metrics(self, response):\n new_values = {\n name: response[name]\n for name in self.INCREMENT_METRIC_NAMES\n if self._should_keep_metric(name) and name in response\n }\n\n deltas = self._compute_increment_deltas(new_values)\n\n self._previous_increment_values = new_values\n\n return deltas\n\n def _compute_increment_deltas(self, new_values):\n deltas = {}\n\n for name, new_value in iteritems(new_values):\n if name not in self._previous_increment_values or self._previous_increment_values[name] > new_value:\n # either the agent or filebeat got restarted, we're not\n # reporting anything this time around\n return {}\n deltas[name] = new_value - self._previous_increment_values[name]\n\n return deltas\n\n def _gather_gauge_metrics(self, response):\n return {\n name: 
response[name]\n for name in self.GAUGE_METRIC_NAMES\n if self._should_keep_metric(name) and name in response\n }\n\n def _should_keep_metric(self, name):\n if name not in self._should_keep_metrics:\n self._should_keep_metrics[name] = self._config.should_keep_metric(name)\n return self._should_keep_metrics[name]\n\n def flatten(self, d, parent_key=\"\", sep=\".\"):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(self.flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\nclass FilebeatCheckInstanceConfig:\n\n _only_metrics_regexes = None\n\n def __init__(self, instance):\n self._registry_file_path = instance.get(\"registry_file_path\")\n if self._registry_file_path is None:\n raise Exception(\"An absolute path to a filebeat registry path must be specified\")\n\n self._stats_endpoint = instance.get(\"stats_endpoint\")\n\n self._only_metrics = instance.get(\"only_metrics\", [])\n\n if not isinstance(self._only_metrics, list):\n raise Exception(\n \"If given, filebeat's only_metrics must be a list of regexes, got %s\" % (self._only_metrics,)\n )\n\n self._timeout = instance.get(\"timeout\", 2)\n if not isinstance(self._timeout, numbers.Real) or self._timeout <= 0:\n raise Exception(\"If given, filebeats timeout must be a positive number, got %s\" % (self._timeout,))\n\n @property\n def registry_file_path(self):\n return self._registry_file_path\n\n @property\n def stats_endpoint(self):\n return self._stats_endpoint\n\n @property\n def timeout(self):\n return self._timeout\n\n def should_keep_metric(self, metric_name):\n\n if not self._only_metrics:\n return True\n\n return any(re.search(regex, metric_name) for regex in self._compiled_regexes())\n\n def _compiled_regexes(self):\n if self._only_metrics_regexes is None:\n self._only_metrics_regexes = self._compile_regexes()\n return self._only_metrics_regexes\n\n def _compile_regexes(self):\n compiled_regexes = []\n\n for regex in self._only_metrics:\n try:\n compiled_regexes.append(re.compile(regex))\n except sre_constants.error as ex:\n raise Exception('Invalid only_metric regex for filebeat: \"%s\", error: %s' % (regex, ex))\n\n return compiled_regexes\n\n\nclass FilebeatCheck(AgentCheck):\n\n METRIC_PREFIX = \"filebeat.\"\n\n def __init__(self, *args, **kwargs):\n AgentCheck.__init__(self, *args, **kwargs)\n self.instance_cache = {}\n\n def check(self, instance):\n normalize_metrics = is_affirmative(instance.get(\"normalize_metrics\", False))\n\n instance_key = hash_mutable(instance)\n if instance_key in self.instance_cache:\n config = self.instance_cache[instance_key][\"config\"]\n profiler = self.instance_cache[instance_key][\"profiler\"]\n else:\n config = FilebeatCheckInstanceConfig(instance)\n profiler = FilebeatCheckHttpProfiler(config)\n self.instance_cache[instance_key] = {\"config\": config, \"profiler\": profiler}\n\n self._process_registry(config)\n self._gather_http_profiler_metrics(config, profiler, normalize_metrics)\n\n def _process_registry(self, config):\n registry_contents = self._parse_registry_file(config.registry_file_path)\n\n if isinstance(registry_contents, dict):\n # filebeat version < 5\n registry_contents = registry_contents.values()\n\n for item in registry_contents:\n self._process_registry_item(item)\n\n def _parse_registry_file(self, registry_file_path):\n try:\n with open(registry_file_path) as registry_file:\n return json.load(registry_file)\n except IOError 
as ex:\n self.log.error(\"Cannot read the registry log file at %s: %s\", registry_file_path, ex)\n\n if ex.errno == errno.EACCES:\n self.log.error(\n \"You might be interesting in having a look at \" \"https://github.com/elastic/beats/pull/6455\"\n )\n\n return []\n\n def _process_registry_item(self, item):\n source = item[\"source\"]\n offset = item[\"offset\"]\n\n try:\n stats = os.stat(source)\n\n if self._is_same_file(stats, item[\"FileStateOS\"]):\n unprocessed_bytes = stats.st_size - offset\n\n self.gauge(\"filebeat.registry.unprocessed_bytes\", unprocessed_bytes, tags=[\"source:{0}\".format(source)])\n else:\n self.log.debug(\"Filebeat source %s appears to have changed\", source)\n except OSError:\n self.log.debug(\"Unable to get stats on filebeat source %s\", source)\n\n def _is_same_file(self, stats, file_state_os):\n return stats.st_dev == file_state_os[\"device\"] and stats.st_ino == file_state_os[\"inode\"]\n\n def _gather_http_profiler_metrics(self, config, profiler, normalize_metrics):\n try:\n all_metrics = profiler.gather_metrics()\n except Exception as ex:\n self.log.error(\"Error when fetching metrics from %s: %s\", config.stats_endpoint, ex)\n return\n\n tags = [\"stats_endpoint:{0}\".format(config.stats_endpoint)]\n\n for action, metrics in iteritems(all_metrics):\n method = getattr(self, action)\n\n for name, value in iteritems(metrics):\n if not name.startswith(self.METRIC_PREFIX) and normalize_metrics:\n name = self.METRIC_PREFIX + name\n method(name, value, tags)\n", "path": "filebeat/datadog_checks/filebeat/filebeat.py"}], "after_files": [{"content": "# (C) Datadog, Inc. 2010-2016\n# All rights reserved\n# Licensed under Simplified BSD License (see LICENSE)\n\n# stdlib\nimport errno\nimport json\nimport numbers\nimport os\nimport re\nimport sre_constants\n\nimport requests\nimport six\nfrom six import iteritems\n\nfrom datadog_checks.base import AgentCheck, is_affirmative\nfrom datadog_checks.base.utils.containers import hash_mutable\n\nif six.PY3:\n from collections.abc import MutableMapping\nelse:\n from collections import MutableMapping\n\nEVENT_TYPE = SOURCE_TYPE_NAME = \"filebeat\"\n\n\nclass FilebeatCheckHttpProfiler:\n \"\"\"\n Filebeat's HTTP profiler gives a bunch of counter variables; their value holds little interest,\n what we really want is the delta in between runs. 
This class is responsible for caching the\n values from the previous run\n \"\"\"\n\n INCREMENT_METRIC_NAMES = [\n \"filebeat.events.done\",\n \"filebeat.harvester.closed\",\n \"filebeat.harvester.files.truncated\",\n \"filebeat.harvester.open_files\",\n \"filebeat.harvester.skipped\",\n \"filebeat.harvester.started\",\n \"filebeat.prospector.log.files.renamed\",\n \"filebeat.prospector.log.files.truncated\",\n \"libbeat.config.module.running\",\n \"libbeat.config.module.starts\",\n \"libbeat.config.module.stops\",\n \"libbeat.config.reloads\",\n \"libbeat.es.call_count.PublishEvents\",\n \"libbeat.es.publish.read_bytes\",\n \"libbeat.es.publish.read_errors\",\n \"libbeat.es.publish.write_bytes\",\n \"libbeat.es.publish.write_errors\",\n \"libbeat.es.published_and_acked_events\",\n \"libbeat.es.published_but_not_acked_events\",\n \"libbeat.kafka.call_count.PublishEvents\",\n \"libbeat.kafka.published_and_acked_events\",\n \"libbeat.kafka.published_but_not_acked_events\",\n \"libbeat.logstash.call_count.PublishEvents\",\n \"libbeat.logstash.publish.read_bytes\",\n \"libbeat.logstash.publish.read_errors\",\n \"libbeat.logstash.publish.write_bytes\",\n \"libbeat.logstash.publish.write_errors\",\n \"libbeat.logstash.published_and_acked_events\",\n \"libbeat.logstash.published_but_not_acked_events\",\n \"libbeat.output.events.acked\",\n \"libbeat.output.events.dropped\",\n \"libbeat.output.events.failed\",\n \"libbeat.output.events.total\",\n \"libbeat.pipeline.events.dropped\",\n \"libbeat.pipeline.events.failed\",\n \"libbeat.pipeline.events.filtered\",\n \"libbeat.pipeline.events.published\",\n \"libbeat.pipeline.events.total\",\n \"libbeat.publisher.messages_in_worker_queues\",\n \"libbeat.publisher.published_events\",\n \"libbeat.redis.publish.read_bytes\",\n \"libbeat.redis.publish.read_errors\",\n \"libbeat.redis.publish.write_bytes\",\n \"libbeat.redis.publish.write_errors\",\n \"publish.events\",\n \"registrar.states.cleanup\",\n \"registrar.states.current\",\n \"registrar.states.update\",\n \"registrar.writes\",\n ]\n\n GAUGE_METRIC_NAMES = [\"filebeat.harvester.running\"]\n\n VARS_ROUTE = \"debug/vars\"\n\n def __init__(self, config):\n self._config = config\n self._previous_increment_values = {}\n # regex matching ain't free, let's cache this\n self._should_keep_metrics = {}\n\n def gather_metrics(self):\n if not self._config.stats_endpoint:\n return {}\n\n response = self._make_request()\n\n return {\"increment\": self._gather_increment_metrics(response), \"gauge\": self._gather_gauge_metrics(response)}\n\n def _make_request(self):\n\n response = requests.get(self._config.stats_endpoint, timeout=self._config.timeout)\n response.raise_for_status()\n\n return self.flatten(response.json())\n\n def _gather_increment_metrics(self, response):\n new_values = {\n name: response[name]\n for name in self.INCREMENT_METRIC_NAMES\n if self._should_keep_metric(name) and name in response\n }\n\n deltas = self._compute_increment_deltas(new_values)\n\n self._previous_increment_values = new_values\n\n return deltas\n\n def _compute_increment_deltas(self, new_values):\n deltas = {}\n\n for name, new_value in iteritems(new_values):\n if name not in self._previous_increment_values or self._previous_increment_values[name] > new_value:\n # either the agent or filebeat got restarted, we're not\n # reporting anything this time around\n return {}\n deltas[name] = new_value - self._previous_increment_values[name]\n\n return deltas\n\n def _gather_gauge_metrics(self, response):\n return {\n name: 
response[name]\n for name in self.GAUGE_METRIC_NAMES\n if self._should_keep_metric(name) and name in response\n }\n\n def _should_keep_metric(self, name):\n if name not in self._should_keep_metrics:\n self._should_keep_metrics[name] = self._config.should_keep_metric(name)\n return self._should_keep_metrics[name]\n\n def flatten(self, d, parent_key=\"\", sep=\".\"):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(self.flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\nclass FilebeatCheckInstanceConfig:\n\n _only_metrics_regexes = None\n\n def __init__(self, instance):\n self._registry_file_path = instance.get(\"registry_file_path\")\n if self._registry_file_path is None:\n raise Exception(\"An absolute path to a filebeat registry path must be specified\")\n\n self._stats_endpoint = instance.get(\"stats_endpoint\")\n\n self._only_metrics = instance.get(\"only_metrics\", [])\n\n if not isinstance(self._only_metrics, list):\n raise Exception(\n \"If given, filebeat's only_metrics must be a list of regexes, got %s\" % (self._only_metrics,)\n )\n\n self._timeout = instance.get(\"timeout\", 2)\n if not isinstance(self._timeout, numbers.Real) or self._timeout <= 0:\n raise Exception(\"If given, filebeats timeout must be a positive number, got %s\" % (self._timeout,))\n\n @property\n def registry_file_path(self):\n return self._registry_file_path\n\n @property\n def stats_endpoint(self):\n return self._stats_endpoint\n\n @property\n def timeout(self):\n return self._timeout\n\n def should_keep_metric(self, metric_name):\n\n if not self._only_metrics:\n return True\n\n return any(re.search(regex, metric_name) for regex in self._compiled_regexes())\n\n def _compiled_regexes(self):\n if self._only_metrics_regexes is None:\n self._only_metrics_regexes = self._compile_regexes()\n return self._only_metrics_regexes\n\n def _compile_regexes(self):\n compiled_regexes = []\n\n for regex in self._only_metrics:\n try:\n compiled_regexes.append(re.compile(regex))\n except sre_constants.error as ex:\n raise Exception('Invalid only_metric regex for filebeat: \"%s\", error: %s' % (regex, ex))\n\n return compiled_regexes\n\n\nclass FilebeatCheck(AgentCheck):\n\n METRIC_PREFIX = \"filebeat.\"\n\n def __init__(self, *args, **kwargs):\n AgentCheck.__init__(self, *args, **kwargs)\n self.instance_cache = {}\n\n def check(self, instance):\n normalize_metrics = is_affirmative(instance.get(\"normalize_metrics\", False))\n\n instance_key = hash_mutable(instance)\n if instance_key in self.instance_cache:\n config = self.instance_cache[instance_key][\"config\"]\n profiler = self.instance_cache[instance_key][\"profiler\"]\n else:\n config = FilebeatCheckInstanceConfig(instance)\n profiler = FilebeatCheckHttpProfiler(config)\n self.instance_cache[instance_key] = {\"config\": config, \"profiler\": profiler}\n\n self._process_registry(config)\n self._gather_http_profiler_metrics(config, profiler, normalize_metrics)\n\n def _process_registry(self, config):\n registry_contents = self._parse_registry_file(config.registry_file_path)\n\n if isinstance(registry_contents, dict):\n # filebeat version < 5\n registry_contents = registry_contents.values()\n\n for item in registry_contents:\n self._process_registry_item(item)\n\n def _parse_registry_file(self, registry_file_path):\n try:\n with open(registry_file_path) as registry_file:\n return json.load(registry_file)\n except IOError as ex:\n 
self.log.error(\"Cannot read the registry log file at %s: %s\", registry_file_path, ex)\n\n if ex.errno == errno.EACCES:\n self.log.error(\n \"You might be interesting in having a look at \" \"https://github.com/elastic/beats/pull/6455\"\n )\n\n return []\n\n def _process_registry_item(self, item):\n source = item[\"source\"]\n offset = item[\"offset\"]\n\n try:\n stats = os.stat(source)\n\n if self._is_same_file(stats, item[\"FileStateOS\"]):\n unprocessed_bytes = stats.st_size - offset\n\n self.gauge(\"filebeat.registry.unprocessed_bytes\", unprocessed_bytes, tags=[\"source:{0}\".format(source)])\n else:\n self.log.debug(\"Filebeat source %s appears to have changed\", source)\n except OSError:\n self.log.debug(\"Unable to get stats on filebeat source %s\", source)\n\n def _is_same_file(self, stats, file_state_os):\n return stats.st_dev == file_state_os[\"device\"] and stats.st_ino == file_state_os[\"inode\"]\n\n def _gather_http_profiler_metrics(self, config, profiler, normalize_metrics):\n try:\n all_metrics = profiler.gather_metrics()\n except Exception as ex:\n self.log.error(\"Error when fetching metrics from %s: %s\", config.stats_endpoint, ex)\n return\n\n tags = [\"stats_endpoint:{0}\".format(config.stats_endpoint)]\n\n for action, metrics in iteritems(all_metrics):\n method = getattr(self, action)\n\n for name, value in iteritems(metrics):\n if not name.startswith(self.METRIC_PREFIX) and normalize_metrics:\n name = self.METRIC_PREFIX + name\n method(name, value, tags)\n", "path": "filebeat/datadog_checks/filebeat/filebeat.py"}]} | 3,408 | 286 |
gh_patches_debug_33154 | rasdani/github-patches | git_diff | scikit-image__scikit-image-2509 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inconsistent cython version requirements
## Description
setup.py requires cython >= 0.21 while skimage/_build.py requires >= 0.23. Also, the two checks seem redundant.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python
2
3 descr = """Image Processing SciKit
4
5 Image processing algorithms for SciPy, including IO, morphology, filtering,
6 warping, color manipulation, object detection, etc.
7
8 Please refer to the online documentation at
9 http://scikit-image.org/
10 """
11
12 DISTNAME = 'scikit-image'
13 DESCRIPTION = 'Image processing routines for SciPy'
14 LONG_DESCRIPTION = descr
15 MAINTAINER = 'Stefan van der Walt'
16 MAINTAINER_EMAIL = '[email protected]'
17 URL = 'http://scikit-image.org'
18 LICENSE = 'Modified BSD'
19 DOWNLOAD_URL = 'http://github.com/scikit-image/scikit-image'
20
21 import os
22 import sys
23
24 import setuptools
25 from distutils.command.build_py import build_py
26 from distutils.command.sdist import sdist
27
28 if sys.version_info[0] < 3:
29 import __builtin__ as builtins
30 else:
31 import builtins
32
33 # This is a bit (!) hackish: we are setting a global variable so that the main
34 # skimage __init__ can detect if it is being loaded by the setup routine, to
35 # avoid attempting to load components that aren't built yet:
36 # the numpy distutils extensions that are used by scikit-image to recursively
37 # build the compiled extensions in sub-packages is based on the Python import
38 # machinery.
39 builtins.__SKIMAGE_SETUP__ = True
40
41
42 with open('skimage/__init__.py') as fid:
43 for line in fid:
44 if line.startswith('__version__'):
45 VERSION = line.strip().split()[-1][1:-1]
46 break
47
48 with open('requirements.txt') as fid:
49 INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]
50
51 # requirements for those browsing PyPI
52 REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]
53 REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]
54 REQUIRES = [r.replace('[array]', '') for r in REQUIRES]
55
56
57 def configuration(parent_package='', top_path=None):
58 if os.path.exists('MANIFEST'):
59 os.remove('MANIFEST')
60
61 from numpy.distutils.misc_util import Configuration
62 config = Configuration(None, parent_package, top_path)
63
64 config.set_options(
65 ignore_setup_xxx_py=True,
66 assume_default_configuration=True,
67 delegate_options_to_subpackages=True,
68 quiet=True)
69
70 config.add_subpackage('skimage')
71 config.add_data_dir('skimage/data')
72
73 return config
74
75
76 if __name__ == "__main__":
77 try:
78 from numpy.distutils.core import setup
79 extra = {'configuration': configuration}
80 # Do not try and upgrade larger dependencies
81 for lib in ['scipy', 'numpy', 'matplotlib', 'pillow']:
82 try:
83 __import__(lib)
84 INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES
85 if lib not in i]
86 except ImportError:
87 pass
88 except ImportError:
89 if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
90 sys.argv[1] in ('--help-commands',
91 '--version',
92 'clean',
93 'egg_info',
94 'install_egg_info',
95 'rotate')):
96 # For these actions, NumPy is not required.
97 #
98 # They are required to succeed without Numpy for example when
99 # pip is used to install scikit-image when Numpy is not yet
100 # present in the system.
101 from setuptools import setup
102 extra = {}
103 else:
104 print('To install scikit-image from source, you will need numpy.\n' +
105 'Install numpy with pip:\n' +
106 'pip install numpy\n'
107 'Or use your operating system package manager. For more\n' +
108 'details, see http://scikit-image.org/docs/stable/install.html')
109 sys.exit(1)
110
111 setup(
112 name=DISTNAME,
113 description=DESCRIPTION,
114 long_description=LONG_DESCRIPTION,
115 maintainer=MAINTAINER,
116 maintainer_email=MAINTAINER_EMAIL,
117 url=URL,
118 license=LICENSE,
119 download_url=DOWNLOAD_URL,
120 version=VERSION,
121
122 classifiers=[
123 'Development Status :: 4 - Beta',
124 'Environment :: Console',
125 'Intended Audience :: Developers',
126 'Intended Audience :: Science/Research',
127 'License :: OSI Approved :: BSD License',
128 'Programming Language :: C',
129 'Programming Language :: Python',
130 'Programming Language :: Python :: 3',
131 'Topic :: Scientific/Engineering',
132 'Operating System :: Microsoft :: Windows',
133 'Operating System :: POSIX',
134 'Operating System :: Unix',
135 'Operating System :: MacOS',
136 ],
137 install_requires=INSTALL_REQUIRES,
138 # install cython when running setup.py (source install)
139 setup_requires=['cython>=0.21'],
140 requires=REQUIRES,
141 packages=setuptools.find_packages(exclude=['doc']),
142 include_package_data=True,
143 zip_safe=False, # the package can run out of an .egg file
144
145 entry_points={
146 'console_scripts': ['skivi = skimage.scripts.skivi:main'],
147 },
148
149 cmdclass={'build_py': build_py,
150 'sdist': sdist},
151 **extra
152 )
153
```
Path: `skimage/_build.py`
Content:
```
1 import sys
2 import os
3 import hashlib
4 from distutils.version import LooseVersion
5
6 # WindowsError is not defined on unix systems
7 try:
8 WindowsError
9 except NameError:
10 class WindowsError(Exception):
11 pass
12
13
14 def cython(pyx_files, working_path=''):
15 """Use Cython to convert the given files to C.
16
17 Parameters
18 ----------
19 pyx_files : list of str
20 The input .pyx files.
21
22 """
23 # Do not build cython files if target is clean
24 if len(sys.argv) >= 2 and sys.argv[1] == 'clean':
25 return
26
27 try:
28 from Cython import __version__
29 if LooseVersion(__version__) < '0.23':
30 raise RuntimeError('Cython >= 0.23 needed to build scikit-image')
31
32 from Cython.Build import cythonize
33 except ImportError:
34 # If cython is not found, we do nothing -- the build will make use of
35 # the distributed .c files
36 print("Cython not found; falling back to pre-built %s"
37 % " ".join([f.replace('.pyx.in', 'c').replace('.pyx', '.c')
38 for f in pyx_files]))
39 else:
40 for pyxfile in [os.path.join(working_path, f) for f in pyx_files]:
41
42 # if the .pyx file stayed the same, we don't need to recompile
43 if not _changed(pyxfile):
44 continue
45
46 if pyxfile.endswith('.pyx.in'):
47 process_tempita_pyx(pyxfile)
48 pyxfile = pyxfile.replace('.pyx.in', '.pyx')
49
50 cythonize(pyxfile)
51
52
53 def _md5sum(f):
54 m = hashlib.new('md5')
55 while True:
56 # Hash one 8096 byte block at a time
57 d = f.read(8096)
58 if not d:
59 break
60 m.update(d)
61 return m.hexdigest()
62
63
64 def _changed(filename):
65 """Compare the hash of a Cython file to the cached hash value on disk.
66
67 """
68 filename_cache = filename + '.md5'
69
70 try:
71 md5_cached = open(filename_cache, 'rb').read()
72 except IOError:
73 md5_cached = '0'
74
75 with open(filename, 'rb') as f:
76 md5_new = _md5sum(f)
77
78 with open(filename_cache, 'wb') as cf:
79 cf.write(md5_new.encode('utf-8'))
80
81 return md5_cached != md5_new.encode('utf-8')
82
83
84 def process_tempita_pyx(fromfile):
85 try:
86 try:
87 from Cython import Tempita as tempita
88 except ImportError:
89 import tempita
90 except ImportError:
91 raise Exception('Building requires Tempita: '
92 'pip install --user Tempita')
93 template = tempita.Template.from_filename(fromfile,
94 encoding=sys.getdefaultencoding())
95 pyxcontent = template.substitute()
96 if not fromfile.endswith('.pyx.in'):
97 raise ValueError("Unexpected extension of %s." % fromfile)
98
99 pyxfile = os.path.splitext(fromfile)[0] # split off the .in ending
100 with open(pyxfile, "w") as f:
101 f.write(pyxcontent)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -135,8 +135,6 @@
'Operating System :: MacOS',
],
install_requires=INSTALL_REQUIRES,
- # install cython when running setup.py (source install)
- setup_requires=['cython>=0.21'],
requires=REQUIRES,
packages=setuptools.find_packages(exclude=['doc']),
include_package_data=True,
diff --git a/skimage/_build.py b/skimage/_build.py
--- a/skimage/_build.py
+++ b/skimage/_build.py
@@ -3,6 +3,8 @@
import hashlib
from distutils.version import LooseVersion
+CYTHON_VERSION = '0.23'
+
# WindowsError is not defined on unix systems
try:
WindowsError
@@ -26,16 +28,21 @@
try:
from Cython import __version__
- if LooseVersion(__version__) < '0.23':
- raise RuntimeError('Cython >= 0.23 needed to build scikit-image')
+ if LooseVersion(__version__) < CYTHON_VERSION:
+ raise RuntimeError('Cython >= %s needed to build scikit-image' % CYTHON_VERSION)
from Cython.Build import cythonize
except ImportError:
- # If cython is not found, we do nothing -- the build will make use of
- # the distributed .c files
- print("Cython not found; falling back to pre-built %s"
- % " ".join([f.replace('.pyx.in', 'c').replace('.pyx', '.c')
- for f in pyx_files]))
+ # If cython is not found, the build will make use of
+ # the distributed .c files if present
+ c_files = [f.replace('.pyx.in', '.c').replace('.pyx', '.c') for f in pyx_files]
+ for cfile in [os.path.join(working_path, f) for f in c_files]:
+ if not os.path.isfile(cfile):
+ raise RuntimeError('Cython >= %s is required to build scikit-image from git checkout' \
+ % CYTHON_VERSION)
+
+ print("Cython >= %s not found; falling back to pre-built %s" \
+ % (CYTHON_VERSION, " ".join(c_files)))
else:
for pyxfile in [os.path.join(working_path, f) for f in pyx_files]:
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -135,8 +135,6 @@\n 'Operating System :: MacOS',\n ],\n install_requires=INSTALL_REQUIRES,\n- # install cython when running setup.py (source install)\n- setup_requires=['cython>=0.21'],\n requires=REQUIRES,\n packages=setuptools.find_packages(exclude=['doc']),\n include_package_data=True,\ndiff --git a/skimage/_build.py b/skimage/_build.py\n--- a/skimage/_build.py\n+++ b/skimage/_build.py\n@@ -3,6 +3,8 @@\n import hashlib\n from distutils.version import LooseVersion\n \n+CYTHON_VERSION = '0.23'\n+\n # WindowsError is not defined on unix systems\n try:\n WindowsError\n@@ -26,16 +28,21 @@\n \n try:\n from Cython import __version__\n- if LooseVersion(__version__) < '0.23':\n- raise RuntimeError('Cython >= 0.23 needed to build scikit-image')\n+ if LooseVersion(__version__) < CYTHON_VERSION:\n+ raise RuntimeError('Cython >= %s needed to build scikit-image' % CYTHON_VERSION)\n \n from Cython.Build import cythonize\n except ImportError:\n- # If cython is not found, we do nothing -- the build will make use of\n- # the distributed .c files\n- print(\"Cython not found; falling back to pre-built %s\"\n- % \" \".join([f.replace('.pyx.in', 'c').replace('.pyx', '.c')\n- for f in pyx_files]))\n+ # If cython is not found, the build will make use of\n+ # the distributed .c files if present\n+ c_files = [f.replace('.pyx.in', '.c').replace('.pyx', '.c') for f in pyx_files]\n+ for cfile in [os.path.join(working_path, f) for f in c_files]:\n+ if not os.path.isfile(cfile):\n+ raise RuntimeError('Cython >= %s is required to build scikit-image from git checkout' \\\n+ % CYTHON_VERSION)\n+\n+ print(\"Cython >= %s not found; falling back to pre-built %s\" \\\n+ % (CYTHON_VERSION, \" \".join(c_files)))\n else:\n for pyxfile in [os.path.join(working_path, f) for f in pyx_files]:\n", "issue": "Inconsistent cython version requirements\n## Description\n\nsetup.py requires cython >= 0.21 while skimage/_build.py requires >= 0.23. Also, the two checks seem redundant.\n\n", "before_files": [{"content": "#! /usr/bin/env python\n\ndescr = \"\"\"Image Processing SciKit\n\nImage processing algorithms for SciPy, including IO, morphology, filtering,\nwarping, color manipulation, object detection, etc.\n\nPlease refer to the online documentation at\nhttp://scikit-image.org/\n\"\"\"\n\nDISTNAME = 'scikit-image'\nDESCRIPTION = 'Image processing routines for SciPy'\nLONG_DESCRIPTION = descr\nMAINTAINER = 'Stefan van der Walt'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'http://scikit-image.org'\nLICENSE = 'Modified BSD'\nDOWNLOAD_URL = 'http://github.com/scikit-image/scikit-image'\n\nimport os\nimport sys\n\nimport setuptools\nfrom distutils.command.build_py import build_py\nfrom distutils.command.sdist import sdist\n\nif sys.version_info[0] < 3:\n import __builtin__ as builtins\nelse:\n import builtins\n\n# This is a bit (!) 
hackish: we are setting a global variable so that the main\n# skimage __init__ can detect if it is being loaded by the setup routine, to\n# avoid attempting to load components that aren't built yet:\n# the numpy distutils extensions that are used by scikit-image to recursively\n# build the compiled extensions in sub-packages is based on the Python import\n# machinery.\nbuiltins.__SKIMAGE_SETUP__ = True\n\n\nwith open('skimage/__init__.py') as fid:\n for line in fid:\n if line.startswith('__version__'):\n VERSION = line.strip().split()[-1][1:-1]\n break\n\nwith open('requirements.txt') as fid:\n INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]\n\n# requirements for those browsing PyPI\nREQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]\nREQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]\nREQUIRES = [r.replace('[array]', '') for r in REQUIRES]\n\n\ndef configuration(parent_package='', top_path=None):\n if os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n from numpy.distutils.misc_util import Configuration\n config = Configuration(None, parent_package, top_path)\n\n config.set_options(\n ignore_setup_xxx_py=True,\n assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=True)\n\n config.add_subpackage('skimage')\n config.add_data_dir('skimage/data')\n\n return config\n\n\nif __name__ == \"__main__\":\n try:\n from numpy.distutils.core import setup\n extra = {'configuration': configuration}\n # Do not try and upgrade larger dependencies\n for lib in ['scipy', 'numpy', 'matplotlib', 'pillow']:\n try:\n __import__(lib)\n INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES\n if lib not in i]\n except ImportError:\n pass\n except ImportError:\n if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or\n sys.argv[1] in ('--help-commands',\n '--version',\n 'clean',\n 'egg_info',\n 'install_egg_info',\n 'rotate')):\n # For these actions, NumPy is not required.\n #\n # They are required to succeed without Numpy for example when\n # pip is used to install scikit-image when Numpy is not yet\n # present in the system.\n from setuptools import setup\n extra = {}\n else:\n print('To install scikit-image from source, you will need numpy.\\n' +\n 'Install numpy with pip:\\n' +\n 'pip install numpy\\n'\n 'Or use your operating system package manager. 
For more\\n' +\n 'details, see http://scikit-image.org/docs/stable/install.html')\n sys.exit(1)\n\n setup(\n name=DISTNAME,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n url=URL,\n license=LICENSE,\n download_url=DOWNLOAD_URL,\n version=VERSION,\n\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n install_requires=INSTALL_REQUIRES,\n # install cython when running setup.py (source install)\n setup_requires=['cython>=0.21'],\n requires=REQUIRES,\n packages=setuptools.find_packages(exclude=['doc']),\n include_package_data=True,\n zip_safe=False, # the package can run out of an .egg file\n\n entry_points={\n 'console_scripts': ['skivi = skimage.scripts.skivi:main'],\n },\n\n cmdclass={'build_py': build_py,\n 'sdist': sdist},\n **extra\n )\n", "path": "setup.py"}, {"content": "import sys\nimport os\nimport hashlib\nfrom distutils.version import LooseVersion\n\n# WindowsError is not defined on unix systems\ntry:\n WindowsError\nexcept NameError:\n class WindowsError(Exception):\n pass\n\n\ndef cython(pyx_files, working_path=''):\n \"\"\"Use Cython to convert the given files to C.\n\n Parameters\n ----------\n pyx_files : list of str\n The input .pyx files.\n\n \"\"\"\n # Do not build cython files if target is clean\n if len(sys.argv) >= 2 and sys.argv[1] == 'clean':\n return\n\n try:\n from Cython import __version__\n if LooseVersion(__version__) < '0.23':\n raise RuntimeError('Cython >= 0.23 needed to build scikit-image')\n\n from Cython.Build import cythonize\n except ImportError:\n # If cython is not found, we do nothing -- the build will make use of\n # the distributed .c files\n print(\"Cython not found; falling back to pre-built %s\"\n % \" \".join([f.replace('.pyx.in', 'c').replace('.pyx', '.c')\n for f in pyx_files]))\n else:\n for pyxfile in [os.path.join(working_path, f) for f in pyx_files]:\n\n # if the .pyx file stayed the same, we don't need to recompile\n if not _changed(pyxfile):\n continue\n\n if pyxfile.endswith('.pyx.in'):\n process_tempita_pyx(pyxfile)\n pyxfile = pyxfile.replace('.pyx.in', '.pyx')\n\n cythonize(pyxfile)\n\n\ndef _md5sum(f):\n m = hashlib.new('md5')\n while True:\n # Hash one 8096 byte block at a time\n d = f.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()\n\n\ndef _changed(filename):\n \"\"\"Compare the hash of a Cython file to the cached hash value on disk.\n\n \"\"\"\n filename_cache = filename + '.md5'\n\n try:\n md5_cached = open(filename_cache, 'rb').read()\n except IOError:\n md5_cached = '0'\n\n with open(filename, 'rb') as f:\n md5_new = _md5sum(f)\n\n with open(filename_cache, 'wb') as cf:\n cf.write(md5_new.encode('utf-8'))\n\n return md5_cached != md5_new.encode('utf-8')\n\n\ndef process_tempita_pyx(fromfile):\n try:\n try:\n from Cython import Tempita as tempita\n except ImportError:\n import tempita\n except ImportError:\n raise Exception('Building requires Tempita: '\n 'pip install --user Tempita')\n template = tempita.Template.from_filename(fromfile,\n encoding=sys.getdefaultencoding())\n pyxcontent = 
template.substitute()\n if not fromfile.endswith('.pyx.in'):\n raise ValueError(\"Unexpected extension of %s.\" % fromfile)\n\n pyxfile = os.path.splitext(fromfile)[0] # split off the .in ending\n with open(pyxfile, \"w\") as f:\n f.write(pyxcontent)\n", "path": "skimage/_build.py"}], "after_files": [{"content": "#! /usr/bin/env python\n\ndescr = \"\"\"Image Processing SciKit\n\nImage processing algorithms for SciPy, including IO, morphology, filtering,\nwarping, color manipulation, object detection, etc.\n\nPlease refer to the online documentation at\nhttp://scikit-image.org/\n\"\"\"\n\nDISTNAME = 'scikit-image'\nDESCRIPTION = 'Image processing routines for SciPy'\nLONG_DESCRIPTION = descr\nMAINTAINER = 'Stefan van der Walt'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'http://scikit-image.org'\nLICENSE = 'Modified BSD'\nDOWNLOAD_URL = 'http://github.com/scikit-image/scikit-image'\n\nimport os\nimport sys\n\nimport setuptools\nfrom distutils.command.build_py import build_py\nfrom distutils.command.sdist import sdist\n\nif sys.version_info[0] < 3:\n import __builtin__ as builtins\nelse:\n import builtins\n\n# This is a bit (!) hackish: we are setting a global variable so that the main\n# skimage __init__ can detect if it is being loaded by the setup routine, to\n# avoid attempting to load components that aren't built yet:\n# the numpy distutils extensions that are used by scikit-image to recursively\n# build the compiled extensions in sub-packages is based on the Python import\n# machinery.\nbuiltins.__SKIMAGE_SETUP__ = True\n\n\nwith open('skimage/__init__.py') as fid:\n for line in fid:\n if line.startswith('__version__'):\n VERSION = line.strip().split()[-1][1:-1]\n break\n\nwith open('requirements.txt') as fid:\n INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]\n\n# requirements for those browsing PyPI\nREQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]\nREQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]\nREQUIRES = [r.replace('[array]', '') for r in REQUIRES]\n\n\ndef configuration(parent_package='', top_path=None):\n if os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n from numpy.distutils.misc_util import Configuration\n config = Configuration(None, parent_package, top_path)\n\n config.set_options(\n ignore_setup_xxx_py=True,\n assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=True)\n\n config.add_subpackage('skimage')\n config.add_data_dir('skimage/data')\n\n return config\n\n\nif __name__ == \"__main__\":\n try:\n from numpy.distutils.core import setup\n extra = {'configuration': configuration}\n # Do not try and upgrade larger dependencies\n for lib in ['scipy', 'numpy', 'matplotlib', 'pillow']:\n try:\n __import__(lib)\n INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES\n if lib not in i]\n except ImportError:\n pass\n except ImportError:\n if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or\n sys.argv[1] in ('--help-commands',\n '--version',\n 'clean',\n 'egg_info',\n 'install_egg_info',\n 'rotate')):\n # For these actions, NumPy is not required.\n #\n # They are required to succeed without Numpy for example when\n # pip is used to install scikit-image when Numpy is not yet\n # present in the system.\n from setuptools import setup\n extra = {}\n else:\n print('To install scikit-image from source, you will need numpy.\\n' +\n 'Install numpy with pip:\\n' +\n 'pip install numpy\\n'\n 'Or use your operating system package manager. 
For more\\n' +\n 'details, see http://scikit-image.org/docs/stable/install.html')\n sys.exit(1)\n\n setup(\n name=DISTNAME,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n url=URL,\n license=LICENSE,\n download_url=DOWNLOAD_URL,\n version=VERSION,\n\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n install_requires=INSTALL_REQUIRES,\n requires=REQUIRES,\n packages=setuptools.find_packages(exclude=['doc']),\n include_package_data=True,\n zip_safe=False, # the package can run out of an .egg file\n\n entry_points={\n 'console_scripts': ['skivi = skimage.scripts.skivi:main'],\n },\n\n cmdclass={'build_py': build_py,\n 'sdist': sdist},\n **extra\n )\n", "path": "setup.py"}, {"content": "import sys\nimport os\nimport hashlib\nfrom distutils.version import LooseVersion\n\nCYTHON_VERSION = '0.23'\n\n# WindowsError is not defined on unix systems\ntry:\n WindowsError\nexcept NameError:\n class WindowsError(Exception):\n pass\n\n\ndef cython(pyx_files, working_path=''):\n \"\"\"Use Cython to convert the given files to C.\n\n Parameters\n ----------\n pyx_files : list of str\n The input .pyx files.\n\n \"\"\"\n # Do not build cython files if target is clean\n if len(sys.argv) >= 2 and sys.argv[1] == 'clean':\n return\n\n try:\n from Cython import __version__\n if LooseVersion(__version__) < CYTHON_VERSION:\n raise RuntimeError('Cython >= %s needed to build scikit-image' % CYTHON_VERSION)\n\n from Cython.Build import cythonize\n except ImportError:\n # If cython is not found, the build will make use of\n # the distributed .c files if present\n c_files = [f.replace('.pyx.in', '.c').replace('.pyx', '.c') for f in pyx_files]\n for cfile in [os.path.join(working_path, f) for f in c_files]:\n if not os.path.isfile(cfile):\n raise RuntimeError('Cython >= %s is required to build scikit-image from git checkout' \\\n % CYTHON_VERSION)\n\n print(\"Cython >= %s not found; falling back to pre-built %s\" \\\n % (CYTHON_VERSION, \" \".join(c_files)))\n else:\n for pyxfile in [os.path.join(working_path, f) for f in pyx_files]:\n\n # if the .pyx file stayed the same, we don't need to recompile\n if not _changed(pyxfile):\n continue\n\n if pyxfile.endswith('.pyx.in'):\n process_tempita_pyx(pyxfile)\n pyxfile = pyxfile.replace('.pyx.in', '.pyx')\n\n cythonize(pyxfile)\n\n\ndef _md5sum(f):\n m = hashlib.new('md5')\n while True:\n # Hash one 8096 byte block at a time\n d = f.read(8096)\n if not d:\n break\n m.update(d)\n return m.hexdigest()\n\n\ndef _changed(filename):\n \"\"\"Compare the hash of a Cython file to the cached hash value on disk.\n\n \"\"\"\n filename_cache = filename + '.md5'\n\n try:\n md5_cached = open(filename_cache, 'rb').read()\n except IOError:\n md5_cached = '0'\n\n with open(filename, 'rb') as f:\n md5_new = _md5sum(f)\n\n with open(filename_cache, 'wb') as cf:\n cf.write(md5_new.encode('utf-8'))\n\n return md5_cached != md5_new.encode('utf-8')\n\n\ndef process_tempita_pyx(fromfile):\n try:\n try:\n from Cython import Tempita as tempita\n except ImportError:\n import tempita\n except 
ImportError:\n raise Exception('Building requires Tempita: '\n 'pip install --user Tempita')\n template = tempita.Template.from_filename(fromfile,\n encoding=sys.getdefaultencoding())\n pyxcontent = template.substitute()\n if not fromfile.endswith('.pyx.in'):\n raise ValueError(\"Unexpected extension of %s.\" % fromfile)\n\n pyxfile = os.path.splitext(fromfile)[0] # split off the .in ending\n with open(pyxfile, \"w\") as f:\n f.write(pyxcontent)\n", "path": "skimage/_build.py"}]} | 2,726 | 556 |
gh_patches_debug_395 | rasdani/github-patches | git_diff | wright-group__WrightTools-640 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CITATION text file not distributed
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #! /usr/bin/env python3
2
3 import os
4 from setuptools import setup, find_packages
5
6
7 here = os.path.abspath(os.path.dirname(__file__))
8
9
10 def read(fname):
11 return open(os.path.join(here, fname)).read()
12
13
14 extra_files = {
15 "WrightTools": [
16 "datasets",
17 "datasets/*",
18 "datasets/*/*",
19 "datasets/*/*/*",
20 "datasets/*/*/*/*",
21 "VERSION",
22 "WT5_VERSION",
23 ]
24 }
25
26 with open(os.path.join(here, "WrightTools", "VERSION")) as version_file:
27 version = version_file.read().strip()
28
29 setup(
30 name="WrightTools",
31 packages=find_packages(exclude=("tests", "tests.*")),
32 package_data=extra_files,
33 python_requires=">=3.5",
34 setup_requires=["pytest-runner"],
35 tests_require=[
36 "pytest",
37 "pytest-cov",
38 "sphinx==1.6.5",
39 "sphinx-gallery==0.1.12",
40 "sphinx-rtd-theme",
41 ],
42 install_requires=[
43 "h5py",
44 "imageio",
45 "matplotlib>=2.0",
46 "numexpr",
47 "numpy",
48 "python-dateutil",
49 "pytz",
50 "scipy",
51 "tidy_headers",
52 ],
53 extras_require={
54 "docs": ["sphinx-gallery==0.1.12"],
55 "dev": ["black", "pre-commit", "pydocstyle"],
56 },
57 version=version,
58 description="Tools for loading, processing, and plotting multidimensional spectroscopy data.",
59 long_description=read("README.rst"),
60 author="WrightTools Developers",
61 license="MIT",
62 url="http://wright.tools",
63 keywords="spectroscopy science multidimensional visualization",
64 classifiers=[
65 "Development Status :: 5 - Production/Stable",
66 "Intended Audience :: Science/Research",
67 "License :: OSI Approved :: MIT License",
68 "Natural Language :: English",
69 "Programming Language :: Python :: 3",
70 "Programming Language :: Python :: 3.5",
71 "Programming Language :: Python :: 3.6",
72 "Topic :: Scientific/Engineering",
73 ],
74 )
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,6 +18,7 @@
"datasets/*/*",
"datasets/*/*/*",
"datasets/*/*/*/*",
+ "CITATION",
"VERSION",
"WT5_VERSION",
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,6 +18,7 @@\n \"datasets/*/*\",\n \"datasets/*/*/*\",\n \"datasets/*/*/*/*\",\n+ \"CITATION\",\n \"VERSION\",\n \"WT5_VERSION\",\n ]\n", "issue": "CITATION text file not distributed\n\n", "before_files": [{"content": "#! /usr/bin/env python3\n\nimport os\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(fname):\n return open(os.path.join(here, fname)).read()\n\n\nextra_files = {\n \"WrightTools\": [\n \"datasets\",\n \"datasets/*\",\n \"datasets/*/*\",\n \"datasets/*/*/*\",\n \"datasets/*/*/*/*\",\n \"VERSION\",\n \"WT5_VERSION\",\n ]\n}\n\nwith open(os.path.join(here, \"WrightTools\", \"VERSION\")) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name=\"WrightTools\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n package_data=extra_files,\n python_requires=\">=3.5\",\n setup_requires=[\"pytest-runner\"],\n tests_require=[\n \"pytest\",\n \"pytest-cov\",\n \"sphinx==1.6.5\",\n \"sphinx-gallery==0.1.12\",\n \"sphinx-rtd-theme\",\n ],\n install_requires=[\n \"h5py\",\n \"imageio\",\n \"matplotlib>=2.0\",\n \"numexpr\",\n \"numpy\",\n \"python-dateutil\",\n \"pytz\",\n \"scipy\",\n \"tidy_headers\",\n ],\n extras_require={\n \"docs\": [\"sphinx-gallery==0.1.12\"],\n \"dev\": [\"black\", \"pre-commit\", \"pydocstyle\"],\n },\n version=version,\n description=\"Tools for loading, processing, and plotting multidimensional spectroscopy data.\",\n long_description=read(\"README.rst\"),\n author=\"WrightTools Developers\",\n license=\"MIT\",\n url=\"http://wright.tools\",\n keywords=\"spectroscopy science multidimensional visualization\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#! 
/usr/bin/env python3\n\nimport os\nfrom setuptools import setup, find_packages\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read(fname):\n return open(os.path.join(here, fname)).read()\n\n\nextra_files = {\n \"WrightTools\": [\n \"datasets\",\n \"datasets/*\",\n \"datasets/*/*\",\n \"datasets/*/*/*\",\n \"datasets/*/*/*/*\",\n \"CITATION\",\n \"VERSION\",\n \"WT5_VERSION\",\n ]\n}\n\nwith open(os.path.join(here, \"WrightTools\", \"VERSION\")) as version_file:\n version = version_file.read().strip()\n\nsetup(\n name=\"WrightTools\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n package_data=extra_files,\n python_requires=\">=3.5\",\n setup_requires=[\"pytest-runner\"],\n tests_require=[\n \"pytest\",\n \"pytest-cov\",\n \"sphinx==1.6.5\",\n \"sphinx-gallery==0.1.12\",\n \"sphinx-rtd-theme\",\n ],\n install_requires=[\n \"h5py\",\n \"imageio\",\n \"matplotlib>=2.0\",\n \"numexpr\",\n \"numpy\",\n \"python-dateutil\",\n \"pytz\",\n \"scipy\",\n \"tidy_headers\",\n ],\n extras_require={\n \"docs\": [\"sphinx-gallery==0.1.12\"],\n \"dev\": [\"black\", \"pre-commit\", \"pydocstyle\"],\n },\n version=version,\n description=\"Tools for loading, processing, and plotting multidimensional spectroscopy data.\",\n long_description=read(\"README.rst\"),\n author=\"WrightTools Developers\",\n license=\"MIT\",\n url=\"http://wright.tools\",\n keywords=\"spectroscopy science multidimensional visualization\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Scientific/Engineering\",\n ],\n)\n", "path": "setup.py"}]} | 882 | 72 |
gh_patches_debug_8858 | rasdani/github-patches | git_diff | acl-org__acl-anthology-447 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Volume page aliases generated incorrectly
The Volume page Spoken Language Translation [here](https://aclweb.org/anthology/volumes/spoken-language-translation/) does not exist. Same with the [Tutorials volume](https://aclweb.org/anthology/volumes/tutorials/). I couldn't concoct any other volume names to test out, but both pages appear on Google.
_Originally posted by @aryamccarthy in https://github.com/acl-org/acl-anthology/issues/170#issuecomment-507788799_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bin/create_hugo_pages.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright 2019 Marcel Bollmann <[email protected]>
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17
18 """Usage: create_hugo_pages.py [--dir=DIR] [-c] [--debug]
19
20 Creates page stubs for the full anthology based on the YAML data files.
21
22 This script can only be run after create_hugo_yaml.py!
23
24 Options:
25 --dir=DIR Hugo project directory. [default: {scriptdir}/../build/]
26 --debug Output debug-level log messages.
27 -c, --clean Delete existing files in target directory before generation.
28 -h, --help Display this helpful text.
29 """
30
31 from docopt import docopt
32 from glob import glob
33 from slugify import slugify
34 from tqdm import tqdm
35 import logging as log
36 import os
37 import shutil
38 import yaml
39
40 try:
41 from yaml import CLoader as Loader
42 except ImportError:
43 from yaml import Loader
44
45 from anthology.utils import SeverityTracker
46
47
48 def check_directory(cdir, clean=False):
49 if not os.path.isdir(cdir) and not os.path.exists(cdir):
50 os.mkdir(cdir)
51 return True
52 entries = os.listdir(cdir)
53 if "_index.md" in entries:
54 entries.remove("_index.md")
55 if entries and not clean:
56 log.critical("Directory already exists and has content files: {}".format(cdir))
57 log.info(
58 "Call this script with the -c/--clean flag to automatically DELETE existing files"
59 )
60 return False
61 for entry in entries:
62 entry = "{}/{}".format(cdir, entry)
63 if os.path.isdir(entry):
64 shutil.rmtree(entry)
65 else:
66 os.remove(entry)
67 return True
68
69
70 def create_papers(srcdir, clean=False):
71 """Creates page stubs for all papers in the Anthology."""
72 log.info("Creating stubs for papers...")
73 if not check_directory("{}/content/papers".format(srcdir), clean=clean):
74 return
75
76 # Go through all paper volumes
77 for yamlfile in tqdm(glob("{}/data/papers/*.yaml".format(srcdir))):
78 log.debug("Processing {}".format(yamlfile))
79 with open(yamlfile, "r") as f:
80 data = yaml.load(f, Loader=Loader)
81 # Create a paper stub for each entry in the volume
82 for anthology_id, entry in data.items():
83 paper_dir = "{}/content/papers/{}/{}".format(
84 srcdir, anthology_id[0], anthology_id[:3]
85 )
86 if not os.path.exists(paper_dir):
87 os.makedirs(paper_dir)
88 with open("{}/{}.md".format(paper_dir, anthology_id), "w") as f:
89 print("---", file=f)
90 yaml.dump(
91 {"anthology_id": anthology_id, "title": entry["title"]},
92 default_flow_style=False,
93 stream=f,
94 )
95 print("---", file=f)
96
97
98 def create_volumes(srcdir, clean=False):
99 """Creates page stubs for all proceedings volumes in the Anthology."""
100 log.info("Creating stubs for volumes...")
101 if not check_directory("{}/content/volumes".format(srcdir), clean=clean):
102 return
103
104 yamlfile = "{}/data/volumes.yaml".format(srcdir)
105 log.debug("Processing {}".format(yamlfile))
106 with open(yamlfile, "r") as f:
107 data = yaml.load(f, Loader=Loader)
108 # Create a paper stub for each proceedings volume
109 for anthology_id, entry in data.items():
110 with open("{}/content/volumes/{}.md".format(srcdir, anthology_id), "w") as f:
111 print("---", file=f)
112 yaml.dump(
113 {
114 "anthology_id": anthology_id,
115 "title": entry["title"],
116 "aliases": [
117 slugify(entry["title"]),
118 "/papers/{}/{}/{}/".format(
119 anthology_id[0], anthology_id[:3], anthology_id
120 ),
121 ],
122 },
123 default_flow_style=False,
124 stream=f,
125 )
126 print("---", file=f)
127
128 return data
129
130
131 def create_people(srcdir, clean=False):
132 """Creates page stubs for all authors/editors in the Anthology."""
133 log.info("Creating stubs for people...")
134 if not check_directory("{}/content/people".format(srcdir), clean=clean):
135 return
136
137 for yamlfile in tqdm(glob("{}/data/people/*.yaml".format(srcdir))):
138 log.debug("Processing {}".format(yamlfile))
139 with open(yamlfile, "r") as f:
140 data = yaml.load(f, Loader=Loader)
141 # Create a page stub for each person
142 for name, entry in data.items():
143 person_dir = "{}/content/people/{}".format(srcdir, name[0])
144 if not os.path.exists(person_dir):
145 os.makedirs(person_dir)
146 yaml_data = {
147 "name": name,
148 "title": entry["full"],
149 "lastname": entry["last"],
150 }
151 with open("{}/{}.md".format(person_dir, name), "w") as f:
152 print("---", file=f)
153 # "lastname" is dumped to allow sorting by it in Hugo
154 yaml.dump(yaml_data, default_flow_style=False, stream=f)
155 print("---", file=f)
156
157 return data
158
159
160 def create_venues_and_events(srcdir, clean=False):
161 """Creates page stubs for all venues and events in the Anthology."""
162 yamlfile = "{}/data/venues.yaml".format(srcdir)
163 log.debug("Processing {}".format(yamlfile))
164 with open(yamlfile, "r") as f:
165 data = yaml.load(f, Loader=Loader)
166
167 log.info("Creating stubs for venues...")
168 if not check_directory("{}/content/venues".format(srcdir), clean=clean):
169 return
170 # Create a paper stub for each venue (e.g. ACL)
171 for venue, venue_data in data.items():
172 venue_str = venue_data["slug"]
173 with open("{}/content/venues/{}.md".format(srcdir, venue_str), "w") as f:
174 print("---", file=f)
175 yaml_data = {"venue": venue, "title": venue_data["name"]}
176 if venue_data["is_toplevel"]:
177 main_letter = venue_data["main_letter"]
178 yaml_data["aliases"] = ["/papers/{}/".format(main_letter)]
179 yaml.dump(yaml_data, default_flow_style=False, stream=f)
180 print("---", file=f)
181
182 log.info("Creating stubs for events...")
183 if not check_directory("{}/content/events".format(srcdir), clean=clean):
184 return
185 # Create a paper stub for each event (= venue + year, e.g. ACL 2018)
186 for venue, venue_data in data.items():
187 venue_str = venue_data["slug"]
188 for year in venue_data["volumes_by_year"]:
189 with open(
190 "{}/content/events/{}-{}.md".format(srcdir, venue_str, year), "w"
191 ) as f:
192 print("---", file=f)
193 yaml_data = {
194 "venue": venue,
195 "year": year,
196 "title": "{} ({})".format(venue_data["name"], year),
197 }
198 if venue_data["is_toplevel"]:
199 main_letter = venue_data["main_letter"]
200 main_prefix = main_letter + year[-2:] # e.g., P05
201 yaml_data["aliases"] = [
202 "/papers/{}/{}/".format(main_letter, main_prefix)
203 ]
204 yaml.dump(yaml_data, default_flow_style=False, stream=f)
205 print("---", file=f)
206
207
208 def create_sigs(srcdir, clean=False):
209 """Creates page stubs for all SIGs in the Anthology."""
210 yamlfile = "{}/data/sigs.yaml".format(srcdir)
211 log.debug("Processing {}".format(yamlfile))
212 with open(yamlfile, "r") as f:
213 data = yaml.load(f, Loader=Loader)
214
215 log.info("Creating stubs for SIGs...")
216 if not check_directory("{}/content/sigs".format(srcdir), clean=clean):
217 return
218 # Create a paper stub for each SIGS (e.g. SIGMORPHON)
219 for sig, sig_data in data.items():
220 sig_str = sig_data["slug"]
221 with open("{}/content/sigs/{}.md".format(srcdir, sig_str), "w") as f:
222 print("---", file=f)
223 yaml.dump(
224 {
225 "acronym": sig,
226 "short_acronym": sig[3:] if sig.startswith("SIG") else sig,
227 "title": sig_data["name"],
228 },
229 default_flow_style=False,
230 stream=f,
231 )
232 print("---", file=f)
233
234
235 if __name__ == "__main__":
236 args = docopt(__doc__)
237 scriptdir = os.path.dirname(os.path.abspath(__file__))
238 if "{scriptdir}" in args["--dir"]:
239 args["--dir"] = args["--dir"].format(scriptdir=scriptdir)
240 dir_ = os.path.abspath(args["--dir"])
241
242 log_level = log.DEBUG if args["--debug"] else log.INFO
243 log.basicConfig(format="%(levelname)-8s %(message)s", level=log_level)
244 tracker = SeverityTracker()
245 log.getLogger().addHandler(tracker)
246
247 create_papers(dir_, clean=args["--clean"])
248 create_volumes(dir_, clean=args["--clean"])
249 create_people(dir_, clean=args["--clean"])
250 create_venues_and_events(dir_, clean=args["--clean"])
251 create_sigs(dir_, clean=args["--clean"])
252
253 if tracker.highest >= log.ERROR:
254 exit(1)
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bin/create_hugo_pages.py b/bin/create_hugo_pages.py
--- a/bin/create_hugo_pages.py
+++ b/bin/create_hugo_pages.py
@@ -114,7 +114,7 @@
"anthology_id": anthology_id,
"title": entry["title"],
"aliases": [
- slugify(entry["title"]),
+ "/volumes/{}/".format(slugify(entry["title"])),
"/papers/{}/{}/{}/".format(
anthology_id[0], anthology_id[:3], anthology_id
),
| {"golden_diff": "diff --git a/bin/create_hugo_pages.py b/bin/create_hugo_pages.py\n--- a/bin/create_hugo_pages.py\n+++ b/bin/create_hugo_pages.py\n@@ -114,7 +114,7 @@\n \"anthology_id\": anthology_id,\n \"title\": entry[\"title\"],\n \"aliases\": [\n- slugify(entry[\"title\"]),\n+ \"/volumes/{}/\".format(slugify(entry[\"title\"])),\n \"/papers/{}/{}/{}/\".format(\n anthology_id[0], anthology_id[:3], anthology_id\n ),\n", "issue": "Volume page aliases generated incorrectly\nThe Volume page Spoken Language Translation [here](https://aclweb.org/anthology/volumes/spoken-language-translation/) does not exist. Same with the [Tutorials volume](https://aclweb.org/anthology/volumes/tutorials/). I couldn't concoct any other volume names to test out, but both pages appear on Google.\r\n\r\n_Originally posted by @aryamccarthy in https://github.com/acl-org/acl-anthology/issues/170#issuecomment-507788799_\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Usage: create_hugo_pages.py [--dir=DIR] [-c] [--debug]\n\nCreates page stubs for the full anthology based on the YAML data files.\n\nThis script can only be run after create_hugo_yaml.py!\n\nOptions:\n --dir=DIR Hugo project directory. 
[default: {scriptdir}/../build/]\n --debug Output debug-level log messages.\n -c, --clean Delete existing files in target directory before generation.\n -h, --help Display this helpful text.\n\"\"\"\n\nfrom docopt import docopt\nfrom glob import glob\nfrom slugify import slugify\nfrom tqdm import tqdm\nimport logging as log\nimport os\nimport shutil\nimport yaml\n\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n from yaml import Loader\n\nfrom anthology.utils import SeverityTracker\n\n\ndef check_directory(cdir, clean=False):\n if not os.path.isdir(cdir) and not os.path.exists(cdir):\n os.mkdir(cdir)\n return True\n entries = os.listdir(cdir)\n if \"_index.md\" in entries:\n entries.remove(\"_index.md\")\n if entries and not clean:\n log.critical(\"Directory already exists and has content files: {}\".format(cdir))\n log.info(\n \"Call this script with the -c/--clean flag to automatically DELETE existing files\"\n )\n return False\n for entry in entries:\n entry = \"{}/{}\".format(cdir, entry)\n if os.path.isdir(entry):\n shutil.rmtree(entry)\n else:\n os.remove(entry)\n return True\n\n\ndef create_papers(srcdir, clean=False):\n \"\"\"Creates page stubs for all papers in the Anthology.\"\"\"\n log.info(\"Creating stubs for papers...\")\n if not check_directory(\"{}/content/papers\".format(srcdir), clean=clean):\n return\n\n # Go through all paper volumes\n for yamlfile in tqdm(glob(\"{}/data/papers/*.yaml\".format(srcdir))):\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a paper stub for each entry in the volume\n for anthology_id, entry in data.items():\n paper_dir = \"{}/content/papers/{}/{}\".format(\n srcdir, anthology_id[0], anthology_id[:3]\n )\n if not os.path.exists(paper_dir):\n os.makedirs(paper_dir)\n with open(\"{}/{}.md\".format(paper_dir, anthology_id), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\"anthology_id\": anthology_id, \"title\": entry[\"title\"]},\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n\ndef create_volumes(srcdir, clean=False):\n \"\"\"Creates page stubs for all proceedings volumes in the Anthology.\"\"\"\n log.info(\"Creating stubs for volumes...\")\n if not check_directory(\"{}/content/volumes\".format(srcdir), clean=clean):\n return\n\n yamlfile = \"{}/data/volumes.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a paper stub for each proceedings volume\n for anthology_id, entry in data.items():\n with open(\"{}/content/volumes/{}.md\".format(srcdir, anthology_id), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\n \"anthology_id\": anthology_id,\n \"title\": entry[\"title\"],\n \"aliases\": [\n slugify(entry[\"title\"]),\n \"/papers/{}/{}/{}/\".format(\n anthology_id[0], anthology_id[:3], anthology_id\n ),\n ],\n },\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n return data\n\n\ndef create_people(srcdir, clean=False):\n \"\"\"Creates page stubs for all authors/editors in the Anthology.\"\"\"\n log.info(\"Creating stubs for people...\")\n if not check_directory(\"{}/content/people\".format(srcdir), clean=clean):\n return\n\n for yamlfile in tqdm(glob(\"{}/data/people/*.yaml\".format(srcdir))):\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a page stub for each person\n for name, entry in data.items():\n 
person_dir = \"{}/content/people/{}\".format(srcdir, name[0])\n if not os.path.exists(person_dir):\n os.makedirs(person_dir)\n yaml_data = {\n \"name\": name,\n \"title\": entry[\"full\"],\n \"lastname\": entry[\"last\"],\n }\n with open(\"{}/{}.md\".format(person_dir, name), \"w\") as f:\n print(\"---\", file=f)\n # \"lastname\" is dumped to allow sorting by it in Hugo\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n return data\n\n\ndef create_venues_and_events(srcdir, clean=False):\n \"\"\"Creates page stubs for all venues and events in the Anthology.\"\"\"\n yamlfile = \"{}/data/venues.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n\n log.info(\"Creating stubs for venues...\")\n if not check_directory(\"{}/content/venues\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each venue (e.g. ACL)\n for venue, venue_data in data.items():\n venue_str = venue_data[\"slug\"]\n with open(\"{}/content/venues/{}.md\".format(srcdir, venue_str), \"w\") as f:\n print(\"---\", file=f)\n yaml_data = {\"venue\": venue, \"title\": venue_data[\"name\"]}\n if venue_data[\"is_toplevel\"]:\n main_letter = venue_data[\"main_letter\"]\n yaml_data[\"aliases\"] = [\"/papers/{}/\".format(main_letter)]\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n log.info(\"Creating stubs for events...\")\n if not check_directory(\"{}/content/events\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each event (= venue + year, e.g. ACL 2018)\n for venue, venue_data in data.items():\n venue_str = venue_data[\"slug\"]\n for year in venue_data[\"volumes_by_year\"]:\n with open(\n \"{}/content/events/{}-{}.md\".format(srcdir, venue_str, year), \"w\"\n ) as f:\n print(\"---\", file=f)\n yaml_data = {\n \"venue\": venue,\n \"year\": year,\n \"title\": \"{} ({})\".format(venue_data[\"name\"], year),\n }\n if venue_data[\"is_toplevel\"]:\n main_letter = venue_data[\"main_letter\"]\n main_prefix = main_letter + year[-2:] # e.g., P05\n yaml_data[\"aliases\"] = [\n \"/papers/{}/{}/\".format(main_letter, main_prefix)\n ]\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n\ndef create_sigs(srcdir, clean=False):\n \"\"\"Creates page stubs for all SIGs in the Anthology.\"\"\"\n yamlfile = \"{}/data/sigs.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n\n log.info(\"Creating stubs for SIGs...\")\n if not check_directory(\"{}/content/sigs\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each SIGS (e.g. 
SIGMORPHON)\n for sig, sig_data in data.items():\n sig_str = sig_data[\"slug\"]\n with open(\"{}/content/sigs/{}.md\".format(srcdir, sig_str), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\n \"acronym\": sig,\n \"short_acronym\": sig[3:] if sig.startswith(\"SIG\") else sig,\n \"title\": sig_data[\"name\"],\n },\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n scriptdir = os.path.dirname(os.path.abspath(__file__))\n if \"{scriptdir}\" in args[\"--dir\"]:\n args[\"--dir\"] = args[\"--dir\"].format(scriptdir=scriptdir)\n dir_ = os.path.abspath(args[\"--dir\"])\n\n log_level = log.DEBUG if args[\"--debug\"] else log.INFO\n log.basicConfig(format=\"%(levelname)-8s %(message)s\", level=log_level)\n tracker = SeverityTracker()\n log.getLogger().addHandler(tracker)\n\n create_papers(dir_, clean=args[\"--clean\"])\n create_volumes(dir_, clean=args[\"--clean\"])\n create_people(dir_, clean=args[\"--clean\"])\n create_venues_and_events(dir_, clean=args[\"--clean\"])\n create_sigs(dir_, clean=args[\"--clean\"])\n\n if tracker.highest >= log.ERROR:\n exit(1)\n", "path": "bin/create_hugo_pages.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Marcel Bollmann <[email protected]>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Usage: create_hugo_pages.py [--dir=DIR] [-c] [--debug]\n\nCreates page stubs for the full anthology based on the YAML data files.\n\nThis script can only be run after create_hugo_yaml.py!\n\nOptions:\n --dir=DIR Hugo project directory. 
[default: {scriptdir}/../build/]\n --debug Output debug-level log messages.\n -c, --clean Delete existing files in target directory before generation.\n -h, --help Display this helpful text.\n\"\"\"\n\nfrom docopt import docopt\nfrom glob import glob\nfrom slugify import slugify\nfrom tqdm import tqdm\nimport logging as log\nimport os\nimport shutil\nimport yaml\n\ntry:\n from yaml import CLoader as Loader\nexcept ImportError:\n from yaml import Loader\n\nfrom anthology.utils import SeverityTracker\n\n\ndef check_directory(cdir, clean=False):\n if not os.path.isdir(cdir) and not os.path.exists(cdir):\n os.mkdir(cdir)\n return True\n entries = os.listdir(cdir)\n if \"_index.md\" in entries:\n entries.remove(\"_index.md\")\n if entries and not clean:\n log.critical(\"Directory already exists and has content files: {}\".format(cdir))\n log.info(\n \"Call this script with the -c/--clean flag to automatically DELETE existing files\"\n )\n return False\n for entry in entries:\n entry = \"{}/{}\".format(cdir, entry)\n if os.path.isdir(entry):\n shutil.rmtree(entry)\n else:\n os.remove(entry)\n return True\n\n\ndef create_papers(srcdir, clean=False):\n \"\"\"Creates page stubs for all papers in the Anthology.\"\"\"\n log.info(\"Creating stubs for papers...\")\n if not check_directory(\"{}/content/papers\".format(srcdir), clean=clean):\n return\n\n # Go through all paper volumes\n for yamlfile in tqdm(glob(\"{}/data/papers/*.yaml\".format(srcdir))):\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a paper stub for each entry in the volume\n for anthology_id, entry in data.items():\n paper_dir = \"{}/content/papers/{}/{}\".format(\n srcdir, anthology_id[0], anthology_id[:3]\n )\n if not os.path.exists(paper_dir):\n os.makedirs(paper_dir)\n with open(\"{}/{}.md\".format(paper_dir, anthology_id), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\"anthology_id\": anthology_id, \"title\": entry[\"title\"]},\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n\ndef create_volumes(srcdir, clean=False):\n \"\"\"Creates page stubs for all proceedings volumes in the Anthology.\"\"\"\n log.info(\"Creating stubs for volumes...\")\n if not check_directory(\"{}/content/volumes\".format(srcdir), clean=clean):\n return\n\n yamlfile = \"{}/data/volumes.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a paper stub for each proceedings volume\n for anthology_id, entry in data.items():\n with open(\"{}/content/volumes/{}.md\".format(srcdir, anthology_id), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\n \"anthology_id\": anthology_id,\n \"title\": entry[\"title\"],\n \"aliases\": [\n \"/volumes/{}/\".format(slugify(entry[\"title\"])),\n \"/papers/{}/{}/{}/\".format(\n anthology_id[0], anthology_id[:3], anthology_id\n ),\n ],\n },\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n return data\n\n\ndef create_people(srcdir, clean=False):\n \"\"\"Creates page stubs for all authors/editors in the Anthology.\"\"\"\n log.info(\"Creating stubs for people...\")\n if not check_directory(\"{}/content/people\".format(srcdir), clean=clean):\n return\n\n for yamlfile in tqdm(glob(\"{}/data/people/*.yaml\".format(srcdir))):\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n # Create a page stub for each person\n for name, 
entry in data.items():\n person_dir = \"{}/content/people/{}\".format(srcdir, name[0])\n if not os.path.exists(person_dir):\n os.makedirs(person_dir)\n yaml_data = {\n \"name\": name,\n \"title\": entry[\"full\"],\n \"lastname\": entry[\"last\"],\n }\n with open(\"{}/{}.md\".format(person_dir, name), \"w\") as f:\n print(\"---\", file=f)\n # \"lastname\" is dumped to allow sorting by it in Hugo\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n return data\n\n\ndef create_venues_and_events(srcdir, clean=False):\n \"\"\"Creates page stubs for all venues and events in the Anthology.\"\"\"\n yamlfile = \"{}/data/venues.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n\n log.info(\"Creating stubs for venues...\")\n if not check_directory(\"{}/content/venues\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each venue (e.g. ACL)\n for venue, venue_data in data.items():\n venue_str = venue_data[\"slug\"]\n with open(\"{}/content/venues/{}.md\".format(srcdir, venue_str), \"w\") as f:\n print(\"---\", file=f)\n yaml_data = {\"venue\": venue, \"title\": venue_data[\"name\"]}\n if venue_data[\"is_toplevel\"]:\n main_letter = venue_data[\"main_letter\"]\n yaml_data[\"aliases\"] = [\"/papers/{}/\".format(main_letter)]\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n log.info(\"Creating stubs for events...\")\n if not check_directory(\"{}/content/events\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each event (= venue + year, e.g. ACL 2018)\n for venue, venue_data in data.items():\n venue_str = venue_data[\"slug\"]\n for year in venue_data[\"volumes_by_year\"]:\n with open(\n \"{}/content/events/{}-{}.md\".format(srcdir, venue_str, year), \"w\"\n ) as f:\n print(\"---\", file=f)\n yaml_data = {\n \"venue\": venue,\n \"year\": year,\n \"title\": \"{} ({})\".format(venue_data[\"name\"], year),\n }\n if venue_data[\"is_toplevel\"]:\n main_letter = venue_data[\"main_letter\"]\n main_prefix = main_letter + year[-2:] # e.g., P05\n yaml_data[\"aliases\"] = [\n \"/papers/{}/{}/\".format(main_letter, main_prefix)\n ]\n yaml.dump(yaml_data, default_flow_style=False, stream=f)\n print(\"---\", file=f)\n\n\ndef create_sigs(srcdir, clean=False):\n \"\"\"Creates page stubs for all SIGs in the Anthology.\"\"\"\n yamlfile = \"{}/data/sigs.yaml\".format(srcdir)\n log.debug(\"Processing {}\".format(yamlfile))\n with open(yamlfile, \"r\") as f:\n data = yaml.load(f, Loader=Loader)\n\n log.info(\"Creating stubs for SIGs...\")\n if not check_directory(\"{}/content/sigs\".format(srcdir), clean=clean):\n return\n # Create a paper stub for each SIGS (e.g. 
SIGMORPHON)\n for sig, sig_data in data.items():\n sig_str = sig_data[\"slug\"]\n with open(\"{}/content/sigs/{}.md\".format(srcdir, sig_str), \"w\") as f:\n print(\"---\", file=f)\n yaml.dump(\n {\n \"acronym\": sig,\n \"short_acronym\": sig[3:] if sig.startswith(\"SIG\") else sig,\n \"title\": sig_data[\"name\"],\n },\n default_flow_style=False,\n stream=f,\n )\n print(\"---\", file=f)\n\n\nif __name__ == \"__main__\":\n args = docopt(__doc__)\n scriptdir = os.path.dirname(os.path.abspath(__file__))\n if \"{scriptdir}\" in args[\"--dir\"]:\n args[\"--dir\"] = args[\"--dir\"].format(scriptdir=scriptdir)\n dir_ = os.path.abspath(args[\"--dir\"])\n\n log_level = log.DEBUG if args[\"--debug\"] else log.INFO\n log.basicConfig(format=\"%(levelname)-8s %(message)s\", level=log_level)\n tracker = SeverityTracker()\n log.getLogger().addHandler(tracker)\n\n create_papers(dir_, clean=args[\"--clean\"])\n create_volumes(dir_, clean=args[\"--clean\"])\n create_people(dir_, clean=args[\"--clean\"])\n create_venues_and_events(dir_, clean=args[\"--clean\"])\n create_sigs(dir_, clean=args[\"--clean\"])\n\n if tracker.highest >= log.ERROR:\n exit(1)\n", "path": "bin/create_hugo_pages.py"}]} | 3,252 | 121 |
gh_patches_debug_24933 | rasdani/github-patches | git_diff | urllib3__urllib3-1665 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Multipart mixed responses generate warnings
If you read a multipart/mixed response, the connectionpool issues a couple of warnings about defects in the message. I'm not sure what the expected, correct behavior is, but these warnings seem spurious.
Stick this perl script somewhere:
```
#!/usr/bin/perl
print "Server: Some Server Name\r\n";
print "Content-Type: multipart/mixed; boundary=36eeb8c4e26d842a\r\n";
print "Content-Length: 178\r\n";
print "\r\n\r\n";
print "--36eeb8c4e26d842a\r\n";
print "Content-Type: text/plain\r\n";
print "\r\n";
print "7\r\n";
print "--36eeb8c4e26d842a\r\n";
print "Content-Type: text/plain\r\n";
print "\r\n";
print "9\r\n";
print "--36eeb8c4e26d842a\r\n";
print "Content-Type: text/plain\r\n";
print "\r\n";
print "11\r\n";
print "--36eeb8c4e26d842a--\r\n";
```
Read it with requests (naturally, you'll have to change the URI to wherever you put the script):
```
import requests, logging
logging.basicConfig(level=logging.WARNING)
logging.getLogger("requests").setLevel(logging.DEBUG)
headers = {'accept': "multipart/mixed"}
r = requests.get("http://localhost:8124/cgi-bin/mpm.pl", headers=headers)
print(r)
```
The following errors are displayed:
```
DEBUG:requests.packages.urllib3.connectionpool:"GET http://localhost:8124/cgi-bin/mpm.pl HTTP/1.1" 200 178
WARNING:requests.packages.urllib3.connectionpool:Failed to parse headers (url=http://localhost:8888/http://localhost:8124/cgi-bin/mpm.pl): [StartBoundaryNotFoundDefect(), MultipartInvariantViolationDefect()], unparsed data: ''
Traceback (most recent call last):
File "/home/ndw/.virtualenvs/pyapi/lib/python3.4/site-packages/requests-2.8.0-py3.4.egg/requests/packages/urllib3/connectionpool.py", line 390, in _make_request
assert_header_parsing(httplib_response.msg)
File "/home/ndw/.virtualenvs/pyapi/lib/python3.4/site-packages/requests-2.8.0-py3.4.egg/requests/packages/urllib3/util/response.py", line 58, in assert_header_parsing
raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
requests.packages.urllib3.exceptions.HeaderParsingError: [StartBoundaryNotFoundDefect(), MultipartInvariantViolationDefect()], unparsed data: ''
```
It took me quite a while to work out that they were spurious (because in real life, the server side that is generating the multipart/mixed is more complicated!)
See also kennethreitz/requests#3001
##
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/urllib3/util/response.py`
Content:
```
1 from __future__ import absolute_import
2 from ..packages.six.moves import http_client as httplib
3
4 from ..exceptions import HeaderParsingError
5
6
7 def is_fp_closed(obj):
8 """
9 Checks whether a given file-like object is closed.
10
11 :param obj:
12 The file-like object to check.
13 """
14
15 try:
16 # Check `isclosed()` first, in case Python3 doesn't set `closed`.
17 # GH Issue #928
18 return obj.isclosed()
19 except AttributeError:
20 pass
21
22 try:
23 # Check via the official file-like-object way.
24 return obj.closed
25 except AttributeError:
26 pass
27
28 try:
29 # Check if the object is a container for another file-like object that
30 # gets released on exhaustion (e.g. HTTPResponse).
31 return obj.fp is None
32 except AttributeError:
33 pass
34
35 raise ValueError("Unable to determine whether fp is closed.")
36
37
38 def assert_header_parsing(headers):
39 """
40 Asserts whether all headers have been successfully parsed.
41 Extracts encountered errors from the result of parsing headers.
42
43 Only works on Python 3.
44
45 :param headers: Headers to verify.
46 :type headers: `httplib.HTTPMessage`.
47
48 :raises urllib3.exceptions.HeaderParsingError:
49 If parsing errors are found.
50 """
51
52 # This will fail silently if we pass in the wrong kind of parameter.
53 # To make debugging easier add an explicit check.
54 if not isinstance(headers, httplib.HTTPMessage):
55 raise TypeError("expected httplib.Message, got {0}.".format(type(headers)))
56
57 defects = getattr(headers, "defects", None)
58 get_payload = getattr(headers, "get_payload", None)
59
60 unparsed_data = None
61 if get_payload:
62 # get_payload is actually email.message.Message.get_payload;
63 # we're only interested in the result if it's not a multipart message
64 if not headers.is_multipart():
65 payload = get_payload()
66
67 if isinstance(payload, (bytes, str)):
68 unparsed_data = payload
69
70 if defects or unparsed_data:
71 raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
72
73
74 def is_response_to_head(response):
75 """
76 Checks whether the request of a response has been a HEAD-request.
77 Handles the quirks of AppEngine.
78
79 :param conn:
80 :type conn: :class:`httplib.HTTPResponse`
81 """
82 # FIXME: Can we do this somehow without accessing private httplib _method?
83 method = response._method
84 if isinstance(method, int): # Platform-specific: Appengine
85 return method == 3
86 return method.upper() == "HEAD"
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/urllib3/util/response.py b/src/urllib3/util/response.py
--- a/src/urllib3/util/response.py
+++ b/src/urllib3/util/response.py
@@ -1,4 +1,5 @@
from __future__ import absolute_import
+from email.errors import StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect
from ..packages.six.moves import http_client as httplib
from ..exceptions import HeaderParsingError
@@ -66,6 +67,25 @@
if isinstance(payload, (bytes, str)):
unparsed_data = payload
+ if defects:
+ # httplib is assuming a response body is available
+ # when parsing headers even when httplib only sends
+ # header data to parse_headers() This results in
+ # defects on multipart responses in particular.
+ # See: https://github.com/urllib3/urllib3/issues/800
+
+ # So we ignore the following defects:
+ # - StartBoundaryNotFoundDefect:
+ # The claimed start boundary was never found.
+ # - MultipartInvariantViolationDefect:
+ # A message claimed to be a multipart but no subparts were found.
+ defects = [
+ defect
+ for defect in defects
+ if not isinstance(
+ defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)
+ )
+ ]
if defects or unparsed_data:
raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
| {"golden_diff": "diff --git a/src/urllib3/util/response.py b/src/urllib3/util/response.py\n--- a/src/urllib3/util/response.py\n+++ b/src/urllib3/util/response.py\n@@ -1,4 +1,5 @@\n from __future__ import absolute_import\n+from email.errors import StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect\n from ..packages.six.moves import http_client as httplib\n \n from ..exceptions import HeaderParsingError\n@@ -66,6 +67,25 @@\n \n if isinstance(payload, (bytes, str)):\n unparsed_data = payload\n+ if defects:\n+ # httplib is assuming a response body is available\n+ # when parsing headers even when httplib only sends\n+ # header data to parse_headers() This results in\n+ # defects on multipart responses in particular.\n+ # See: https://github.com/urllib3/urllib3/issues/800\n+\n+ # So we ignore the following defects:\n+ # - StartBoundaryNotFoundDefect:\n+ # The claimed start boundary was never found.\n+ # - MultipartInvariantViolationDefect:\n+ # A message claimed to be a multipart but no subparts were found.\n+ defects = [\n+ defect\n+ for defect in defects\n+ if not isinstance(\n+ defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)\n+ )\n+ ]\n \n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n", "issue": "Multipart mixed responses generate warnings\nIf you read a multipart/mixed response, the connectionpool issues a couple of warnings about defects in the message. I'm not sure what the expected, correct behavior is, but these warnings seem spurious.\n\nStick this perl script somewhere:\n\n```\n#!/usr/bin/perl\n\nprint \"Server: Some Server Name\\r\\n\";\nprint \"Content-Type: multipart/mixed; boundary=36eeb8c4e26d842a\\r\\n\";\nprint \"Content-Length: 178\\r\\n\";\nprint \"\\r\\n\\r\\n\";\nprint \"--36eeb8c4e26d842a\\r\\n\";\nprint \"Content-Type: text/plain\\r\\n\";\nprint \"\\r\\n\";\nprint \"7\\r\\n\";\nprint \"--36eeb8c4e26d842a\\r\\n\";\nprint \"Content-Type: text/plain\\r\\n\";\nprint \"\\r\\n\";\nprint \"9\\r\\n\";\nprint \"--36eeb8c4e26d842a\\r\\n\";\nprint \"Content-Type: text/plain\\r\\n\";\nprint \"\\r\\n\";\nprint \"11\\r\\n\";\nprint \"--36eeb8c4e26d842a--\\r\\n\";\n```\n\nRead it with requests (naturally, you'll have to change the URI to wherever you put the script):\n\n```\nimport requests, logging\n\nlogging.basicConfig(level=logging.WARNING)\nlogging.getLogger(\"requests\").setLevel(logging.DEBUG)\n\nheaders = {'accept': \"multipart/mixed\"}\nr = requests.get(\"http://localhost:8124/cgi-bin/mpm.pl\", headers=headers)\n\nprint(r)\n```\n\nThe following errors are displayed:\n\n```\nDEBUG:requests.packages.urllib3.connectionpool:\"GET http://localhost:8124/cgi-bin/mpm.pl HTTP/1.1\" 200 178\nWARNING:requests.packages.urllib3.connectionpool:Failed to parse headers (url=http://localhost:8888/http://localhost:8124/cgi-bin/mpm.pl): [StartBoundaryNotFoundDefect(), MultipartInvariantViolationDefect()], unparsed data: ''\nTraceback (most recent call last):\n File \"/home/ndw/.virtualenvs/pyapi/lib/python3.4/site-packages/requests-2.8.0-py3.4.egg/requests/packages/urllib3/connectionpool.py\", line 390, in _make_request\n assert_header_parsing(httplib_response.msg)\n File \"/home/ndw/.virtualenvs/pyapi/lib/python3.4/site-packages/requests-2.8.0-py3.4.egg/requests/packages/urllib3/util/response.py\", line 58, in assert_header_parsing\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\nrequests.packages.urllib3.exceptions.HeaderParsingError: [StartBoundaryNotFoundDefect(), 
MultipartInvariantViolationDefect()], unparsed data: ''\n```\n\nIt took me quite a while to work out that they were spurious (because in real life, the server side that is generating the multipart/mixed is more complicated!)\n\nSee also kennethreitz/requests#3001\n## \n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom ..packages.six.moves import http_client as httplib\n\nfrom ..exceptions import HeaderParsingError\n\n\ndef is_fp_closed(obj):\n \"\"\"\n Checks whether a given file-like object is closed.\n\n :param obj:\n The file-like object to check.\n \"\"\"\n\n try:\n # Check `isclosed()` first, in case Python3 doesn't set `closed`.\n # GH Issue #928\n return obj.isclosed()\n except AttributeError:\n pass\n\n try:\n # Check via the official file-like-object way.\n return obj.closed\n except AttributeError:\n pass\n\n try:\n # Check if the object is a container for another file-like object that\n # gets released on exhaustion (e.g. HTTPResponse).\n return obj.fp is None\n except AttributeError:\n pass\n\n raise ValueError(\"Unable to determine whether fp is closed.\")\n\n\ndef assert_header_parsing(headers):\n \"\"\"\n Asserts whether all headers have been successfully parsed.\n Extracts encountered errors from the result of parsing headers.\n\n Only works on Python 3.\n\n :param headers: Headers to verify.\n :type headers: `httplib.HTTPMessage`.\n\n :raises urllib3.exceptions.HeaderParsingError:\n If parsing errors are found.\n \"\"\"\n\n # This will fail silently if we pass in the wrong kind of parameter.\n # To make debugging easier add an explicit check.\n if not isinstance(headers, httplib.HTTPMessage):\n raise TypeError(\"expected httplib.Message, got {0}.\".format(type(headers)))\n\n defects = getattr(headers, \"defects\", None)\n get_payload = getattr(headers, \"get_payload\", None)\n\n unparsed_data = None\n if get_payload:\n # get_payload is actually email.message.Message.get_payload;\n # we're only interested in the result if it's not a multipart message\n if not headers.is_multipart():\n payload = get_payload()\n\n if isinstance(payload, (bytes, str)):\n unparsed_data = payload\n\n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n\n\ndef is_response_to_head(response):\n \"\"\"\n Checks whether the request of a response has been a HEAD-request.\n Handles the quirks of AppEngine.\n\n :param conn:\n :type conn: :class:`httplib.HTTPResponse`\n \"\"\"\n # FIXME: Can we do this somehow without accessing private httplib _method?\n method = response._method\n if isinstance(method, int): # Platform-specific: Appengine\n return method == 3\n return method.upper() == \"HEAD\"\n", "path": "src/urllib3/util/response.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom email.errors import StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect\nfrom ..packages.six.moves import http_client as httplib\n\nfrom ..exceptions import HeaderParsingError\n\n\ndef is_fp_closed(obj):\n \"\"\"\n Checks whether a given file-like object is closed.\n\n :param obj:\n The file-like object to check.\n \"\"\"\n\n try:\n # Check `isclosed()` first, in case Python3 doesn't set `closed`.\n # GH Issue #928\n return obj.isclosed()\n except AttributeError:\n pass\n\n try:\n # Check via the official file-like-object way.\n return obj.closed\n except AttributeError:\n pass\n\n try:\n # Check if the object is a container for another file-like object that\n # gets released on exhaustion (e.g. 
HTTPResponse).\n return obj.fp is None\n except AttributeError:\n pass\n\n raise ValueError(\"Unable to determine whether fp is closed.\")\n\n\ndef assert_header_parsing(headers):\n \"\"\"\n Asserts whether all headers have been successfully parsed.\n Extracts encountered errors from the result of parsing headers.\n\n Only works on Python 3.\n\n :param headers: Headers to verify.\n :type headers: `httplib.HTTPMessage`.\n\n :raises urllib3.exceptions.HeaderParsingError:\n If parsing errors are found.\n \"\"\"\n\n # This will fail silently if we pass in the wrong kind of parameter.\n # To make debugging easier add an explicit check.\n if not isinstance(headers, httplib.HTTPMessage):\n raise TypeError(\"expected httplib.Message, got {0}.\".format(type(headers)))\n\n defects = getattr(headers, \"defects\", None)\n get_payload = getattr(headers, \"get_payload\", None)\n\n unparsed_data = None\n if get_payload:\n # get_payload is actually email.message.Message.get_payload;\n # we're only interested in the result if it's not a multipart message\n if not headers.is_multipart():\n payload = get_payload()\n\n if isinstance(payload, (bytes, str)):\n unparsed_data = payload\n if defects:\n # httplib is assuming a response body is available\n # when parsing headers even when httplib only sends\n # header data to parse_headers() This results in\n # defects on multipart responses in particular.\n # See: https://github.com/urllib3/urllib3/issues/800\n\n # So we ignore the following defects:\n # - StartBoundaryNotFoundDefect:\n # The claimed start boundary was never found.\n # - MultipartInvariantViolationDefect:\n # A message claimed to be a multipart but no subparts were found.\n defects = [\n defect\n for defect in defects\n if not isinstance(\n defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)\n )\n ]\n\n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)\n\n\ndef is_response_to_head(response):\n \"\"\"\n Checks whether the request of a response has been a HEAD-request.\n Handles the quirks of AppEngine.\n\n :param conn:\n :type conn: :class:`httplib.HTTPResponse`\n \"\"\"\n # FIXME: Can we do this somehow without accessing private httplib _method?\n method = response._method\n if isinstance(method, int): # Platform-specific: Appengine\n return method == 3\n return method.upper() == \"HEAD\"\n", "path": "src/urllib3/util/response.py"}]} | 1,698 | 347 |
gh_patches_debug_1056 | rasdani/github-patches | git_diff | DDMAL__CantusDB-776 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Chant Search Manuscript view - change URL path to match OldCantus
I understand we're trying to keep URLs the same between OldCantus and NewCantus, but there's a difference in the Chant Search Manuscript view. OldCantus uses `/searchms/` (e.g. https://cantus.uwaterloo.ca/searchms/123610?t=est), whereas NewCantus uses `/chant-search-ms/` (e.g. http://206.12.93.196/chant-search-ms/123610?t=est).
This doesn't strike me as a particularly vital difference - I doubt many people will have bookmarked and/or cited a Search Manuscript page. But this would be a fairly simple fix, so we may as well make NewCantus work the same as OldCantus in this case.
Bigger picture question: how important is it that all URL paths match between OldCantus and New? @annamorphism, do you have a sense of this?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/urls.py`
Content:
```
1 from django.urls import include, path, reverse
2 from django.contrib.auth.views import (
3 PasswordResetView,
4 PasswordResetDoneView,
5 PasswordResetConfirmView,
6 PasswordResetCompleteView,
7 )
8 from main_app.views import views
9 import debug_toolbar
10 from main_app.views.century import (
11 CenturyDetailView,
12 )
13 from main_app.views.chant import (
14 ChantByCantusIDView,
15 ChantCreateView,
16 ChantDeleteView,
17 ChantDetailView,
18 ChantEditSyllabificationView,
19 ChantIndexView,
20 ChantListView,
21 ChantProofreadView,
22 ChantSearchView,
23 ChantSearchMSView,
24 CISearchView,
25 MelodySearchView,
26 SourceEditChantsView,
27 )
28 from main_app.views.feast import (
29 FeastDetailView,
30 FeastListView,
31 )
32 from main_app.views.genre import (
33 GenreDetailView,
34 GenreListView,
35 )
36 from main_app.views.notation import (
37 NotationDetailView,
38 )
39 from main_app.views.office import (
40 OfficeListView,
41 OfficeDetailView,
42 )
43 from main_app.views.provenance import (
44 ProvenanceDetailView,
45 )
46 from main_app.views.sequence import (
47 SequenceDetailView,
48 SequenceEditView,
49 SequenceListView,
50 )
51 from main_app.views.source import (
52 SourceCreateView,
53 SourceDetailView,
54 SourceEditView,
55 SourceListView,
56 )
57 from main_app.views.user import (
58 CustomLoginView,
59 CustomLogoutView,
60 IndexerListView,
61 UserDetailView,
62 UserListView,
63 UserSourceListView,
64 )
65
66 urlpatterns = [
67 path("__debug__/", include(debug_toolbar.urls)),
68 path(
69 "contact/",
70 views.contact,
71 name="contact",
72 ),
73 # login/logout/user
74 path(
75 "login/",
76 CustomLoginView.as_view(redirect_authenticated_user=True),
77 name="login",
78 ),
79 path(
80 "logout/",
81 CustomLogoutView.as_view(),
82 name="logout",
83 ),
84 path(
85 "my-sources/",
86 UserSourceListView.as_view(),
87 name="my-sources",
88 ),
89 path(
90 "user/<int:pk>",
91 UserDetailView.as_view(),
92 name="user-detail",
93 ),
94 path(
95 "users/",
96 UserListView.as_view(),
97 name="user-list",
98 ),
99 path(
100 "change-password/",
101 views.change_password,
102 name="change-password",
103 ),
104 # password reset views
105 path(
106 # here, user can initiate a request to send a password reset email
107 "reset-password/",
108 PasswordResetView.as_view(
109 template_name="registration/reset_password.html",
110 email_template_name="registration/reset_password_email.html",
111 success_url="/reset-password-sent/",
112 ),
113 name="reset_password",
114 ),
115 path(
116 # we display this page once the password reset email has been sent
117 "reset-password-sent/",
118 PasswordResetDoneView.as_view(
119 template_name="registration/reset_password_sent.html",
120 ),
121 name="reset_password_done",
122 ),
123 path(
124 # here, the user can specify their new password
125 "reset/<uidb64>/<token>",
126 PasswordResetConfirmView.as_view(
127 template_name="registration/reset_password_confirm.html",
128 success_url="/reset-password-complete/",
129 ),
130 name="reset_password_confirm",
131 ),
132 path(
133 # we display this page once a user has completed a password reset
134 # depending on whether their attempt was successful, this page either shows
135 # a success message or a non-success message.
136 "reset-password-complete/",
137 PasswordResetCompleteView.as_view(
138 template_name="registration/reset_password_complete.html"
139 ),
140 name="reset_password_complete",
141 ),
142 # century
143 path("century/<int:pk>", CenturyDetailView.as_view(), name="century-detail"),
144 # chant
145 path(
146 "chants/",
147 ChantListView.as_view(),
148 name="chant-list",
149 ), # /chants/?source={source id}
150 path(
151 "chant/<int:pk>",
152 ChantDetailView.as_view(),
153 name="chant-detail",
154 ),
155 path(
156 "chant-search/",
157 ChantSearchView.as_view(),
158 name="chant-search",
159 ),
160 path(
161 "chant-create/<int:source_pk>",
162 ChantCreateView.as_view(),
163 name="chant-create",
164 ),
165 path(
166 "id/<str:cantus_id>",
167 ChantByCantusIDView.as_view(),
168 name="chant-by-cantus-id",
169 ),
170 path(
171 "chant-delete/<int:pk>",
172 ChantDeleteView.as_view(),
173 name="chant-delete",
174 ),
175 path(
176 "edit-chants/<int:source_id>",
177 SourceEditChantsView.as_view(),
178 name="source-edit-chants",
179 ),
180 path(
181 "proofread-chant/<int:source_id>",
182 ChantProofreadView.as_view(),
183 name="chant-proofread",
184 ),
185 path(
186 "edit-syllabification/<int:chant_id>",
187 ChantEditSyllabificationView.as_view(),
188 name="source-edit-syllabification",
189 ),
190 path(
191 "index/",
192 ChantIndexView.as_view(),
193 name="chant-index",
194 ), # /index/?source={source id}
195 # feast
196 path(
197 "feasts/",
198 FeastListView.as_view(),
199 name="feast-list",
200 ),
201 path(
202 "feast/<int:pk>",
203 FeastDetailView.as_view(),
204 name="feast-detail",
205 ),
206 # genre
207 path(
208 "genres/",
209 GenreListView.as_view(),
210 name="genre-list",
211 ),
212 path(
213 "genre/<int:pk>",
214 GenreDetailView.as_view(),
215 name="genre-detail",
216 ),
217 # indexer
218 path(
219 "indexers/",
220 IndexerListView.as_view(),
221 name="indexer-list",
222 ),
223 # notation
224 path(
225 "notation/<int:pk>",
226 NotationDetailView.as_view(),
227 name="notation-detail",
228 ),
229 # office
230 path(
231 "offices/",
232 OfficeListView.as_view(),
233 name="office-list",
234 ),
235 path(
236 "office/<int:pk>",
237 OfficeDetailView.as_view(),
238 name="office-detail",
239 ),
240 # provenance
241 path(
242 "provenance/<int:pk>",
243 ProvenanceDetailView.as_view(),
244 name="provenance-detail",
245 ),
246 # sequence
247 path(
248 "sequences/",
249 SequenceListView.as_view(),
250 name="sequence-list",
251 ),
252 path(
253 "sequence/<int:pk>",
254 SequenceDetailView.as_view(),
255 name="sequence-detail",
256 ),
257 path(
258 "edit-sequence/<int:sequence_id>",
259 SequenceEditView.as_view(),
260 name="sequence-edit",
261 ),
262 # source
263 path(
264 "sources/",
265 SourceListView.as_view(),
266 name="source-list",
267 ),
268 path(
269 "source/<int:pk>",
270 SourceDetailView.as_view(),
271 name="source-detail",
272 ),
273 path(
274 "source-create/",
275 SourceCreateView.as_view(),
276 name="source-create",
277 ),
278 path(
279 "edit-source/<int:source_id>",
280 SourceEditView.as_view(),
281 name="source-edit",
282 ),
283 # melody
284 path(
285 "melody/",
286 MelodySearchView.as_view(),
287 name="melody-search",
288 ),
289 path(
290 "ajax/melody/<str:cantus_id>",
291 views.ajax_melody_list,
292 name="ajax-melody",
293 ),
294 path(
295 "ajax/melody-search/",
296 views.ajax_melody_search,
297 name="ajax-melody-search",
298 ),
299 # json api
300 path(
301 "json-sources/",
302 views.json_sources_export,
303 name="json-sources-export",
304 ),
305 path(
306 "json-node/<str:id>",
307 views.json_node_export,
308 name="json-node-export",
309 ),
310 path(
311 "json-nextchants/<str:cantus_id>",
312 views.json_nextchants,
313 name="json-nextchants",
314 ),
315 path(
316 "json-melody/<str:cantus_id>",
317 views.json_melody_export,
318 name="json-melody-export",
319 ),
320 # misc search
321 path(
322 "chant-search-ms/<int:source_pk>",
323 ChantSearchMSView.as_view(),
324 name="chant-search-ms",
325 ),
326 path(
327 "ci-search/<str:search_term>",
328 CISearchView.as_view(),
329 name="ci-search",
330 ),
331 path(
332 "ajax/search-bar/<str:search_term>",
333 views.ajax_search_bar,
334 name="ajax-search-bar",
335 ),
336 # misc
337 path(
338 "content-statistics",
339 views.items_count,
340 name="items-count",
341 ),
342 path(
343 "source/<str:source_id>/csv/",
344 views.csv_export,
345 name="csv-export",
346 ),
347 path(
348 "sites/default/files/csv/<str:source_id>.csv",
349 views.csv_export_redirect_from_old_path,
350 name="csv-export-old-path",
351 ),
352 path(
353 "ajax/concordance/<str:cantus_id>",
354 views.ajax_concordance_list,
355 name="ajax-concordance",
356 ),
357 # content overview (for project managers)
358 path(
359 "content-overview/",
360 views.content_overview,
361 name="content-overview",
362 ),
363 # /node/ url redirects
364 path(
365 "node/<int:pk>",
366 views.redirect_node_url,
367 name="redirect-node-url",
368 ),
369 # /indexer/ url redirects
370 path(
371 "indexer/<int:pk>",
372 views.redirect_indexer,
373 name="redirect-indexer",
374 ),
375 ]
376
377 handler404 = "main_app.views.views.handle404"
378
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django/cantusdb_project/main_app/urls.py b/django/cantusdb_project/main_app/urls.py
--- a/django/cantusdb_project/main_app/urls.py
+++ b/django/cantusdb_project/main_app/urls.py
@@ -319,7 +319,7 @@
),
# misc search
path(
- "chant-search-ms/<int:source_pk>",
+ "searchms/<int:source_pk>",
ChantSearchMSView.as_view(),
name="chant-search-ms",
),
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/urls.py b/django/cantusdb_project/main_app/urls.py\n--- a/django/cantusdb_project/main_app/urls.py\n+++ b/django/cantusdb_project/main_app/urls.py\n@@ -319,7 +319,7 @@\n ),\n # misc search\n path(\n- \"chant-search-ms/<int:source_pk>\",\n+ \"searchms/<int:source_pk>\",\n ChantSearchMSView.as_view(),\n name=\"chant-search-ms\",\n ),\n", "issue": "Chant Search Manuscript view - change URL path to match OldCantus\nI understand we're trying to keep URLs the same between OldCantus and NewCantus, but there's a difference in the Chant Search Manuscript view. OldCantus uses `/searchms/` (e.g. https://cantus.uwaterloo.ca/searchms/123610?t=est), whereas NewCantus uses `/chant-search-ms/` (e.g. http://206.12.93.196/chant-search-ms/123610?t=est).\r\n\r\nThis doesn't strike me as a particularly vital difference - I doubt many people will have bookmarked and or cited a Search Manuscript page. But this would be a fairly simple fix, so we may as well make NewCantus work the same as OldCantus in this case.\r\n\r\nBigger picture question: how important is it that all URL paths match between OldCantus and New? @annamorphism, do you have a sense of this?\n", "before_files": [{"content": "from django.urls import include, path, reverse\nfrom django.contrib.auth.views import (\n PasswordResetView,\n PasswordResetDoneView,\n PasswordResetConfirmView,\n PasswordResetCompleteView,\n)\nfrom main_app.views import views\nimport debug_toolbar\nfrom main_app.views.century import (\n CenturyDetailView,\n)\nfrom main_app.views.chant import (\n ChantByCantusIDView,\n ChantCreateView,\n ChantDeleteView,\n ChantDetailView,\n ChantEditSyllabificationView,\n ChantIndexView,\n ChantListView,\n ChantProofreadView,\n ChantSearchView,\n ChantSearchMSView,\n CISearchView,\n MelodySearchView,\n SourceEditChantsView,\n)\nfrom main_app.views.feast import (\n FeastDetailView,\n FeastListView,\n)\nfrom main_app.views.genre import (\n GenreDetailView,\n GenreListView,\n)\nfrom main_app.views.notation import (\n NotationDetailView,\n)\nfrom main_app.views.office import (\n OfficeListView,\n OfficeDetailView,\n)\nfrom main_app.views.provenance import (\n ProvenanceDetailView,\n)\nfrom main_app.views.sequence import (\n SequenceDetailView,\n SequenceEditView,\n SequenceListView,\n)\nfrom main_app.views.source import (\n SourceCreateView,\n SourceDetailView,\n SourceEditView,\n SourceListView,\n)\nfrom main_app.views.user import (\n CustomLoginView,\n CustomLogoutView,\n IndexerListView,\n UserDetailView,\n UserListView,\n UserSourceListView,\n)\n\nurlpatterns = [\n path(\"__debug__/\", include(debug_toolbar.urls)),\n path(\n \"contact/\",\n views.contact,\n name=\"contact\",\n ),\n # login/logout/user\n path(\n \"login/\",\n CustomLoginView.as_view(redirect_authenticated_user=True),\n name=\"login\",\n ),\n path(\n \"logout/\",\n CustomLogoutView.as_view(),\n name=\"logout\",\n ),\n path(\n \"my-sources/\",\n UserSourceListView.as_view(),\n name=\"my-sources\",\n ),\n path(\n \"user/<int:pk>\",\n UserDetailView.as_view(),\n name=\"user-detail\",\n ),\n path(\n \"users/\",\n UserListView.as_view(),\n name=\"user-list\",\n ),\n path(\n \"change-password/\",\n views.change_password,\n name=\"change-password\",\n ),\n # password reset views\n path(\n # here, user can initiate a request to send a password reset email\n \"reset-password/\",\n PasswordResetView.as_view(\n template_name=\"registration/reset_password.html\",\n 
email_template_name=\"registration/reset_password_email.html\",\n success_url=\"/reset-password-sent/\",\n ),\n name=\"reset_password\",\n ),\n path(\n # we display this page once the password reset email has been sent\n \"reset-password-sent/\",\n PasswordResetDoneView.as_view(\n template_name=\"registration/reset_password_sent.html\",\n ),\n name=\"reset_password_done\",\n ),\n path(\n # here, the user can specify their new password\n \"reset/<uidb64>/<token>\",\n PasswordResetConfirmView.as_view(\n template_name=\"registration/reset_password_confirm.html\",\n success_url=\"/reset-password-complete/\",\n ),\n name=\"reset_password_confirm\",\n ),\n path(\n # we display this page once a user has completed a password reset\n # depending on whether their attempt was successful, this page either shows\n # a success message or a non-success message.\n \"reset-password-complete/\",\n PasswordResetCompleteView.as_view(\n template_name=\"registration/reset_password_complete.html\"\n ),\n name=\"reset_password_complete\",\n ),\n # century\n path(\"century/<int:pk>\", CenturyDetailView.as_view(), name=\"century-detail\"),\n # chant\n path(\n \"chants/\",\n ChantListView.as_view(),\n name=\"chant-list\",\n ), # /chants/?source={source id}\n path(\n \"chant/<int:pk>\",\n ChantDetailView.as_view(),\n name=\"chant-detail\",\n ),\n path(\n \"chant-search/\",\n ChantSearchView.as_view(),\n name=\"chant-search\",\n ),\n path(\n \"chant-create/<int:source_pk>\",\n ChantCreateView.as_view(),\n name=\"chant-create\",\n ),\n path(\n \"id/<str:cantus_id>\",\n ChantByCantusIDView.as_view(),\n name=\"chant-by-cantus-id\",\n ),\n path(\n \"chant-delete/<int:pk>\",\n ChantDeleteView.as_view(),\n name=\"chant-delete\",\n ),\n path(\n \"edit-chants/<int:source_id>\",\n SourceEditChantsView.as_view(),\n name=\"source-edit-chants\",\n ),\n path(\n \"proofread-chant/<int:source_id>\",\n ChantProofreadView.as_view(),\n name=\"chant-proofread\",\n ),\n path(\n \"edit-syllabification/<int:chant_id>\",\n ChantEditSyllabificationView.as_view(),\n name=\"source-edit-syllabification\",\n ),\n path(\n \"index/\",\n ChantIndexView.as_view(),\n name=\"chant-index\",\n ), # /index/?source={source id}\n # feast\n path(\n \"feasts/\",\n FeastListView.as_view(),\n name=\"feast-list\",\n ),\n path(\n \"feast/<int:pk>\",\n FeastDetailView.as_view(),\n name=\"feast-detail\",\n ),\n # genre\n path(\n \"genres/\",\n GenreListView.as_view(),\n name=\"genre-list\",\n ),\n path(\n \"genre/<int:pk>\",\n GenreDetailView.as_view(),\n name=\"genre-detail\",\n ),\n # indexer\n path(\n \"indexers/\",\n IndexerListView.as_view(),\n name=\"indexer-list\",\n ),\n # notation\n path(\n \"notation/<int:pk>\",\n NotationDetailView.as_view(),\n name=\"notation-detail\",\n ),\n # office\n path(\n \"offices/\",\n OfficeListView.as_view(),\n name=\"office-list\",\n ),\n path(\n \"office/<int:pk>\",\n OfficeDetailView.as_view(),\n name=\"office-detail\",\n ),\n # provenance\n path(\n \"provenance/<int:pk>\",\n ProvenanceDetailView.as_view(),\n name=\"provenance-detail\",\n ),\n # sequence\n path(\n \"sequences/\",\n SequenceListView.as_view(),\n name=\"sequence-list\",\n ),\n path(\n \"sequence/<int:pk>\",\n SequenceDetailView.as_view(),\n name=\"sequence-detail\",\n ),\n path(\n \"edit-sequence/<int:sequence_id>\",\n SequenceEditView.as_view(),\n name=\"sequence-edit\",\n ),\n # source\n path(\n \"sources/\",\n SourceListView.as_view(),\n name=\"source-list\",\n ),\n path(\n \"source/<int:pk>\",\n SourceDetailView.as_view(),\n name=\"source-detail\",\n 
),\n path(\n \"source-create/\",\n SourceCreateView.as_view(),\n name=\"source-create\",\n ),\n path(\n \"edit-source/<int:source_id>\",\n SourceEditView.as_view(),\n name=\"source-edit\",\n ),\n # melody\n path(\n \"melody/\",\n MelodySearchView.as_view(),\n name=\"melody-search\",\n ),\n path(\n \"ajax/melody/<str:cantus_id>\",\n views.ajax_melody_list,\n name=\"ajax-melody\",\n ),\n path(\n \"ajax/melody-search/\",\n views.ajax_melody_search,\n name=\"ajax-melody-search\",\n ),\n # json api\n path(\n \"json-sources/\",\n views.json_sources_export,\n name=\"json-sources-export\",\n ),\n path(\n \"json-node/<str:id>\",\n views.json_node_export,\n name=\"json-node-export\",\n ),\n path(\n \"json-nextchants/<str:cantus_id>\",\n views.json_nextchants,\n name=\"json-nextchants\",\n ),\n path(\n \"json-melody/<str:cantus_id>\",\n views.json_melody_export,\n name=\"json-melody-export\",\n ),\n # misc search\n path(\n \"chant-search-ms/<int:source_pk>\",\n ChantSearchMSView.as_view(),\n name=\"chant-search-ms\",\n ),\n path(\n \"ci-search/<str:search_term>\",\n CISearchView.as_view(),\n name=\"ci-search\",\n ),\n path(\n \"ajax/search-bar/<str:search_term>\",\n views.ajax_search_bar,\n name=\"ajax-search-bar\",\n ),\n # misc\n path(\n \"content-statistics\",\n views.items_count,\n name=\"items-count\",\n ),\n path(\n \"source/<str:source_id>/csv/\",\n views.csv_export,\n name=\"csv-export\",\n ),\n path(\n \"sites/default/files/csv/<str:source_id>.csv\",\n views.csv_export_redirect_from_old_path,\n name=\"csv-export-old-path\",\n ),\n path(\n \"ajax/concordance/<str:cantus_id>\",\n views.ajax_concordance_list,\n name=\"ajax-concordance\",\n ),\n # content overview (for project managers)\n path(\n \"content-overview/\",\n views.content_overview,\n name=\"content-overview\",\n ),\n # /node/ url redirects\n path(\n \"node/<int:pk>\",\n views.redirect_node_url,\n name=\"redirect-node-url\",\n ),\n # /indexer/ url redirects\n path(\n \"indexer/<int:pk>\",\n views.redirect_indexer,\n name=\"redirect-indexer\",\n ),\n]\n\nhandler404 = \"main_app.views.views.handle404\"\n", "path": "django/cantusdb_project/main_app/urls.py"}], "after_files": [{"content": "from django.urls import include, path, reverse\nfrom django.contrib.auth.views import (\n PasswordResetView,\n PasswordResetDoneView,\n PasswordResetConfirmView,\n PasswordResetCompleteView,\n)\nfrom main_app.views import views\nimport debug_toolbar\nfrom main_app.views.century import (\n CenturyDetailView,\n)\nfrom main_app.views.chant import (\n ChantByCantusIDView,\n ChantCreateView,\n ChantDeleteView,\n ChantDetailView,\n ChantEditSyllabificationView,\n ChantIndexView,\n ChantListView,\n ChantProofreadView,\n ChantSearchView,\n ChantSearchMSView,\n CISearchView,\n MelodySearchView,\n SourceEditChantsView,\n)\nfrom main_app.views.feast import (\n FeastDetailView,\n FeastListView,\n)\nfrom main_app.views.genre import (\n GenreDetailView,\n GenreListView,\n)\nfrom main_app.views.notation import (\n NotationDetailView,\n)\nfrom main_app.views.office import (\n OfficeListView,\n OfficeDetailView,\n)\nfrom main_app.views.provenance import (\n ProvenanceDetailView,\n)\nfrom main_app.views.sequence import (\n SequenceDetailView,\n SequenceEditView,\n SequenceListView,\n)\nfrom main_app.views.source import (\n SourceCreateView,\n SourceDetailView,\n SourceEditView,\n SourceListView,\n)\nfrom main_app.views.user import (\n CustomLoginView,\n CustomLogoutView,\n IndexerListView,\n UserDetailView,\n UserListView,\n UserSourceListView,\n)\n\nurlpatterns = [\n 
path(\"__debug__/\", include(debug_toolbar.urls)),\n path(\n \"contact/\",\n views.contact,\n name=\"contact\",\n ),\n # login/logout/user\n path(\n \"login/\",\n CustomLoginView.as_view(redirect_authenticated_user=True),\n name=\"login\",\n ),\n path(\n \"logout/\",\n CustomLogoutView.as_view(),\n name=\"logout\",\n ),\n path(\n \"my-sources/\",\n UserSourceListView.as_view(),\n name=\"my-sources\",\n ),\n path(\n \"user/<int:pk>\",\n UserDetailView.as_view(),\n name=\"user-detail\",\n ),\n path(\n \"users/\",\n UserListView.as_view(),\n name=\"user-list\",\n ),\n path(\n \"change-password/\",\n views.change_password,\n name=\"change-password\",\n ),\n # password reset views\n path(\n # here, user can initiate a request to send a password reset email\n \"reset-password/\",\n PasswordResetView.as_view(\n template_name=\"registration/reset_password.html\",\n email_template_name=\"registration/reset_password_email.html\",\n success_url=\"/reset-password-sent/\",\n ),\n name=\"reset_password\",\n ),\n path(\n # we display this page once the password reset email has been sent\n \"reset-password-sent/\",\n PasswordResetDoneView.as_view(\n template_name=\"registration/reset_password_sent.html\",\n ),\n name=\"reset_password_done\",\n ),\n path(\n # here, the user can specify their new password\n \"reset/<uidb64>/<token>\",\n PasswordResetConfirmView.as_view(\n template_name=\"registration/reset_password_confirm.html\",\n success_url=\"/reset-password-complete/\",\n ),\n name=\"reset_password_confirm\",\n ),\n path(\n # we display this page once a user has completed a password reset\n # depending on whether their attempt was successful, this page either shows\n # a success message or a non-success message.\n \"reset-password-complete/\",\n PasswordResetCompleteView.as_view(\n template_name=\"registration/reset_password_complete.html\"\n ),\n name=\"reset_password_complete\",\n ),\n # century\n path(\"century/<int:pk>\", CenturyDetailView.as_view(), name=\"century-detail\"),\n # chant\n path(\n \"chants/\",\n ChantListView.as_view(),\n name=\"chant-list\",\n ), # /chants/?source={source id}\n path(\n \"chant/<int:pk>\",\n ChantDetailView.as_view(),\n name=\"chant-detail\",\n ),\n path(\n \"chant-search/\",\n ChantSearchView.as_view(),\n name=\"chant-search\",\n ),\n path(\n \"chant-create/<int:source_pk>\",\n ChantCreateView.as_view(),\n name=\"chant-create\",\n ),\n path(\n \"id/<str:cantus_id>\",\n ChantByCantusIDView.as_view(),\n name=\"chant-by-cantus-id\",\n ),\n path(\n \"chant-delete/<int:pk>\",\n ChantDeleteView.as_view(),\n name=\"chant-delete\",\n ),\n path(\n \"edit-chants/<int:source_id>\",\n SourceEditChantsView.as_view(),\n name=\"source-edit-chants\",\n ),\n path(\n \"proofread-chant/<int:source_id>\",\n ChantProofreadView.as_view(),\n name=\"chant-proofread\",\n ),\n path(\n \"edit-syllabification/<int:chant_id>\",\n ChantEditSyllabificationView.as_view(),\n name=\"source-edit-syllabification\",\n ),\n path(\n \"index/\",\n ChantIndexView.as_view(),\n name=\"chant-index\",\n ), # /index/?source={source id}\n # feast\n path(\n \"feasts/\",\n FeastListView.as_view(),\n name=\"feast-list\",\n ),\n path(\n \"feast/<int:pk>\",\n FeastDetailView.as_view(),\n name=\"feast-detail\",\n ),\n # genre\n path(\n \"genres/\",\n GenreListView.as_view(),\n name=\"genre-list\",\n ),\n path(\n \"genre/<int:pk>\",\n GenreDetailView.as_view(),\n name=\"genre-detail\",\n ),\n # indexer\n path(\n \"indexers/\",\n IndexerListView.as_view(),\n name=\"indexer-list\",\n ),\n # notation\n path(\n 
\"notation/<int:pk>\",\n NotationDetailView.as_view(),\n name=\"notation-detail\",\n ),\n # office\n path(\n \"offices/\",\n OfficeListView.as_view(),\n name=\"office-list\",\n ),\n path(\n \"office/<int:pk>\",\n OfficeDetailView.as_view(),\n name=\"office-detail\",\n ),\n # provenance\n path(\n \"provenance/<int:pk>\",\n ProvenanceDetailView.as_view(),\n name=\"provenance-detail\",\n ),\n # sequence\n path(\n \"sequences/\",\n SequenceListView.as_view(),\n name=\"sequence-list\",\n ),\n path(\n \"sequence/<int:pk>\",\n SequenceDetailView.as_view(),\n name=\"sequence-detail\",\n ),\n path(\n \"edit-sequence/<int:sequence_id>\",\n SequenceEditView.as_view(),\n name=\"sequence-edit\",\n ),\n # source\n path(\n \"sources/\",\n SourceListView.as_view(),\n name=\"source-list\",\n ),\n path(\n \"source/<int:pk>\",\n SourceDetailView.as_view(),\n name=\"source-detail\",\n ),\n path(\n \"source-create/\",\n SourceCreateView.as_view(),\n name=\"source-create\",\n ),\n path(\n \"edit-source/<int:source_id>\",\n SourceEditView.as_view(),\n name=\"source-edit\",\n ),\n # melody\n path(\n \"melody/\",\n MelodySearchView.as_view(),\n name=\"melody-search\",\n ),\n path(\n \"ajax/melody/<str:cantus_id>\",\n views.ajax_melody_list,\n name=\"ajax-melody\",\n ),\n path(\n \"ajax/melody-search/\",\n views.ajax_melody_search,\n name=\"ajax-melody-search\",\n ),\n # json api\n path(\n \"json-sources/\",\n views.json_sources_export,\n name=\"json-sources-export\",\n ),\n path(\n \"json-node/<str:id>\",\n views.json_node_export,\n name=\"json-node-export\",\n ),\n path(\n \"json-nextchants/<str:cantus_id>\",\n views.json_nextchants,\n name=\"json-nextchants\",\n ),\n path(\n \"json-melody/<str:cantus_id>\",\n views.json_melody_export,\n name=\"json-melody-export\",\n ),\n # misc search\n path(\n \"searchms/<int:source_pk>\",\n ChantSearchMSView.as_view(),\n name=\"chant-search-ms\",\n ),\n path(\n \"ci-search/<str:search_term>\",\n CISearchView.as_view(),\n name=\"ci-search\",\n ),\n path(\n \"ajax/search-bar/<str:search_term>\",\n views.ajax_search_bar,\n name=\"ajax-search-bar\",\n ),\n # misc\n path(\n \"content-statistics\",\n views.items_count,\n name=\"items-count\",\n ),\n path(\n \"source/<str:source_id>/csv/\",\n views.csv_export,\n name=\"csv-export\",\n ),\n path(\n \"sites/default/files/csv/<str:source_id>.csv\",\n views.csv_export_redirect_from_old_path,\n name=\"csv-export-old-path\",\n ),\n path(\n \"ajax/concordance/<str:cantus_id>\",\n views.ajax_concordance_list,\n name=\"ajax-concordance\",\n ),\n # content overview (for project managers)\n path(\n \"content-overview/\",\n views.content_overview,\n name=\"content-overview\",\n ),\n # /node/ url redirects\n path(\n \"node/<int:pk>\",\n views.redirect_node_url,\n name=\"redirect-node-url\",\n ),\n # /indexer/ url redirects\n path(\n \"indexer/<int:pk>\",\n views.redirect_indexer,\n name=\"redirect-indexer\",\n ),\n]\n\nhandler404 = \"main_app.views.views.handle404\"\n", "path": "django/cantusdb_project/main_app/urls.py"}]} | 3,602 | 125 |
gh_patches_debug_22952 | rasdani/github-patches | git_diff | pwndbg__pwndbg-648 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
xuntil doesn't work with PIE binaries before being run
`xuntil` was added in #604 and works except that it doesn't work with PIE binaries before being run.
This is because it expects an int argument. I'll look into using sloppy parsing.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/commands/peda.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from __future__ import absolute_import
4 from __future__ import division
5 from __future__ import print_function
6 from __future__ import unicode_literals
7
8 import argparse
9 import sys
10
11 import gdb
12
13 import pwndbg.auxv
14 import pwndbg.color.message as message
15 import pwndbg.commands
16 import pwndbg.commands.context
17 import pwndbg.commands.telescope
18 import pwndbg.proc
19
20
21 @pwndbg.commands.ArgparsedCommand("Gets the current file.")
22 @pwndbg.commands.OnlyWhenRunning
23 def getfile():
24 print(repr(pwndbg.auxv.get().AT_EXECFN))
25
26 @pwndbg.commands.ArgparsedCommand("Get the pid.")
27 @pwndbg.commands.OnlyWhenRunning
28 def getpid():
29 print(pwndbg.proc.pid)
30
31
32 parser = argparse.ArgumentParser(description='Continue execution until an address or function.')
33 parser.add_argument('target', help='Address or function to stop execution at')
34
35
36 @pwndbg.commands.ArgparsedCommand(parser)
37 def xuntil(target):
38 addr = int(target)
39
40 if not pwndbg.memory.peek(addr):
41 print(message.error('Invalid address %#x' % addr))
42 return
43
44 spec = "*%#x" % (addr)
45 b = gdb.Breakpoint(spec, temporary=True)
46 if pwndbg.proc.alive:
47 gdb.execute("continue", from_tty=False)
48 else:
49 gdb.execute("run", from_tty=False)
50
51 xinfo = pwndbg.commands.context.context
52 xprint = pwndbg.commands.telescope.telescope
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/commands/peda.py b/pwndbg/commands/peda.py
--- a/pwndbg/commands/peda.py
+++ b/pwndbg/commands/peda.py
@@ -30,18 +30,27 @@
parser = argparse.ArgumentParser(description='Continue execution until an address or function.')
-parser.add_argument('target', help='Address or function to stop execution at')
-
+parser.add_argument('target', type=str, help='Address or function to stop execution at')
@pwndbg.commands.ArgparsedCommand(parser)
def xuntil(target):
- addr = int(target)
-
- if not pwndbg.memory.peek(addr):
- print(message.error('Invalid address %#x' % addr))
- return
+ try:
+ addr = int(target,0)
+
+ if not pwndbg.memory.peek(addr):
+ print(message.error('Invalid address %#x' % addr))
+ return
+
+ spec = "*%#x" % (addr)
+ except (TypeError, ValueError):
+ #The following gdb command will throw an error if the symbol is not defined.
+ try:
+ result = gdb.execute('info address %s' % target, to_string=True, from_tty=False)
+ except gdb.error:
+ print(message.error("Unable to resolve %s" % target))
+ return
+ spec = target
- spec = "*%#x" % (addr)
b = gdb.Breakpoint(spec, temporary=True)
if pwndbg.proc.alive:
gdb.execute("continue", from_tty=False)
| {"golden_diff": "diff --git a/pwndbg/commands/peda.py b/pwndbg/commands/peda.py\n--- a/pwndbg/commands/peda.py\n+++ b/pwndbg/commands/peda.py\n@@ -30,18 +30,27 @@\n \n \n parser = argparse.ArgumentParser(description='Continue execution until an address or function.')\n-parser.add_argument('target', help='Address or function to stop execution at')\n-\n+parser.add_argument('target', type=str, help='Address or function to stop execution at')\n \n @pwndbg.commands.ArgparsedCommand(parser)\n def xuntil(target):\n- addr = int(target)\n-\n- if not pwndbg.memory.peek(addr):\n- print(message.error('Invalid address %#x' % addr))\n- return\n+ try:\n+ addr = int(target,0)\n+ \n+ if not pwndbg.memory.peek(addr):\n+ print(message.error('Invalid address %#x' % addr))\n+ return\n+\n+ spec = \"*%#x\" % (addr)\n+ except (TypeError, ValueError):\n+ #The following gdb command will throw an error if the symbol is not defined.\n+ try:\n+ result = gdb.execute('info address %s' % target, to_string=True, from_tty=False)\n+ except gdb.error:\n+ print(message.error(\"Unable to resolve %s\" % target))\n+ return \n+ spec = target\n \n- spec = \"*%#x\" % (addr)\n b = gdb.Breakpoint(spec, temporary=True)\n if pwndbg.proc.alive:\n gdb.execute(\"continue\", from_tty=False)\n", "issue": "xuntil doesn't work with PIE binaries before being run\n`xuntil` was added in #604 and works except that it doesn't work with PIE binaries before being run.\r\nThis is because it expects an int argument. I'll look into using sloppy parsing.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport sys\n\nimport gdb\n\nimport pwndbg.auxv\nimport pwndbg.color.message as message\nimport pwndbg.commands\nimport pwndbg.commands.context\nimport pwndbg.commands.telescope\nimport pwndbg.proc\n\n\[email protected](\"Gets the current file.\")\[email protected]\ndef getfile():\n print(repr(pwndbg.auxv.get().AT_EXECFN))\n\[email protected](\"Get the pid.\")\[email protected]\ndef getpid():\n print(pwndbg.proc.pid)\n\n\nparser = argparse.ArgumentParser(description='Continue execution until an address or function.')\nparser.add_argument('target', help='Address or function to stop execution at')\n\n\[email protected](parser)\ndef xuntil(target):\n addr = int(target)\n\n if not pwndbg.memory.peek(addr):\n print(message.error('Invalid address %#x' % addr))\n return\n\n spec = \"*%#x\" % (addr)\n b = gdb.Breakpoint(spec, temporary=True)\n if pwndbg.proc.alive:\n gdb.execute(\"continue\", from_tty=False)\n else:\n gdb.execute(\"run\", from_tty=False)\n\nxinfo = pwndbg.commands.context.context\nxprint = pwndbg.commands.telescope.telescope\n", "path": "pwndbg/commands/peda.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport sys\n\nimport gdb\n\nimport pwndbg.auxv\nimport pwndbg.color.message as message\nimport pwndbg.commands\nimport pwndbg.commands.context\nimport pwndbg.commands.telescope\nimport pwndbg.proc\n\n\[email protected](\"Gets the current file.\")\[email protected]\ndef getfile():\n print(repr(pwndbg.auxv.get().AT_EXECFN))\n\[email protected](\"Get the pid.\")\[email protected]\ndef getpid():\n print(pwndbg.proc.pid)\n\n\nparser = 
argparse.ArgumentParser(description='Continue execution until an address or function.')\nparser.add_argument('target', type=str, help='Address or function to stop execution at')\n\[email protected](parser)\ndef xuntil(target):\n try:\n addr = int(target,0)\n \n if not pwndbg.memory.peek(addr):\n print(message.error('Invalid address %#x' % addr))\n return\n\n spec = \"*%#x\" % (addr)\n except (TypeError, ValueError):\n #The following gdb command will throw an error if the symbol is not defined.\n try:\n result = gdb.execute('info address %s' % target, to_string=True, from_tty=False)\n except gdb.error:\n print(message.error(\"Unable to resolve %s\" % target))\n return \n spec = target\n\n b = gdb.Breakpoint(spec, temporary=True)\n if pwndbg.proc.alive:\n gdb.execute(\"continue\", from_tty=False)\n else:\n gdb.execute(\"run\", from_tty=False)\n\nxinfo = pwndbg.commands.context.context\nxprint = pwndbg.commands.telescope.telescope\n", "path": "pwndbg/commands/peda.py"}]} | 761 | 351 |
gh_patches_debug_859 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-169 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change development environment from docker-compose to venv
- [ ] Remove the django docker container
- [ ] Install package and requirements in venv
- [ ] Keep database docker container and manage connection to django
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/backend/settings.py`
Content:
```
1 """
2 Django settings for backend project.
3
4 Generated by 'django-admin startproject' using Django 1.11.11.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.11/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/1.11/ref/settings/
11 """
12
13 import os
14
15 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
16 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
17
18
19 # Quick-start development settings - unsuitable for production
20 # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
21
22 # SECURITY WARNING: keep the secret key used in production secret!
23 SECRET_KEY = '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_'
24
25 # SECURITY WARNING: don't run with debug turned on in production!
26 DEBUG = True
27
28 ALLOWED_HOSTS = [
29 'localhost',
30 '127.0.0.1',
31 '0.0.0.0'
32 ]
33
34
35 # Application definition
36
37 INSTALLED_APPS = [
38 'cms.apps.CmsConfig',
39 'django.contrib.admin',
40 'django.contrib.auth',
41 'django.contrib.contenttypes',
42 'django.contrib.messages',
43 'django.contrib.sessions',
44 'django.contrib.staticfiles',
45 'widget_tweaks',
46 'easy_thumbnails',
47 'filer',
48 'drf_yasg',
49 'mptt',
50 'rest_framework',
51 'rules.apps.AutodiscoverRulesConfig',
52 ]
53
54 MIDDLEWARE = [
55 'django.middleware.security.SecurityMiddleware',
56 'django.contrib.sessions.middleware.SessionMiddleware',
57 'django.middleware.locale.LocaleMiddleware',
58 'django.middleware.common.CommonMiddleware',
59 'django.middleware.csrf.CsrfViewMiddleware',
60 'django.contrib.auth.middleware.AuthenticationMiddleware',
61 'django.contrib.messages.middleware.MessageMiddleware',
62 'django.middleware.clickjacking.XFrameOptionsMiddleware',
63 ]
64
65 ROOT_URLCONF = 'backend.urls'
66 THUMBNAIL_HIGH_RESOLUTION = True
67
68 TEMPLATES = [
69 {
70 'BACKEND': 'django.template.backends.django.DjangoTemplates',
71 'DIRS': [],
72 'APP_DIRS': True,
73 'OPTIONS': {
74 'context_processors': [
75 'django.template.context_processors.debug',
76 'django.template.context_processors.request',
77 'django.contrib.auth.context_processors.auth',
78 'django.contrib.messages.context_processors.messages',
79 'backend.context_processors.site_slug_processor',
80 ],
81 },
82 },
83 ]
84
85 WSGI_APPLICATION = 'backend.wsgi.application'
86
87
88 # Database
89 # https://docs.djangoproject.com/en/1.11/ref/settings/#databases
90
91 DATABASES = {
92 'default': {
93 'ENGINE': 'django.db.backends.postgresql_psycopg2',
94 'NAME': 'integreat',
95 'USER': 'integreat',
96 'PASSWORD': 'password',
97 'HOST': 'postgres',
98 'PORT': '5432',
99 }
100 }
101
102 # Directory for initial database contents
103
104 FIXTURE_DIRS = (
105 os.path.join(BASE_DIR, 'cms/fixtures/'),
106 )
107
108 # Authentication backends
109
110 AUTHENTICATION_BACKENDS = (
111 'rules.permissions.ObjectPermissionBackend',
112 'django.contrib.auth.backends.ModelBackend', # this is default
113 )
114
115
116 # Password validation
117 # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
118
119 AUTH_PASSWORD_VALIDATORS = [
120 {
121 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
122 },
123 {
124 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
125 },
126 {
127 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
128 },
129 {
130 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
131 },
132 ]
133
134
135 # Internationalization
136 # https://docs.djangoproject.com/en/1.11/topics/i18n/
137
138 LANGUAGES = (
139 ('en-us', 'English'),
140 ('de-de', 'Deutsch'),
141 )
142
143 LOCALE_PATHS = (
144 os.path.join(BASE_DIR, 'locale'),
145 )
146
147 LANGUAGE_CODE = 'de-de'
148
149 TIME_ZONE = 'UTC'
150
151 USE_I18N = True
152
153 USE_L10N = True
154
155 USE_TZ = True
156
157
158 # Static files (CSS, JavaScript, Images)
159 # https://docs.djangoproject.com/en/1.11/howto/static-files/
160
161 STATIC_URL = '/static/'
162
163
164 # Login
165 LOGIN_URL = '/login'
166 LOGIN_REDIRECT_URL = '/'
167 LOGOUT_REDIRECT_URL = '/login'
168
169 # API FRAMEWORK
170 REST_FRAMEWORK = {
171 # Use Django's standard `django.contrib.auth` permissions,
172 # or allow read-only access for unauthenticated users.
173 'DEFAULT_PERMISSION_CLASSES': [
174 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
175 ]
176 }
177
178 # Miscellaneous
179 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
180 CSRF_FAILURE_VIEW = 'cms.views.general.csrf_failure'
181
182 MEDIA_URL = '/media/'
183 MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
184 FILER_CANONICAL_URL = 'media/'
185
186 LOGGING = {
187 'version': 1,
188 'disable_existing_loggers': False,
189 'handlers': {
190 'console': {
191 'class': 'logging.StreamHandler'
192 },
193 },
194 'loggers': {
195 'django': {
196 'handlers': ['console'],
197 'level': 'WARN',
198 'propagate': True,
199 },
200 'api': {
201 'handlers': ['console'],
202 'level': 'INFO',
203 'propagate': True,
204 },
205 'cms': {
206 'handlers': ['console'],
207 'level': 'INFO',
208 'propagate': True,
209 },
210 'rules': {
211 'handlers': ['console'],
212 'level': 'DEBUG',
213 'propagate': True,
214 },
215 }
216 }
217
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/backend/settings.py b/backend/backend/settings.py
--- a/backend/backend/settings.py
+++ b/backend/backend/settings.py
@@ -94,7 +94,7 @@
'NAME': 'integreat',
'USER': 'integreat',
'PASSWORD': 'password',
- 'HOST': 'postgres',
+ 'HOST': 'localhost',
'PORT': '5432',
}
}
| {"golden_diff": "diff --git a/backend/backend/settings.py b/backend/backend/settings.py\n--- a/backend/backend/settings.py\n+++ b/backend/backend/settings.py\n@@ -94,7 +94,7 @@\n 'NAME': 'integreat',\n 'USER': 'integreat',\n 'PASSWORD': 'password',\n- 'HOST': 'postgres',\n+ 'HOST': 'localhost',\n 'PORT': '5432',\n }\n }\n", "issue": "Change development environment from docker-compose to venv\n- [ ] Remove the django docker container\r\n- [ ] Install package and requirements in venv\r\n- [ ] Keep database docker container and manage connection to django\n", "before_files": [{"content": "\"\"\"\nDjango settings for backend project.\n\nGenerated by 'django-admin startproject' using Django 1.11.11.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.11/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.11/ref/settings/\n\"\"\"\n\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = [\n 'localhost',\n '127.0.0.1',\n '0.0.0.0'\n]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'cms.apps.CmsConfig',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.staticfiles',\n 'widget_tweaks',\n 'easy_thumbnails',\n 'filer',\n 'drf_yasg',\n 'mptt',\n 'rest_framework',\n 'rules.apps.AutodiscoverRulesConfig',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'backend.urls'\nTHUMBNAIL_HIGH_RESOLUTION = True\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'backend.context_processors.site_slug_processor',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'backend.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'integreat',\n 'USER': 'integreat',\n 'PASSWORD': 'password',\n 'HOST': 'postgres',\n 'PORT': '5432',\n }\n}\n\n# Directory for initial database contents\n\nFIXTURE_DIRS = (\n os.path.join(BASE_DIR, 'cms/fixtures/'),\n)\n\n# Authentication backends\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend', # this is default\n)\n\n\n# Password validation\n# 
https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\n\nLANGUAGES = (\n ('en-us', 'English'),\n ('de-de', 'Deutsch'),\n)\n\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, 'locale'),\n)\n\nLANGUAGE_CODE = 'de-de'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.11/howto/static-files/\n\nSTATIC_URL = '/static/'\n\n\n# Login\nLOGIN_URL = '/login'\nLOGIN_REDIRECT_URL = '/'\nLOGOUT_REDIRECT_URL = '/login'\n\n# API FRAMEWORK\nREST_FRAMEWORK = {\n # Use Django's standard `django.contrib.auth` permissions,\n # or allow read-only access for unauthenticated users.\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'\n ]\n}\n\n# Miscellaneous\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\nCSRF_FAILURE_VIEW = 'cms.views.general.csrf_failure'\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nFILER_CANONICAL_URL = 'media/'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler'\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': 'WARN',\n 'propagate': True,\n },\n 'api': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': True,\n },\n 'cms': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': True,\n },\n 'rules': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n }\n}\n", "path": "backend/backend/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for backend project.\n\nGenerated by 'django-admin startproject' using Django 1.11.11.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.11/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.11/ref/settings/\n\"\"\"\n\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = [\n 'localhost',\n '127.0.0.1',\n '0.0.0.0'\n]\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'cms.apps.CmsConfig',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.staticfiles',\n 'widget_tweaks',\n 'easy_thumbnails',\n 'filer',\n 'drf_yasg',\n 'mptt',\n 'rest_framework',\n 'rules.apps.AutodiscoverRulesConfig',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 
'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'backend.urls'\nTHUMBNAIL_HIGH_RESOLUTION = True\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'backend.context_processors.site_slug_processor',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'backend.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'integreat',\n 'USER': 'integreat',\n 'PASSWORD': 'password',\n 'HOST': 'localhost',\n 'PORT': '5432',\n }\n}\n\n# Directory for initial database contents\n\nFIXTURE_DIRS = (\n os.path.join(BASE_DIR, 'cms/fixtures/'),\n)\n\n# Authentication backends\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend', # this is default\n)\n\n\n# Password validation\n# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.11/topics/i18n/\n\nLANGUAGES = (\n ('en-us', 'English'),\n ('de-de', 'Deutsch'),\n)\n\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, 'locale'),\n)\n\nLANGUAGE_CODE = 'de-de'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.11/howto/static-files/\n\nSTATIC_URL = '/static/'\n\n\n# Login\nLOGIN_URL = '/login'\nLOGIN_REDIRECT_URL = '/'\nLOGOUT_REDIRECT_URL = '/login'\n\n# API FRAMEWORK\nREST_FRAMEWORK = {\n # Use Django's standard `django.contrib.auth` permissions,\n # or allow read-only access for unauthenticated users.\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'\n ]\n}\n\n# Miscellaneous\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\nCSRF_FAILURE_VIEW = 'cms.views.general.csrf_failure'\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nFILER_CANONICAL_URL = 'media/'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler'\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': 'WARN',\n 'propagate': True,\n },\n 'api': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': True,\n },\n 'cms': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': True,\n },\n 'rules': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n }\n}\n", "path": "backend/backend/settings.py"}]} | 2,129 | 94 |
gh_patches_debug_17968 | rasdani/github-patches | git_diff | Pycord-Development__pycord-621 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cog.get_commands() does not return ext.commands commands
### Summary
`Cog.get_commands()` no longer returns anything but slash/interaction based commands, breaking all help commands
### Reproduction Steps
Run the example below with a cog that has `ext.commands` based commands.
### Minimal Reproducible Code
```python
cog = bot.get_cog("MyCog")
commands = cog.get_commands()
print(commands)
#This will return [] unless MyCog contains slash/interaction based commands
```
### Expected Results
Commands inside cog returned as a list.
### Actual Results
Only interaction-based commands are returned
### Intents
```
discord.Intents(guilds = True, members = True, bans = True, emojis = True, messages = True, invites = True, reactions = True)
```
### System Information
```
- Python v3.10.1-final
- py-cord v2.0.0-alpha
- py-cord pkg_resources: v2.0.0a4627+gca1a76cb
- aiohttp v3.7.4.post0
- system info: Linux 5.15.10-zen1-1-zen #1 ZEN SMP PREEMPT Fri, 17 Dec 2021 11:17:39 +0000
```
### Checklist
- [X] I have searched the open issues for duplicates.
- [X] I have shown the entire traceback, if possible.
- [X] I have removed my token from display, if visible.
### Additional Context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `discord/ext/commands/cog.py`
Content:
```
1 """
2 The MIT License (MIT)
3
4 Copyright (c) 2015-2021 Rapptz
5 Copyright (c) 2021-present Pycord Development
6
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 and/or sell copies of the Software, and to permit persons to whom the
12 Software is furnished to do so, subject to the following conditions:
13
14 The above copyright notice and this permission notice shall be included in
15 all copies or substantial portions of the Software.
16
17 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 DEALINGS IN THE SOFTWARE.
24 """
25 from __future__ import annotations
26
27 import discord
28 from ...cog import Cog
29
30 from typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type
31
32 from ...commands import ApplicationCommand
33
34 if TYPE_CHECKING:
35 from .core import Command
36
37 __all__ = ('Cog',)
38
39 CogT = TypeVar('CogT', bound='Cog')
40 FuncT = TypeVar('FuncT', bound=Callable[..., Any])
41
42 MISSING: Any = discord.utils.MISSING
43
44 class Cog(Cog):
45 def __new__(cls: Type[CogT], *args: Any, **kwargs: Any) -> CogT:
46 # For issue 426, we need to store a copy of the command objects
47 # since we modify them to inject `self` to them.
48 # To do this, we need to interfere with the Cog creation process.
49 self = super().__new__(cls)
50
51 return self
52
53 def walk_commands(self) -> Generator[Command, None, None]:
54 """An iterator that recursively walks through this cog's commands and subcommands.
55
56 Yields
57 ------
58 Union[:class:`.Command`, :class:`.Group`]
59 A command or group from the cog.
60 """
61 from .core import GroupMixin
62 for command in self.__cog_commands__:
63 if isinstance(command, ApplicationCommand):
64 yield command
65 else:
66 if command.parent is None:
67 yield command
68 if isinstance(command, GroupMixin):
69 yield from command.walk_commands()
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/discord/ext/commands/cog.py b/discord/ext/commands/cog.py
--- a/discord/ext/commands/cog.py
+++ b/discord/ext/commands/cog.py
@@ -27,7 +27,7 @@
import discord
from ...cog import Cog
-from typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type
+from typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type, Union
from ...commands import ApplicationCommand
@@ -67,3 +67,16 @@
yield command
if isinstance(command, GroupMixin):
yield from command.walk_commands()
+
+ def get_commands(self) -> List[Union[ApplicationCommand, Command]]:
+ r"""
+ Returns
+ --------
+ List[Union[:class:`~discord.ApplicationCommand`, :class:`.Command`]]
+ A :class:`list` of commands that are defined inside this cog.
+
+ .. note::
+
+ This does not include subcommands.
+ """
+ return [c for c in self.__cog_commands__ if c.parent is None]
| {"golden_diff": "diff --git a/discord/ext/commands/cog.py b/discord/ext/commands/cog.py\n--- a/discord/ext/commands/cog.py\n+++ b/discord/ext/commands/cog.py\n@@ -27,7 +27,7 @@\n import discord\n from ...cog import Cog\n \n-from typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type\n+from typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type, Union\n \n from ...commands import ApplicationCommand\n \n@@ -67,3 +67,16 @@\n yield command\n if isinstance(command, GroupMixin):\n yield from command.walk_commands()\n+\n+ def get_commands(self) -> List[Union[ApplicationCommand, Command]]:\n+ r\"\"\"\n+ Returns\n+ --------\n+ List[Union[:class:`~discord.ApplicationCommand`, :class:`.Command`]]\n+ A :class:`list` of commands that are defined inside this cog.\n+\n+ .. note::\n+\n+ This does not include subcommands.\n+ \"\"\"\n+ return [c for c in self.__cog_commands__ if c.parent is None]\n", "issue": "Cog.get_commands() does not return ext.commands commands\n### Summary\r\n\r\n`Cog.get_commands()` no longer returns anything but slash/interaction based commands, breaking all help commands\r\n\r\n### Reproduction Steps\r\n\r\nRun the example below with a cog that has `ext.commands` based commands.\r\n\r\n### Minimal Reproducible Code\r\n\r\n```python\r\ncog = bot.get_cog(\"MyCog\")\r\ncommands = cog.get_commands()\r\nprint(commands)\r\n#This will return [] unless MyCog contains slash/interaction based commands\r\n```\r\n\r\n\r\n### Expected Results\r\n\r\nCommands inside cog returned as a list.\r\n\r\n### Actual Results\r\n\r\nOnly interaction-based commands are returned\r\n\r\n### Intents\r\n\r\n```\r\ndiscord.Intents(guilds = True, members = True, bans = True, emojis = True, messages = True, invites = True, reactions = True)\r\n```\r\n\r\n### System Information\r\n```\r\n- Python v3.10.1-final\r\n- py-cord v2.0.0-alpha\r\n - py-cord pkg_resources: v2.0.0a4627+gca1a76cb\r\n- aiohttp v3.7.4.post0\r\n- system info: Linux 5.15.10-zen1-1-zen #1 ZEN SMP PREEMPT Fri, 17 Dec 2021 11:17:39 +0000\r\n```\r\n### Checklist\r\n\r\n- [X] I have searched the open issues for duplicates.\r\n- [X] I have shown the entire traceback, if possible.\r\n- [X] I have removed my token from display, if visible.\r\n\r\n### Additional Context\r\n\r\n_No response_\n", "before_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom __future__ import annotations\n\nimport discord\nfrom ...cog import Cog\n\nfrom typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type\n\nfrom ...commands import ApplicationCommand\n\nif TYPE_CHECKING:\n from .core import Command\n\n__all__ = ('Cog',)\n\nCogT = TypeVar('CogT', bound='Cog')\nFuncT = TypeVar('FuncT', bound=Callable[..., Any])\n\nMISSING: Any = discord.utils.MISSING\n\nclass Cog(Cog):\n def __new__(cls: Type[CogT], *args: Any, **kwargs: Any) -> CogT:\n # For issue 426, we need to store a copy of the command objects\n # since we modify them to inject `self` to them.\n # To do this, we need to interfere with the Cog creation process.\n self = super().__new__(cls)\n\n return self\n\n def walk_commands(self) -> Generator[Command, None, None]:\n \"\"\"An iterator that recursively walks through this cog's commands and subcommands.\n\n Yields\n ------\n Union[:class:`.Command`, :class:`.Group`]\n A command or group from the cog.\n \"\"\"\n from .core import GroupMixin\n for command in self.__cog_commands__:\n if isinstance(command, ApplicationCommand):\n yield command\n else:\n if command.parent is None:\n yield command\n if isinstance(command, GroupMixin):\n yield from command.walk_commands()\n", "path": "discord/ext/commands/cog.py"}], "after_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom __future__ import annotations\n\nimport discord\nfrom ...cog import Cog\n\nfrom typing import Any, Callable, Generator, TYPE_CHECKING, TypeVar, Type, Union\n\nfrom ...commands import ApplicationCommand\n\nif TYPE_CHECKING:\n from .core import Command\n\n__all__ = ('Cog',)\n\nCogT = TypeVar('CogT', bound='Cog')\nFuncT = TypeVar('FuncT', bound=Callable[..., Any])\n\nMISSING: Any = discord.utils.MISSING\n\nclass Cog(Cog):\n def __new__(cls: Type[CogT], *args: Any, **kwargs: Any) -> CogT:\n # For issue 426, we need to store a copy of the command objects\n # since we modify them to inject `self` to them.\n # To do this, we need to interfere with the Cog creation process.\n self = super().__new__(cls)\n\n return self\n\n def walk_commands(self) -> Generator[Command, None, None]:\n \"\"\"An iterator that recursively walks through this cog's commands and subcommands.\n\n Yields\n ------\n Union[:class:`.Command`, :class:`.Group`]\n A command or group from the cog.\n \"\"\"\n from .core import GroupMixin\n for command in self.__cog_commands__:\n if isinstance(command, ApplicationCommand):\n yield command\n else:\n if command.parent is None:\n yield command\n if isinstance(command, GroupMixin):\n yield from command.walk_commands()\n\n def get_commands(self) -> List[Union[ApplicationCommand, Command]]:\n r\"\"\"\n Returns\n --------\n List[Union[:class:`~discord.ApplicationCommand`, :class:`.Command`]]\n A :class:`list` of commands that are defined inside this cog.\n\n .. note::\n\n This does not include subcommands.\n \"\"\"\n return [c for c in self.__cog_commands__ if c.parent is None]\n", "path": "discord/ext/commands/cog.py"}]} | 1,315 | 249 |
gh_patches_debug_13989 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-1192 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
multi-gpu ddp calls validation and testing loops too many times
When using DDP with multiple GPUs, the validation and test loops are each run over the entire dataset on every GPU.
Expected behavior is that the dataset is divided appropriately across the GPUs.
I am using current master (cloned Mar 14), Ubuntu 19.10, Cuda 10.1, python 3.7.5, pytorch 1.4, venv environment.
The problem appears to be in `auto_add_sampler()` in data_loading.py. It does not create a `DistributedSampler` for validation or test datasets.
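To make the expected behaviour concrete, here is a rough sketch of how I would expect a val/test loader to be wrapped (this is my own illustration using plain torch.utils.data, not the Lightning code, and it assumes the process group is already initialised):

```python
import torch.distributed as dist
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler


def add_eval_dist_sampler(loader: DataLoader) -> DataLoader:
    # Shard the dataset across ranks; no shuffling for val/test.
    sampler = DistributedSampler(
        loader.dataset,
        num_replicas=dist.get_world_size(),
        rank=dist.get_rank(),
        shuffle=False,
    )
    # Rebuild the loader with the sampler, keeping the user's settings.
    return DataLoader(
        loader.dataset,
        batch_size=loader.batch_size,
        sampler=sampler,
        num_workers=loader.num_workers,
        collate_fn=loader.collate_fn,
        pin_memory=loader.pin_memory,
    )
```

With something like that in place, each GPU would only see roughly len(dataset) / world_size validation samples, which is what I would expect.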
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/trainer/data_loading.py`
Content:
```
1 from abc import ABC, abstractmethod
2 from typing import Union, List, Tuple, Callable
3
4 import torch.distributed as torch_distrib
5 from torch.utils.data import SequentialSampler, DataLoader
6 from torch.utils.data.distributed import DistributedSampler
7
8 from pytorch_lightning.core import LightningModule
9 from pytorch_lightning.utilities.debugging import MisconfigurationException
10
11 try:
12 from apex import amp
13 except ImportError:
14 APEX_AVAILABLE = False
15 else:
16 APEX_AVAILABLE = True
17
18 try:
19 import torch_xla
20 import torch_xla.core.xla_model as xm
21 import torch_xla.distributed.xla_multiprocessing as xmp
22 except ImportError:
23 XLA_AVAILABLE = False
24 else:
25 XLA_AVAILABLE = True
26
27
28 def _has_len(dataloader: DataLoader) -> bool:
29 try:
30 # try getting the length
31 _ = len(dataloader)
32 return True
33 except TypeError:
34 return False
35
36
37 class TrainerDataLoadingMixin(ABC):
38
39 # this is just a summary on variables used in this abstract class,
40 # the proper values/initialisation should be done in child class
41 proc_rank: int
42 use_ddp: bool
43 use_ddp2: bool
44 shown_warnings: ...
45 val_check_interval: float
46 use_tpu: bool
47 tpu_local_core_rank: int
48 train_dataloader: DataLoader
49 num_training_batches: Union[int, float]
50 val_check_batch: ...
51 val_dataloaders: List[DataLoader]
52 num_val_batches: Union[int, float]
53 test_dataloaders: List[DataLoader]
54 num_test_batches: Union[int, float]
55 train_percent_check: float
56 val_percent_check: float
57 test_percent_check: float
58
59 @abstractmethod
60 def is_overriden(self, *args):
61 """Warning: this is just empty shell for code implemented in other class."""
62
63 def _percent_range_check(self, name: str) -> None:
64 value = getattr(self, name)
65 msg = f'`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}.'
66 if name == 'val_check_interval':
67 msg += ' If you want to disable validation set `val_percent_check` to 0.0 instead.'
68
69 if not 0. <= value <= 1.:
70 raise ValueError(msg)
71
72 def auto_add_sampler(self, dataloader: DataLoader, train: bool) -> DataLoader:
73 if self.use_ddp or self.use_ddp2 or self.use_tpu:
74 dl_args = {
75 'dataset': dataloader.dataset,
76 'batch_size': dataloader.batch_size,
77 'shuffle': False,
78 'num_workers': dataloader.num_workers,
79 'collate_fn': dataloader.collate_fn,
80 'pin_memory': dataloader.pin_memory,
81 'drop_last': dataloader.drop_last,
82 'timeout': dataloader.timeout,
83 'worker_init_fn': dataloader.worker_init_fn
84 }
85
86 if self.use_tpu:
87 sampler = DistributedSampler(
88 dataloader.dataset,
89 num_replicas=xm.xrt_world_size(),
90 rank=xm.get_ordinal()
91 )
92 dl_args['shuffle'] = False
93 else:
94 if train:
95 sampler = DistributedSampler(dataloader.dataset)
96 dl_args['shuffle'] = False
97 else:
98 sampler = SequentialSampler(dataloader.dataset)
99
100 dl_args['sampler'] = sampler
101
102 dataloader = DataLoader(**dl_args)
103 return dataloader
104
105 def reset_train_dataloader(self, model: LightningModule) -> None:
106 """Resets the train dataloader and initialises required variables
107 (number of batches, when to validate, etc.).
108
109 Args:
110 model: The current `LightningModule`
111 """
112 self.train_dataloader = self.request_dataloader(model.train_dataloader)
113 self.num_training_batches = 0
114
115 # automatically add samplers
116 self.train_dataloader = self.auto_add_sampler(self.train_dataloader, train=True)
117
118 self._percent_range_check('train_percent_check')
119
120 if not _has_len(self.train_dataloader):
121 self.num_training_batches = float('inf')
122 else:
123 # try getting the length
124 self.num_training_batches = len(self.train_dataloader)
125 self.num_training_batches = int(self.num_training_batches * self.train_percent_check)
126
127 # determine when to check validation
128 # if int passed in, val checks that often
129 # otherwise, it checks in [0, 1.0] % range of a training epoch
130 if isinstance(self.val_check_interval, int):
131 self.val_check_batch = self.val_check_interval
132 if self.val_check_batch > self.num_training_batches:
133 raise ValueError(
134 f'`val_check_interval` ({self.val_check_interval}) must be less than or equal '
135 f'to the number of the training batches ({self.num_training_batches}). '
136 'If you want to disable validation set `val_percent_check` to 0.0 instead.')
137 else:
138 if not _has_len(self.train_dataloader):
139 raise MisconfigurationException(
140 'When using an infinite DataLoader (e.g. with an IterableDataset or when '
141 'DataLoader does not implement `__len__`) for `train_dataloader`, '
142 '`Trainer(val_check_interval)` must be an int. An int k specifies checking '
143 'validation every k training batches.')
144
145 self._percent_range_check('val_check_interval')
146
147 self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
148 self.val_check_batch = max(1, self.val_check_batch)
149
150 def _reset_eval_dataloader(self, model: LightningModule,
151 mode: str) -> Tuple[int, List[DataLoader]]:
152 """Generic method to reset a dataloader for evaluation.
153
154 Args:
155 model: The current `LightningModule`
156 mode: Either `'val'` or `'test'`
157
158 Returns:
159 Tuple (num_batches, dataloaders)
160 """
161 dataloaders = self.request_dataloader(getattr(model, f'{mode}_dataloader'))
162
163 if not isinstance(dataloaders, list):
164 dataloaders = [dataloaders]
165
166 # add samplers
167 dataloaders = [self.auto_add_sampler(dl, train=False) for dl in dataloaders if dl]
168
169 num_batches = 0
170
171 # determine number of batches
172 # datasets could be none, 1 or 2+
173 if len(dataloaders) != 0:
174 for dataloader in dataloaders:
175 if not _has_len(dataloader):
176 num_batches = float('inf')
177 break
178
179 percent_check = getattr(self, f'{mode}_percent_check')
180
181 if num_batches != float('inf'):
182 self._percent_range_check(f'{mode}_percent_check')
183
184 num_batches = sum(len(dataloader) for dataloader in dataloaders)
185 num_batches = int(num_batches * percent_check)
186 elif percent_check not in (0.0, 1.0):
187 raise MisconfigurationException(
188 'When using an infinite DataLoader (e.g. with an IterableDataset or when '
189 f'DataLoader does not implement `__len__`) for `{mode}_dataloader`, '
190 f'`Trainer({mode}_percent_check)` must be `0.0` or `1.0`.')
191 return num_batches, dataloaders
192
193 def reset_val_dataloader(self, model: LightningModule) -> None:
194 """Resets the validation dataloader and determines the number of batches.
195
196 Args:
197 model: The current `LightningModule`
198 """
199 if self.is_overriden('validation_step'):
200 self.num_val_batches, self.val_dataloaders =\
201 self._reset_eval_dataloader(model, 'val')
202
203 def reset_test_dataloader(self, model) -> None:
204 """Resets the validation dataloader and determines the number of batches.
205
206 Args:
207 model: The current `LightningModule`
208 """
209 if self.is_overriden('test_step'):
210 self.num_test_batches, self.test_dataloaders =\
211 self._reset_eval_dataloader(model, 'test')
212
213 def request_dataloader(self, dataloader_fx: Callable) -> DataLoader:
214 """Handles downloading data in the GPU or TPU case.
215
216 Args:
217 dataloader_fx: The bound dataloader getter
218
219 Returns:
220 The dataloader
221 """
222 dataloader = dataloader_fx()
223
224 # get the function we'll use to get data
225 if self.use_ddp or self.use_ddp2:
226 # all processes wait until data download has happened
227 torch_distrib.barrier()
228
229 # data download/load on TPU
230 elif self.use_tpu and XLA_AVAILABLE:
231 # all processes wait until data download has happened
232 torch_xla.core.xla_model.rendezvous('pl.TrainerDataLoadingMixin.get_dataloaders')
233
234 return dataloader
235
236 def determine_data_use_amount(self, train_percent_check: float, val_percent_check: float,
237 test_percent_check: float, overfit_pct: float) -> None:
238 """Use less data for debugging purposes
239 """
240 self.train_percent_check = train_percent_check
241 self.val_percent_check = val_percent_check
242 self.test_percent_check = test_percent_check
243 if overfit_pct > 0:
244 if overfit_pct > 1:
245 raise ValueError(
246 f'`overfit_pct` must be not greater than 1.0, but got {overfit_pct:.3f}.')
247
248 self.train_percent_check = overfit_pct
249 self.val_percent_check = overfit_pct
250 self.test_percent_check = overfit_pct
251
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py
--- a/pytorch_lightning/trainer/data_loading.py
+++ b/pytorch_lightning/trainer/data_loading.py
@@ -91,15 +91,12 @@
)
dl_args['shuffle'] = False
else:
- if train:
- sampler = DistributedSampler(dataloader.dataset)
- dl_args['shuffle'] = False
- else:
- sampler = SequentialSampler(dataloader.dataset)
+ sampler = DistributedSampler(dataloader.dataset)
+ dl_args['shuffle'] = False
dl_args['sampler'] = sampler
-
dataloader = DataLoader(**dl_args)
+
return dataloader
def reset_train_dataloader(self, model: LightningModule) -> None:
| {"golden_diff": "diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py\n--- a/pytorch_lightning/trainer/data_loading.py\n+++ b/pytorch_lightning/trainer/data_loading.py\n@@ -91,15 +91,12 @@\n )\n dl_args['shuffle'] = False\n else:\n- if train:\n- sampler = DistributedSampler(dataloader.dataset)\n- dl_args['shuffle'] = False\n- else:\n- sampler = SequentialSampler(dataloader.dataset)\n+ sampler = DistributedSampler(dataloader.dataset)\n+ dl_args['shuffle'] = False\n \n dl_args['sampler'] = sampler\n-\n dataloader = DataLoader(**dl_args)\n+\n return dataloader\n \n def reset_train_dataloader(self, model: LightningModule) -> None:\n", "issue": "multi-gpu ddp calls validation and testing loops too many times\nWhen using ddp with multiple gpus, each validation and test loop is called with the entire validation dataset for each gpu.\r\n\r\nExpected behavior is that the dataset is divided appropriately across the gpus.\r\n\r\nI am using current master (cloned Mar 14), Ubuntu 19.10, Cuda 10.1, python 3.7.5, pytorch 1.4, venv environment.\r\n\r\nThe problem appears to be in `auto_add_sampler()` in data_loading.py. It does not create a `DistributedSampler` for validation or test datasets.\n", "before_files": [{"content": "from abc import ABC, abstractmethod\nfrom typing import Union, List, Tuple, Callable\n\nimport torch.distributed as torch_distrib\nfrom torch.utils.data import SequentialSampler, DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom pytorch_lightning.core import LightningModule\nfrom pytorch_lightning.utilities.debugging import MisconfigurationException\n\ntry:\n from apex import amp\nexcept ImportError:\n APEX_AVAILABLE = False\nelse:\n APEX_AVAILABLE = True\n\ntry:\n import torch_xla\n import torch_xla.core.xla_model as xm\n import torch_xla.distributed.xla_multiprocessing as xmp\nexcept ImportError:\n XLA_AVAILABLE = False\nelse:\n XLA_AVAILABLE = True\n\n\ndef _has_len(dataloader: DataLoader) -> bool:\n try:\n # try getting the length\n _ = len(dataloader)\n return True\n except TypeError:\n return False\n\n\nclass TrainerDataLoadingMixin(ABC):\n\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n proc_rank: int\n use_ddp: bool\n use_ddp2: bool\n shown_warnings: ...\n val_check_interval: float\n use_tpu: bool\n tpu_local_core_rank: int\n train_dataloader: DataLoader\n num_training_batches: Union[int, float]\n val_check_batch: ...\n val_dataloaders: List[DataLoader]\n num_val_batches: Union[int, float]\n test_dataloaders: List[DataLoader]\n num_test_batches: Union[int, float]\n train_percent_check: float\n val_percent_check: float\n test_percent_check: float\n\n @abstractmethod\n def is_overriden(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n def _percent_range_check(self, name: str) -> None:\n value = getattr(self, name)\n msg = f'`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}.'\n if name == 'val_check_interval':\n msg += ' If you want to disable validation set `val_percent_check` to 0.0 instead.'\n\n if not 0. 
<= value <= 1.:\n raise ValueError(msg)\n\n def auto_add_sampler(self, dataloader: DataLoader, train: bool) -> DataLoader:\n if self.use_ddp or self.use_ddp2 or self.use_tpu:\n dl_args = {\n 'dataset': dataloader.dataset,\n 'batch_size': dataloader.batch_size,\n 'shuffle': False,\n 'num_workers': dataloader.num_workers,\n 'collate_fn': dataloader.collate_fn,\n 'pin_memory': dataloader.pin_memory,\n 'drop_last': dataloader.drop_last,\n 'timeout': dataloader.timeout,\n 'worker_init_fn': dataloader.worker_init_fn\n }\n\n if self.use_tpu:\n sampler = DistributedSampler(\n dataloader.dataset,\n num_replicas=xm.xrt_world_size(),\n rank=xm.get_ordinal()\n )\n dl_args['shuffle'] = False\n else:\n if train:\n sampler = DistributedSampler(dataloader.dataset)\n dl_args['shuffle'] = False\n else:\n sampler = SequentialSampler(dataloader.dataset)\n\n dl_args['sampler'] = sampler\n\n dataloader = DataLoader(**dl_args)\n return dataloader\n\n def reset_train_dataloader(self, model: LightningModule) -> None:\n \"\"\"Resets the train dataloader and initialises required variables\n (number of batches, when to validate, etc.).\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n self.train_dataloader = self.request_dataloader(model.train_dataloader)\n self.num_training_batches = 0\n\n # automatically add samplers\n self.train_dataloader = self.auto_add_sampler(self.train_dataloader, train=True)\n\n self._percent_range_check('train_percent_check')\n\n if not _has_len(self.train_dataloader):\n self.num_training_batches = float('inf')\n else:\n # try getting the length\n self.num_training_batches = len(self.train_dataloader)\n self.num_training_batches = int(self.num_training_batches * self.train_percent_check)\n\n # determine when to check validation\n # if int passed in, val checks that often\n # otherwise, it checks in [0, 1.0] % range of a training epoch\n if isinstance(self.val_check_interval, int):\n self.val_check_batch = self.val_check_interval\n if self.val_check_batch > self.num_training_batches:\n raise ValueError(\n f'`val_check_interval` ({self.val_check_interval}) must be less than or equal '\n f'to the number of the training batches ({self.num_training_batches}). '\n 'If you want to disable validation set `val_percent_check` to 0.0 instead.')\n else:\n if not _has_len(self.train_dataloader):\n raise MisconfigurationException(\n 'When using an infinite DataLoader (e.g. with an IterableDataset or when '\n 'DataLoader does not implement `__len__`) for `train_dataloader`, '\n '`Trainer(val_check_interval)` must be an int. 
An int k specifies checking '\n 'validation every k training batches.')\n\n self._percent_range_check('val_check_interval')\n\n self.val_check_batch = int(self.num_training_batches * self.val_check_interval)\n self.val_check_batch = max(1, self.val_check_batch)\n\n def _reset_eval_dataloader(self, model: LightningModule,\n mode: str) -> Tuple[int, List[DataLoader]]:\n \"\"\"Generic method to reset a dataloader for evaluation.\n\n Args:\n model: The current `LightningModule`\n mode: Either `'val'` or `'test'`\n\n Returns:\n Tuple (num_batches, dataloaders)\n \"\"\"\n dataloaders = self.request_dataloader(getattr(model, f'{mode}_dataloader'))\n\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n # add samplers\n dataloaders = [self.auto_add_sampler(dl, train=False) for dl in dataloaders if dl]\n\n num_batches = 0\n\n # determine number of batches\n # datasets could be none, 1 or 2+\n if len(dataloaders) != 0:\n for dataloader in dataloaders:\n if not _has_len(dataloader):\n num_batches = float('inf')\n break\n\n percent_check = getattr(self, f'{mode}_percent_check')\n\n if num_batches != float('inf'):\n self._percent_range_check(f'{mode}_percent_check')\n\n num_batches = sum(len(dataloader) for dataloader in dataloaders)\n num_batches = int(num_batches * percent_check)\n elif percent_check not in (0.0, 1.0):\n raise MisconfigurationException(\n 'When using an infinite DataLoader (e.g. with an IterableDataset or when '\n f'DataLoader does not implement `__len__`) for `{mode}_dataloader`, '\n f'`Trainer({mode}_percent_check)` must be `0.0` or `1.0`.')\n return num_batches, dataloaders\n\n def reset_val_dataloader(self, model: LightningModule) -> None:\n \"\"\"Resets the validation dataloader and determines the number of batches.\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n if self.is_overriden('validation_step'):\n self.num_val_batches, self.val_dataloaders =\\\n self._reset_eval_dataloader(model, 'val')\n\n def reset_test_dataloader(self, model) -> None:\n \"\"\"Resets the validation dataloader and determines the number of batches.\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n if self.is_overriden('test_step'):\n self.num_test_batches, self.test_dataloaders =\\\n self._reset_eval_dataloader(model, 'test')\n\n def request_dataloader(self, dataloader_fx: Callable) -> DataLoader:\n \"\"\"Handles downloading data in the GPU or TPU case.\n\n Args:\n dataloader_fx: The bound dataloader getter\n\n Returns:\n The dataloader\n \"\"\"\n dataloader = dataloader_fx()\n\n # get the function we'll use to get data\n if self.use_ddp or self.use_ddp2:\n # all processes wait until data download has happened\n torch_distrib.barrier()\n\n # data download/load on TPU\n elif self.use_tpu and XLA_AVAILABLE:\n # all processes wait until data download has happened\n torch_xla.core.xla_model.rendezvous('pl.TrainerDataLoadingMixin.get_dataloaders')\n\n return dataloader\n\n def determine_data_use_amount(self, train_percent_check: float, val_percent_check: float,\n test_percent_check: float, overfit_pct: float) -> None:\n \"\"\"Use less data for debugging purposes\n \"\"\"\n self.train_percent_check = train_percent_check\n self.val_percent_check = val_percent_check\n self.test_percent_check = test_percent_check\n if overfit_pct > 0:\n if overfit_pct > 1:\n raise ValueError(\n f'`overfit_pct` must be not greater than 1.0, but got {overfit_pct:.3f}.')\n\n self.train_percent_check = overfit_pct\n self.val_percent_check = overfit_pct\n self.test_percent_check = 
overfit_pct\n", "path": "pytorch_lightning/trainer/data_loading.py"}], "after_files": [{"content": "from abc import ABC, abstractmethod\nfrom typing import Union, List, Tuple, Callable\n\nimport torch.distributed as torch_distrib\nfrom torch.utils.data import SequentialSampler, DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom pytorch_lightning.core import LightningModule\nfrom pytorch_lightning.utilities.debugging import MisconfigurationException\n\ntry:\n from apex import amp\nexcept ImportError:\n APEX_AVAILABLE = False\nelse:\n APEX_AVAILABLE = True\n\ntry:\n import torch_xla\n import torch_xla.core.xla_model as xm\n import torch_xla.distributed.xla_multiprocessing as xmp\nexcept ImportError:\n XLA_AVAILABLE = False\nelse:\n XLA_AVAILABLE = True\n\n\ndef _has_len(dataloader: DataLoader) -> bool:\n try:\n # try getting the length\n _ = len(dataloader)\n return True\n except TypeError:\n return False\n\n\nclass TrainerDataLoadingMixin(ABC):\n\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n proc_rank: int\n use_ddp: bool\n use_ddp2: bool\n shown_warnings: ...\n val_check_interval: float\n use_tpu: bool\n tpu_local_core_rank: int\n train_dataloader: DataLoader\n num_training_batches: Union[int, float]\n val_check_batch: ...\n val_dataloaders: List[DataLoader]\n num_val_batches: Union[int, float]\n test_dataloaders: List[DataLoader]\n num_test_batches: Union[int, float]\n train_percent_check: float\n val_percent_check: float\n test_percent_check: float\n\n @abstractmethod\n def is_overriden(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n def _percent_range_check(self, name: str) -> None:\n value = getattr(self, name)\n msg = f'`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}.'\n if name == 'val_check_interval':\n msg += ' If you want to disable validation set `val_percent_check` to 0.0 instead.'\n\n if not 0. 
<= value <= 1.:\n raise ValueError(msg)\n\n def auto_add_sampler(self, dataloader: DataLoader, train: bool) -> DataLoader:\n if self.use_ddp or self.use_ddp2 or self.use_tpu:\n dl_args = {\n 'dataset': dataloader.dataset,\n 'batch_size': dataloader.batch_size,\n 'shuffle': False,\n 'num_workers': dataloader.num_workers,\n 'collate_fn': dataloader.collate_fn,\n 'pin_memory': dataloader.pin_memory,\n 'drop_last': dataloader.drop_last,\n 'timeout': dataloader.timeout,\n 'worker_init_fn': dataloader.worker_init_fn\n }\n\n if self.use_tpu:\n sampler = DistributedSampler(\n dataloader.dataset,\n num_replicas=xm.xrt_world_size(),\n rank=xm.get_ordinal()\n )\n dl_args['shuffle'] = False\n else:\n sampler = DistributedSampler(dataloader.dataset)\n dl_args['shuffle'] = False\n\n dl_args['sampler'] = sampler\n dataloader = DataLoader(**dl_args)\n\n return dataloader\n\n def reset_train_dataloader(self, model: LightningModule) -> None:\n \"\"\"Resets the train dataloader and initialises required variables\n (number of batches, when to validate, etc.).\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n self.train_dataloader = self.request_dataloader(model.train_dataloader)\n self.num_training_batches = 0\n\n # automatically add samplers\n self.train_dataloader = self.auto_add_sampler(self.train_dataloader, train=True)\n\n self._percent_range_check('train_percent_check')\n\n if not _has_len(self.train_dataloader):\n self.num_training_batches = float('inf')\n else:\n # try getting the length\n self.num_training_batches = len(self.train_dataloader)\n self.num_training_batches = int(self.num_training_batches * self.train_percent_check)\n\n # determine when to check validation\n # if int passed in, val checks that often\n # otherwise, it checks in [0, 1.0] % range of a training epoch\n if isinstance(self.val_check_interval, int):\n self.val_check_batch = self.val_check_interval\n if self.val_check_batch > self.num_training_batches:\n raise ValueError(\n f'`val_check_interval` ({self.val_check_interval}) must be less than or equal '\n f'to the number of the training batches ({self.num_training_batches}). '\n 'If you want to disable validation set `val_percent_check` to 0.0 instead.')\n else:\n if not _has_len(self.train_dataloader):\n raise MisconfigurationException(\n 'When using an infinite DataLoader (e.g. with an IterableDataset or when '\n 'DataLoader does not implement `__len__`) for `train_dataloader`, '\n '`Trainer(val_check_interval)` must be an int. 
An int k specifies checking '\n 'validation every k training batches.')\n\n self._percent_range_check('val_check_interval')\n\n self.val_check_batch = int(self.num_training_batches * self.val_check_interval)\n self.val_check_batch = max(1, self.val_check_batch)\n\n def _reset_eval_dataloader(self, model: LightningModule,\n mode: str) -> Tuple[int, List[DataLoader]]:\n \"\"\"Generic method to reset a dataloader for evaluation.\n\n Args:\n model: The current `LightningModule`\n mode: Either `'val'` or `'test'`\n\n Returns:\n Tuple (num_batches, dataloaders)\n \"\"\"\n dataloaders = self.request_dataloader(getattr(model, f'{mode}_dataloader'))\n\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n # add samplers\n dataloaders = [self.auto_add_sampler(dl, train=False) for dl in dataloaders if dl]\n\n num_batches = 0\n\n # determine number of batches\n # datasets could be none, 1 or 2+\n if len(dataloaders) != 0:\n for dataloader in dataloaders:\n if not _has_len(dataloader):\n num_batches = float('inf')\n break\n\n percent_check = getattr(self, f'{mode}_percent_check')\n\n if num_batches != float('inf'):\n self._percent_range_check(f'{mode}_percent_check')\n\n num_batches = sum(len(dataloader) for dataloader in dataloaders)\n num_batches = int(num_batches * percent_check)\n elif percent_check not in (0.0, 1.0):\n raise MisconfigurationException(\n 'When using an infinite DataLoader (e.g. with an IterableDataset or when '\n f'DataLoader does not implement `__len__`) for `{mode}_dataloader`, '\n f'`Trainer({mode}_percent_check)` must be `0.0` or `1.0`.')\n return num_batches, dataloaders\n\n def reset_val_dataloader(self, model: LightningModule) -> None:\n \"\"\"Resets the validation dataloader and determines the number of batches.\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n if self.is_overriden('validation_step'):\n self.num_val_batches, self.val_dataloaders =\\\n self._reset_eval_dataloader(model, 'val')\n\n def reset_test_dataloader(self, model) -> None:\n \"\"\"Resets the validation dataloader and determines the number of batches.\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n if self.is_overriden('test_step'):\n self.num_test_batches, self.test_dataloaders =\\\n self._reset_eval_dataloader(model, 'test')\n\n def request_dataloader(self, dataloader_fx: Callable) -> DataLoader:\n \"\"\"Handles downloading data in the GPU or TPU case.\n\n Args:\n dataloader_fx: The bound dataloader getter\n\n Returns:\n The dataloader\n \"\"\"\n dataloader = dataloader_fx()\n\n # get the function we'll use to get data\n if self.use_ddp or self.use_ddp2:\n # all processes wait until data download has happened\n torch_distrib.barrier()\n\n # data download/load on TPU\n elif self.use_tpu and XLA_AVAILABLE:\n # all processes wait until data download has happened\n torch_xla.core.xla_model.rendezvous('pl.TrainerDataLoadingMixin.get_dataloaders')\n\n return dataloader\n\n def determine_data_use_amount(self, train_percent_check: float, val_percent_check: float,\n test_percent_check: float, overfit_pct: float) -> None:\n \"\"\"Use less data for debugging purposes\n \"\"\"\n self.train_percent_check = train_percent_check\n self.val_percent_check = val_percent_check\n self.test_percent_check = test_percent_check\n if overfit_pct > 0:\n if overfit_pct > 1:\n raise ValueError(\n f'`overfit_pct` must be not greater than 1.0, but got {overfit_pct:.3f}.')\n\n self.train_percent_check = overfit_pct\n self.val_percent_check = overfit_pct\n self.test_percent_check = 
overfit_pct\n", "path": "pytorch_lightning/trainer/data_loading.py"}]} | 3,168 | 179 |
gh_patches_debug_34006 | rasdani/github-patches | git_diff | conan-io__conan-5846 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[question] How to not include some libs in conanbuildinfo.props
Conan version 1.19.0
I have a dependency on boost (boost/1.71.0@conan/stable).
I'm using the visual_studio generator with VS2017
This adds *all* of the Boost libs to conanbuildinfo.props, which causes my build to fail. I don't completely understand why this used to be OK with 1.68 and now fails with that version too, after moving my cache to a different drive (perhaps it was due to installing VS2019 alongside VS2017).
But regardless, it would be good if I could either cherry-pick which libs to include or remove specific ones from the list.
For example, I don't want to include any of the stacktrace libs, which result in duplicate symbols.
I can't figure out how to do it. If I specify any of the 'without_xxx' options then it rebuilds the whole of the Boost dependency, which I would rather avoid; I just want to control which libs end up in the VS props file!
I was trying to see if there was some place I could remove them from deps_cpp_info but it doesn't look like there is a place to put that (i.e. an event just before the generators are run).
Perhaps overriding the generator could do the trick but it seems like that would be a bit of a sledgehammer and not all that clean.
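For what it's worth, the sort of thing I had in mind is a thin subclass of the generator that filters the libs before the props file is rendered. This is only a rough sketch: `_deps_build_info` is internal API, the prefix list is just an example, and I have no idea whether this is a supported extension point:

```python
from conans.client.generators.visualstudio import VisualStudioGenerator


class FilteredVisualStudioGenerator(VisualStudioGenerator):
    """visual_studio output minus the libs I never want linked."""

    # Example only: drop every Boost.Stacktrace variant.
    excluded_prefixes = ("libboost_stacktrace",)

    @property
    def content(self):
        # Remove unwanted libs from each dependency before rendering the props file.
        for _, cpp_info in self._deps_build_info.dependencies:
            cpp_info.libs = [
                lib for lib in cpp_info.libs
                if not lib.startswith(self.excluded_prefixes)
            ]
        return super(FilteredVisualStudioGenerator, self).content
```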
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/generators/visualstudio.py`
Content:
```
1 import os
2 import re
3
4 from conans.model import Generator
5 from conans.paths import BUILD_INFO_VISUAL_STUDIO
6 from conans.client.tools.files import VALID_LIB_EXTENSIONS
7
8
9 class VisualStudioGenerator(Generator):
10
11 template = '''<?xml version="1.0" encoding="utf-8"?>
12 <Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
13 <ImportGroup Label="PropertySheets" />
14 <PropertyGroup Label="UserMacros" />
15 <PropertyGroup Label="Conan-RootDirs">{item_properties}
16 </PropertyGroup>
17 {properties}
18 <ItemGroup />
19 </Project>'''
20
21 properties_template = ''' <PropertyGroup Label="ConanVariables"{condition}>
22 <ConanCompilerFlags>{compiler_flags}</ConanCompilerFlags>
23 <ConanPreprocessorDefinitions>{definitions}</ConanPreprocessorDefinitions>
24 <ConanIncludeDirectories>{include_dirs}</ConanIncludeDirectories>
25 <ConanResourceDirectories>{res_dirs}</ConanResourceDirectories>
26 <ConanLibraryDirectories>{lib_dirs}</ConanLibraryDirectories>
27 <ConanBinaryDirectories>{bin_dirs}</ConanBinaryDirectories>
28 </PropertyGroup>
29 <PropertyGroup{condition}>
30 <LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment>
31 <DebuggerFlavor>WindowsLocalDebugger</DebuggerFlavor>
32 </PropertyGroup>
33 <ItemDefinitionGroup{condition}>
34 <ClCompile>
35 <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
36 <PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions>
37 <AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions>
38 </ClCompile>
39 <Link>
40 <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
41 <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>
42 <AdditionalOptions>{linker_flags} %(AdditionalOptions)</AdditionalOptions>
43 </Link>
44 <Lib>
45 <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
46 <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>
47 </Lib>
48 <Midl>
49 <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
50 </Midl>
51 <ResourceCompile>
52 <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
53 <PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions>
54 <AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions>
55 </ResourceCompile>
56 </ItemDefinitionGroup>'''
57
58 item_template = '''
59 <Conan-{name}-Root>{root_dir}</Conan-{name}-Root>'''
60
61 def _format_items(self):
62 sections = []
63 for dep_name, cpp_info in self._deps_build_info.dependencies:
64 fields = {
65 'root_dir': cpp_info.rootpath,
66 'name': dep_name.replace(".", "-")
67 }
68 section = self.item_template.format(**fields)
69 sections.append(section)
70 return "".join(sections)
71
72 @property
73 def filename(self):
74 return BUILD_INFO_VISUAL_STUDIO
75
76 def _format_properties(self, build_info, condition):
77 def has_valid_ext(lib):
78 ext = os.path.splitext(lib)[1]
79 return ext in VALID_LIB_EXTENSIONS
80
81 fields = {
82 'condition': condition,
83 'bin_dirs': "".join("%s;" % p for p in build_info.bin_paths),
84 'res_dirs': "".join("%s;" % p for p in build_info.res_paths),
85 'include_dirs': "".join("%s;" % p for p in build_info.include_paths),
86 'lib_dirs': "".join("%s;" % p for p in build_info.lib_paths),
87 'libs': "".join(['%s.lib;' % lib if not has_valid_ext(lib)
88 else '%s;' % lib for lib in build_info.libs]),
89 'definitions': "".join("%s;" % d for d in build_info.defines),
90 'compiler_flags': " ".join(build_info.cxxflags + build_info.cflags),
91 'linker_flags': " ".join(build_info.sharedlinkflags),
92 'exe_flags': " ".join(build_info.exelinkflags)
93 }
94 formatted_template = self.properties_template.format(**fields)
95 return formatted_template
96
97 @property
98 def content(self):
99 per_item_props = self._format_items()
100
101 properties = [self._format_properties(self._deps_build_info, condition='')]
102 for config, cpp_info in self._deps_build_info.configs.items():
103 condition = " Condition=\"'$(Configuration)' == '%s'\"" % config
104 properties.append(self._format_properties(cpp_info, condition=condition))
105
106 fields = {
107 'item_properties': per_item_props,
108 'properties': '\n'.join(properties)
109 }
110 formatted_template = self.template.format(**fields)
111
112 userprofile = os.getenv("USERPROFILE")
113 if userprofile:
114 userprofile = userprofile.replace("\\", "\\\\")
115 formatted_template = re.sub(userprofile, "$(USERPROFILE)", formatted_template,
116 flags=re.I)
117 return formatted_template
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/client/generators/visualstudio.py b/conans/client/generators/visualstudio.py
--- a/conans/client/generators/visualstudio.py
+++ b/conans/client/generators/visualstudio.py
@@ -18,13 +18,15 @@
<ItemGroup />
</Project>'''
- properties_template = ''' <PropertyGroup Label="ConanVariables"{condition}>
+ properties_template = '''<PropertyGroup Label="ConanVariables"{condition}>
<ConanCompilerFlags>{compiler_flags}</ConanCompilerFlags>
+ <ConanLinkerFlags>{linker_flags}</ConanLinkerFlags>
<ConanPreprocessorDefinitions>{definitions}</ConanPreprocessorDefinitions>
<ConanIncludeDirectories>{include_dirs}</ConanIncludeDirectories>
<ConanResourceDirectories>{res_dirs}</ConanResourceDirectories>
<ConanLibraryDirectories>{lib_dirs}</ConanLibraryDirectories>
<ConanBinaryDirectories>{bin_dirs}</ConanBinaryDirectories>
+ <ConanLibraries>{libs}</ConanLibraries>
</PropertyGroup>
<PropertyGroup{condition}>
<LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment>
@@ -38,13 +40,9 @@
</ClCompile>
<Link>
<AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
- <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>
- <AdditionalOptions>{linker_flags} %(AdditionalOptions)</AdditionalOptions>
+ <AdditionalDependencies>$(ConanLibraries)%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalOptions>$(ConanLinkerFlags) %(AdditionalOptions)</AdditionalOptions>
</Link>
- <Lib>
- <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
- <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>
- </Lib>
<Midl>
<AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</Midl>
| {"golden_diff": "diff --git a/conans/client/generators/visualstudio.py b/conans/client/generators/visualstudio.py\n--- a/conans/client/generators/visualstudio.py\n+++ b/conans/client/generators/visualstudio.py\n@@ -18,13 +18,15 @@\n <ItemGroup />\n </Project>'''\n \n- properties_template = ''' <PropertyGroup Label=\"ConanVariables\"{condition}>\n+ properties_template = '''<PropertyGroup Label=\"ConanVariables\"{condition}>\n <ConanCompilerFlags>{compiler_flags}</ConanCompilerFlags>\n+ <ConanLinkerFlags>{linker_flags}</ConanLinkerFlags>\n <ConanPreprocessorDefinitions>{definitions}</ConanPreprocessorDefinitions>\n <ConanIncludeDirectories>{include_dirs}</ConanIncludeDirectories>\n <ConanResourceDirectories>{res_dirs}</ConanResourceDirectories>\n <ConanLibraryDirectories>{lib_dirs}</ConanLibraryDirectories>\n <ConanBinaryDirectories>{bin_dirs}</ConanBinaryDirectories>\n+ <ConanLibraries>{libs}</ConanLibraries>\n </PropertyGroup>\n <PropertyGroup{condition}>\n <LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment>\n@@ -38,13 +40,9 @@\n </ClCompile>\n <Link>\n <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n- <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>\n- <AdditionalOptions>{linker_flags} %(AdditionalOptions)</AdditionalOptions>\n+ <AdditionalDependencies>$(ConanLibraries)%(AdditionalDependencies)</AdditionalDependencies>\n+ <AdditionalOptions>$(ConanLinkerFlags) %(AdditionalOptions)</AdditionalOptions>\n </Link>\n- <Lib>\n- <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n- <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>\n- </Lib>\n <Midl>\n <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n </Midl>\n", "issue": "[question] How to not include some libs in conanbuildinfo.props\nConan version 1.19.0\r\n\r\nI have a dependency on boost (boost/1.71.0@conan/stable).\r\n\r\nI'm using the visual_studio generator with VS2017\r\n\r\nThis adds *all* of the boost libs to conanbuildinfo.props which causes my build to fail. I don't completely understand why this used to be OK with 1.68 and now fails with that too after moving my cache to a different drive (perhaps it was due to installing vs2019 alongside vs2017). \r\n\r\nBut regardless it would be good if I could either cherrypick which libs to include or remove specific ones from the list.\r\n\r\nFor example I don't want to include any of the stacktrace libs which result in duplicate symbols.\r\n\r\nI can't figure out how to do it. If I specify any of the 'without_xxx' options then it rebuilds the whole of the boost dependency which I would rather avoid - I just want to control which libs it puts in the vs props file!\r\n\r\nI was trying to see if there was some place I could remove them from deps_cpp_info but it doesn't look like there is a place to put that (i.e. 
an event just before the generators are run).\r\n\r\nPerhaps overriding the generator could do the trick but it seems like that would be a bit of a sledgehammer and not all that clean.\r\n\r\n\r\n\n", "before_files": [{"content": "import os\nimport re\n\nfrom conans.model import Generator\nfrom conans.paths import BUILD_INFO_VISUAL_STUDIO\nfrom conans.client.tools.files import VALID_LIB_EXTENSIONS\n\n\nclass VisualStudioGenerator(Generator):\n\n template = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project ToolsVersion=\"4.0\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n <ImportGroup Label=\"PropertySheets\" />\n <PropertyGroup Label=\"UserMacros\" />\n <PropertyGroup Label=\"Conan-RootDirs\">{item_properties}\n </PropertyGroup>\n {properties}\n <ItemGroup />\n</Project>'''\n\n properties_template = ''' <PropertyGroup Label=\"ConanVariables\"{condition}>\n <ConanCompilerFlags>{compiler_flags}</ConanCompilerFlags>\n <ConanPreprocessorDefinitions>{definitions}</ConanPreprocessorDefinitions>\n <ConanIncludeDirectories>{include_dirs}</ConanIncludeDirectories>\n <ConanResourceDirectories>{res_dirs}</ConanResourceDirectories>\n <ConanLibraryDirectories>{lib_dirs}</ConanLibraryDirectories>\n <ConanBinaryDirectories>{bin_dirs}</ConanBinaryDirectories>\n </PropertyGroup>\n <PropertyGroup{condition}>\n <LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment>\n <DebuggerFlavor>WindowsLocalDebugger</DebuggerFlavor>\n </PropertyGroup>\n <ItemDefinitionGroup{condition}>\n <ClCompile>\n <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n <PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions>\n <AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions>\n </ClCompile>\n <Link>\n <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>\n <AdditionalOptions>{linker_flags} %(AdditionalOptions)</AdditionalOptions>\n </Link>\n <Lib>\n <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n <AdditionalDependencies>{libs}%(AdditionalDependencies)</AdditionalDependencies>\n </Lib>\n <Midl>\n <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n </Midl>\n <ResourceCompile>\n <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n <PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions>\n <AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions>\n </ResourceCompile>\n </ItemDefinitionGroup>'''\n\n item_template = '''\n <Conan-{name}-Root>{root_dir}</Conan-{name}-Root>'''\n\n def _format_items(self):\n sections = []\n for dep_name, cpp_info in self._deps_build_info.dependencies:\n fields = {\n 'root_dir': cpp_info.rootpath,\n 'name': dep_name.replace(\".\", \"-\")\n }\n section = self.item_template.format(**fields)\n sections.append(section)\n return \"\".join(sections)\n\n @property\n def filename(self):\n return BUILD_INFO_VISUAL_STUDIO\n\n def _format_properties(self, build_info, condition):\n def has_valid_ext(lib):\n ext = os.path.splitext(lib)[1]\n return ext in VALID_LIB_EXTENSIONS\n\n fields = {\n 'condition': 
condition,\n 'bin_dirs': \"\".join(\"%s;\" % p for p in build_info.bin_paths),\n 'res_dirs': \"\".join(\"%s;\" % p for p in build_info.res_paths),\n 'include_dirs': \"\".join(\"%s;\" % p for p in build_info.include_paths),\n 'lib_dirs': \"\".join(\"%s;\" % p for p in build_info.lib_paths),\n 'libs': \"\".join(['%s.lib;' % lib if not has_valid_ext(lib)\n else '%s;' % lib for lib in build_info.libs]),\n 'definitions': \"\".join(\"%s;\" % d for d in build_info.defines),\n 'compiler_flags': \" \".join(build_info.cxxflags + build_info.cflags),\n 'linker_flags': \" \".join(build_info.sharedlinkflags),\n 'exe_flags': \" \".join(build_info.exelinkflags)\n }\n formatted_template = self.properties_template.format(**fields)\n return formatted_template\n\n @property\n def content(self):\n per_item_props = self._format_items()\n\n properties = [self._format_properties(self._deps_build_info, condition='')]\n for config, cpp_info in self._deps_build_info.configs.items():\n condition = \" Condition=\\\"'$(Configuration)' == '%s'\\\"\" % config\n properties.append(self._format_properties(cpp_info, condition=condition))\n\n fields = {\n 'item_properties': per_item_props,\n 'properties': '\\n'.join(properties)\n }\n formatted_template = self.template.format(**fields)\n\n userprofile = os.getenv(\"USERPROFILE\")\n if userprofile:\n userprofile = userprofile.replace(\"\\\\\", \"\\\\\\\\\")\n formatted_template = re.sub(userprofile, \"$(USERPROFILE)\", formatted_template,\n flags=re.I)\n return formatted_template\n", "path": "conans/client/generators/visualstudio.py"}], "after_files": [{"content": "import os\nimport re\n\nfrom conans.model import Generator\nfrom conans.paths import BUILD_INFO_VISUAL_STUDIO\nfrom conans.client.tools.files import VALID_LIB_EXTENSIONS\n\n\nclass VisualStudioGenerator(Generator):\n\n template = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project ToolsVersion=\"4.0\" xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n <ImportGroup Label=\"PropertySheets\" />\n <PropertyGroup Label=\"UserMacros\" />\n <PropertyGroup Label=\"Conan-RootDirs\">{item_properties}\n </PropertyGroup>\n {properties}\n <ItemGroup />\n</Project>'''\n\n properties_template = '''<PropertyGroup Label=\"ConanVariables\"{condition}>\n <ConanCompilerFlags>{compiler_flags}</ConanCompilerFlags>\n <ConanLinkerFlags>{linker_flags}</ConanLinkerFlags>\n <ConanPreprocessorDefinitions>{definitions}</ConanPreprocessorDefinitions>\n <ConanIncludeDirectories>{include_dirs}</ConanIncludeDirectories>\n <ConanResourceDirectories>{res_dirs}</ConanResourceDirectories>\n <ConanLibraryDirectories>{lib_dirs}</ConanLibraryDirectories>\n <ConanBinaryDirectories>{bin_dirs}</ConanBinaryDirectories>\n <ConanLibraries>{libs}</ConanLibraries>\n </PropertyGroup>\n <PropertyGroup{condition}>\n <LocalDebuggerEnvironment>PATH=%PATH%;{bin_dirs}</LocalDebuggerEnvironment>\n <DebuggerFlavor>WindowsLocalDebugger</DebuggerFlavor>\n </PropertyGroup>\n <ItemDefinitionGroup{condition}>\n <ClCompile>\n <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n <PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions>\n <AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions>\n </ClCompile>\n <Link>\n <AdditionalLibraryDirectories>$(ConanLibraryDirectories)%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>\n 
<AdditionalDependencies>$(ConanLibraries)%(AdditionalDependencies)</AdditionalDependencies>\n <AdditionalOptions>$(ConanLinkerFlags) %(AdditionalOptions)</AdditionalOptions>\n </Link>\n <Midl>\n <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n </Midl>\n <ResourceCompile>\n <AdditionalIncludeDirectories>$(ConanIncludeDirectories)%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\n <PreprocessorDefinitions>$(ConanPreprocessorDefinitions)%(PreprocessorDefinitions)</PreprocessorDefinitions>\n <AdditionalOptions>$(ConanCompilerFlags) %(AdditionalOptions)</AdditionalOptions>\n </ResourceCompile>\n </ItemDefinitionGroup>'''\n\n item_template = '''\n <Conan-{name}-Root>{root_dir}</Conan-{name}-Root>'''\n\n def _format_items(self):\n sections = []\n for dep_name, cpp_info in self._deps_build_info.dependencies:\n fields = {\n 'root_dir': cpp_info.rootpath,\n 'name': dep_name.replace(\".\", \"-\")\n }\n section = self.item_template.format(**fields)\n sections.append(section)\n return \"\".join(sections)\n\n @property\n def filename(self):\n return BUILD_INFO_VISUAL_STUDIO\n\n def _format_properties(self, build_info, condition):\n def has_valid_ext(lib):\n ext = os.path.splitext(lib)[1]\n return ext in VALID_LIB_EXTENSIONS\n\n fields = {\n 'condition': condition,\n 'bin_dirs': \"\".join(\"%s;\" % p for p in build_info.bin_paths),\n 'res_dirs': \"\".join(\"%s;\" % p for p in build_info.res_paths),\n 'include_dirs': \"\".join(\"%s;\" % p for p in build_info.include_paths),\n 'lib_dirs': \"\".join(\"%s;\" % p for p in build_info.lib_paths),\n 'libs': \"\".join(['%s.lib;' % lib if not has_valid_ext(lib)\n else '%s;' % lib for lib in build_info.libs]),\n 'definitions': \"\".join(\"%s;\" % d for d in build_info.defines),\n 'compiler_flags': \" \".join(build_info.cxxflags + build_info.cflags),\n 'linker_flags': \" \".join(build_info.sharedlinkflags),\n 'exe_flags': \" \".join(build_info.exelinkflags)\n }\n formatted_template = self.properties_template.format(**fields)\n return formatted_template\n\n @property\n def content(self):\n per_item_props = self._format_items()\n\n properties = [self._format_properties(self._deps_build_info, condition='')]\n for config, cpp_info in self._deps_build_info.configs.items():\n condition = \" Condition=\\\"'$(Configuration)' == '%s'\\\"\" % config\n properties.append(self._format_properties(cpp_info, condition=condition))\n\n fields = {\n 'item_properties': per_item_props,\n 'properties': '\\n'.join(properties)\n }\n formatted_template = self.template.format(**fields)\n\n userprofile = os.getenv(\"USERPROFILE\")\n if userprofile:\n userprofile = userprofile.replace(\"\\\\\", \"\\\\\\\\\")\n formatted_template = re.sub(userprofile, \"$(USERPROFILE)\", formatted_template,\n flags=re.I)\n return formatted_template\n", "path": "conans/client/generators/visualstudio.py"}]} | 1,964 | 462 |
gh_patches_debug_38446 | rasdani/github-patches | git_diff | WeblateOrg__weblate-2962 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Download translation & Weblate-specific data
Hello,
When downloading for offline translation, you get the store data but you never get Weblate-specific data such as Suggestions, Weblate Comments, or Weblate source string tags like check-length etc.
Do you think it would be appropriate to include this data in the download* so that an offline translator has a level of information closer to what is available online?
Thanks!
*) I guess all formats have a comment zone where all of this could be fed.
For .po we could be a bit more specific: Weblate comments and suggestions could qualify as translator comments (#) rather than extracted comments (#.), and Weblate flags could perhaps map to gettext flags (#,).
(Or maybe you'd envision that as a separate downloaded file??)
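To illustrate the .po idea, here is a rough sketch against the translate-toolkit unit API that the exporter already uses; the suggestion/comment/flag lists are placeholders, since I am not assuming any particular Weblate accessor for them:

```python
def annotate_po_unit(output, suggestions, comments, flags):
    """Sketch: push Weblate-only data into the PO comment zones."""
    # Suggestions and Weblate comments as translator comments ("# ").
    for text in suggestions:
        output.addnote('Suggestion: %s' % text, origin='translator')
    for text in comments:
        output.addnote(text, origin='translator')
    # Weblate flags such as check-length as gettext flags ("#, "), where supported.
    if hasattr(output, 'settypecomment'):
        for flag in flags:
            output.settypecomment(flag)
```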
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `weblate/formats/exporters.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © 2012 - 2019 Michal Čihař <[email protected]>
4 #
5 # This file is part of Weblate <https://weblate.org/>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with this program. If not, see <https://www.gnu.org/licenses/>.
19 #
20 """Exporter using translate-toolkit"""
21 from __future__ import unicode_literals
22
23 from django.http import HttpResponse
24 from django.utils.functional import cached_property
25 from django.utils.translation import ugettext_lazy as _
26 from translate.misc.multistring import multistring
27 from translate.storage.csvl10n import csvfile
28 from translate.storage.mo import mofile
29 from translate.storage.po import pofile
30 from translate.storage.poxliff import PoXliffFile
31 from translate.storage.tbx import tbxfile
32 from translate.storage.tmx import tmxfile
33 from translate.storage.xliff import xlifffile
34
35 import weblate
36 from weblate.formats.external import XlsxFormat
37 from weblate.formats.ttkit import TTKitFormat
38 from weblate.utils.site import get_site_url
39
40 # Map to remove control chars except newlines and tabs
41 _CHARMAP = dict.fromkeys(
42 x for x in range(32) if x not in (9, 10, 13)
43 )
44
45 EXPORTERS = {}
46
47
48 def register_exporter(exporter):
49 """Register an exporter."""
50 EXPORTERS[exporter.name] = exporter
51 return exporter
52
53
54 def get_exporter(name):
55 """Return registered exporter"""
56 return EXPORTERS[name]
57
58
59 def list_exporters(translation):
60 return [
61 {'name': x.name, 'verbose': x.verbose}
62 for x in sorted(EXPORTERS.values(), key=lambda x: x.name)
63 if x.supports(translation)
64 ]
65
66
67 class BaseExporter(object):
68 content_type = 'text/plain'
69 extension = 'txt'
70 name = ''
71 verbose = ''
72 set_id = False
73
74 def __init__(self, project=None, language=None, url=None,
75 translation=None, fieldnames=None):
76 if translation is not None:
77 self.plural = translation.plural
78 self.project = translation.component.project
79 self.language = translation.language
80 self.url = get_site_url(translation.get_absolute_url())
81 else:
82 self.project = project
83 self.language = language
84 self.plural = language.plural
85 self.url = url
86 self.fieldnames = fieldnames
87
88 @staticmethod
89 def supports(translation):
90 return True
91
92 @cached_property
93 def storage(self):
94 storage = self.get_storage()
95 storage.setsourcelanguage(
96 self.project.source_language.code
97 )
98 storage.settargetlanguage(
99 self.language.code
100 )
101 return storage
102
103 def string_filter(self, text):
104 return text
105
106 def handle_plurals(self, plurals):
107 if len(plurals) == 1:
108 return self.string_filter(plurals[0])
109 return multistring(
110 [self.string_filter(plural) for plural in plurals]
111 )
112
113 def get_storage(self):
114 raise NotImplementedError()
115
116 def add(self, unit, word):
117 unit.target = word
118
119 def add_dictionary(self, word):
120 """Add dictionary word"""
121 unit = self.storage.UnitClass(self.string_filter(word.source))
122 self.add(unit, self.string_filter(word.target))
123 self.storage.addunit(unit)
124
125 def add_units(self, units):
126 for unit in units.iterator():
127 self.add_unit(unit)
128
129 def add_unit(self, unit):
130 output = self.storage.UnitClass(
131 self.handle_plurals(unit.get_source_plurals())
132 )
133 self.add(output, self.handle_plurals(unit.get_target_plurals()))
134 # Location needs to be set prior to ID to avoid overwrite
135 # on some formats (eg. xliff)
136 for location in unit.location.split():
137 if location:
138 output.addlocation(location)
139
140 # Store context as context and ID
141 context = self.string_filter(unit.context)
142 if context:
143 output.setcontext(context)
144 if self.set_id:
145 output.setid(context)
146 elif self.set_id:
147 # Use checksum based ID on formats requiring it
148 output.setid(unit.checksum)
149
150 # Store note
151 note = self.string_filter(unit.comment)
152 if note:
153 output.addnote(note, origin='developer')
154
155 # Set type comment (for Gettext)
156 if hasattr(output, 'settypecomment'):
157 for flag in unit.flags.split(','):
158 if flag:
159 output.settypecomment(flag)
160
161 # Store fuzzy flag
162 if unit.fuzzy:
163 output.markfuzzy(True)
164
165 self.storage.addunit(output)
166
167 def get_response(self, filetemplate='{project}-{language}.{extension}'):
168 filename = filetemplate.format(
169 project=self.project.slug,
170 language=self.language.code,
171 extension=self.extension
172 )
173
174 response = HttpResponse(
175 content_type='{0}; charset=utf-8'.format(self.content_type)
176 )
177 response['Content-Disposition'] = 'attachment; filename={0}'.format(
178 filename
179 )
180
181 # Save to response
182 response.write(self.serialize())
183
184 return response
185
186 def serialize(self):
187 """Return storage content"""
188 return TTKitFormat.serialize(self.storage)
189
190
191 @register_exporter
192 class PoExporter(BaseExporter):
193 name = 'po'
194 content_type = 'text/x-po'
195 extension = 'po'
196 verbose = _('gettext PO')
197 _storage = pofile
198
199 def get_storage(self):
200 store = self._storage()
201 plural = self.plural
202
203 # Set po file header
204 store.updateheader(
205 add=True,
206 language=self.language.code,
207 x_generator='Weblate {0}'.format(weblate.VERSION),
208 project_id_version='{0} ({1})'.format(
209 self.language.name, self.project.name
210 ),
211 plural_forms=plural.plural_form,
212 language_team='{0} <{1}>'.format(
213 self.language.name,
214 self.url
215 )
216 )
217 return store
218
219
220 class XMLExporter(BaseExporter):
221 """Wrapper for XML based exporters to strip control chars"""
222
223 def string_filter(self, text):
224 return text.translate(_CHARMAP)
225
226 def get_storage(self):
227 raise NotImplementedError()
228
229 def add(self, unit, word):
230 unit.settarget(word, self.language.code)
231
232
233 @register_exporter
234 class PoXliffExporter(XMLExporter):
235 name = 'xliff'
236 content_type = 'application/x-xliff+xml'
237 extension = 'xlf'
238 set_id = True
239 verbose = _('XLIFF with gettext extensions')
240
241 def get_storage(self):
242 return PoXliffFile()
243
244
245 @register_exporter
246 class XliffExporter(XMLExporter):
247 name = 'xliff11'
248 content_type = 'application/x-xliff+xml'
249 extension = 'xlf'
250 set_id = True
251 verbose = _('XLIFF 1.1')
252
253 def get_storage(self):
254 return xlifffile()
255
256
257 @register_exporter
258 class TBXExporter(XMLExporter):
259 name = 'tbx'
260 content_type = 'application/x-tbx'
261 extension = 'tbx'
262 verbose = _('TBX')
263
264 def get_storage(self):
265 return tbxfile()
266
267
268 @register_exporter
269 class TMXExporter(XMLExporter):
270 name = 'tmx'
271 content_type = 'application/x-tmx'
272 extension = 'tmx'
273 verbose = _('TMX')
274
275 def get_storage(self):
276 return tmxfile()
277
278
279 @register_exporter
280 class MoExporter(PoExporter):
281 name = 'mo'
282 content_type = 'application/x-gettext-catalog'
283 extension = 'mo'
284 verbose = _('gettext MO')
285 _storage = mofile
286
287 def __init__(self, project=None, language=None, url=None,
288 translation=None, fieldnames=None):
289 super(MoExporter, self).__init__(
290 project, language, url, translation, fieldnames
291 )
292 # Detect storage properties
293 self.monolingual = False
294 self.use_context = False
295 if translation:
296 self.monolingual = translation.component.has_template()
297 if self.monolingual:
298 unit = next(translation.store.translatable_units, None)
299 self.use_context = unit is not None and not unit.template.source
300
301 def add_unit(self, unit):
302 # We do not store not translated units
303 if not unit.translated:
304 return
305 # Parse properties from unit
306 if self.monolingual:
307 if self.use_context:
308 source = ''
309 context = unit.context
310 else:
311 source = unit.context
312 context = ''
313 else:
314 source = self.handle_plurals(unit.get_source_plurals())
315 context = unit.context
316 # Actually create the unit and set attributes
317 output = self.storage.UnitClass(source)
318 output.target = self.handle_plurals(unit.get_target_plurals())
319 if context:
320 # The setcontext doesn't work on mounit
321 output.msgctxt = [context]
322 # Add unit to the storage
323 self.storage.addunit(output)
324
325 @staticmethod
326 def supports(translation):
327 return translation.component.file_format == 'po'
328
329
330 @register_exporter
331 class CSVExporter(BaseExporter):
332 name = 'csv'
333 content_type = 'text/csv'
334 extension = 'csv'
335 verbose = _('CSV')
336
337 def get_storage(self):
338 return csvfile(fieldnames=self.fieldnames)
339
340 def string_filter(self, text):
341 """Avoid Excel interpreting text as formula.
342
343 This is really bad idea, implemented in Excel, as this change leads
344 to displaying additional ' in all other tools, but this seems to be
345 what most people have gotten used to. Hopefully these chars are not
346 widely used at first position of translatable strings, so that harm is
347 reduced.
348 """
349 if text and text[0] in ('=', '+', '-', '@', '|', '%'):
350 return "'{0}'".format(text.replace('|', '\\|'))
351 return text
352
353
354 @register_exporter
355 class XlsxExporter(BaseExporter):
356 name = 'xlsx'
357 content_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
358 extension = 'xlsx'
359 verbose = _('Excel Open XML')
360
361 def get_storage(self):
362 return csvfile(fieldnames=self.fieldnames)
363
364 def serialize(self):
365 """Return storage content"""
366 return XlsxFormat.serialize(self.storage)
367
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/weblate/formats/exporters.py b/weblate/formats/exporters.py
--- a/weblate/formats/exporters.py
+++ b/weblate/formats/exporters.py
@@ -151,12 +151,23 @@
note = self.string_filter(unit.comment)
if note:
output.addnote(note, origin='developer')
+ # In Weblate context
+ note = self.string_filter(unit.source_info.context)
+ if context:
+ output.addnote(note, origin='developer')
+ # Comments
+ for comment in unit.get_comments():
+ output.addnote(comment.comment, origin='translator')
+ # Suggestions
+ for suggestion in unit.suggestions:
+ output.addnote(
+ 'Suggested in Weblate: {}'.format(suggestion.target),
+ origin='translator'
+ )
- # Set type comment (for Gettext)
- if hasattr(output, 'settypecomment'):
- for flag in unit.flags.split(','):
- if flag:
- output.settypecomment(flag)
+ # Store flags
+ if unit.all_flags:
+ self.store_flags(output, unit.all_flags)
# Store fuzzy flag
if unit.fuzzy:
@@ -187,6 +198,9 @@
"""Return storage content"""
return TTKitFormat.serialize(self.storage)
+ def store_flags(self, output, flags):
+ return
+
@register_exporter
class PoExporter(BaseExporter):
@@ -196,6 +210,10 @@
verbose = _('gettext PO')
_storage = pofile
+ def store_flags(self, output, flags):
+ for flag in flags.items():
+ output.settypecomment(flag)
+
def get_storage(self):
store = self._storage()
plural = self.plural
@@ -241,9 +259,15 @@
def get_storage(self):
return PoXliffFile()
+ def store_flags(self, output, flags):
+ if flags.has_value('max-length'):
+ output.xmlelement.set("maxwidth", str(flags.get_value('max-length')))
+
+ output.xmlelement.set("weblate-flags", flags.format())
+
@register_exporter
-class XliffExporter(XMLExporter):
+class XliffExporter(PoXliffExporter):
name = 'xliff11'
content_type = 'application/x-xliff+xml'
extension = 'xlf'
@@ -298,6 +322,9 @@
unit = next(translation.store.translatable_units, None)
self.use_context = unit is not None and not unit.template.source
+ def store_flags(self, output, flags):
+ return
+
def add_unit(self, unit):
# We do not store not translated units
if not unit.translated:
| {"golden_diff": "diff --git a/weblate/formats/exporters.py b/weblate/formats/exporters.py\n--- a/weblate/formats/exporters.py\n+++ b/weblate/formats/exporters.py\n@@ -151,12 +151,23 @@\n note = self.string_filter(unit.comment)\n if note:\n output.addnote(note, origin='developer')\n+ # In Weblate context\n+ note = self.string_filter(unit.source_info.context)\n+ if context:\n+ output.addnote(note, origin='developer')\n+ # Comments\n+ for comment in unit.get_comments():\n+ output.addnote(comment.comment, origin='translator')\n+ # Suggestions\n+ for suggestion in unit.suggestions:\n+ output.addnote(\n+ 'Suggested in Weblate: {}'.format(suggestion.target),\n+ origin='translator'\n+ )\n \n- # Set type comment (for Gettext)\n- if hasattr(output, 'settypecomment'):\n- for flag in unit.flags.split(','):\n- if flag:\n- output.settypecomment(flag)\n+ # Store flags\n+ if unit.all_flags:\n+ self.store_flags(output, unit.all_flags)\n \n # Store fuzzy flag\n if unit.fuzzy:\n@@ -187,6 +198,9 @@\n \"\"\"Return storage content\"\"\"\n return TTKitFormat.serialize(self.storage)\n \n+ def store_flags(self, output, flags):\n+ return\n+\n \n @register_exporter\n class PoExporter(BaseExporter):\n@@ -196,6 +210,10 @@\n verbose = _('gettext PO')\n _storage = pofile\n \n+ def store_flags(self, output, flags):\n+ for flag in flags.items():\n+ output.settypecomment(flag)\n+\n def get_storage(self):\n store = self._storage()\n plural = self.plural\n@@ -241,9 +259,15 @@\n def get_storage(self):\n return PoXliffFile()\n \n+ def store_flags(self, output, flags):\n+ if flags.has_value('max-length'):\n+ output.xmlelement.set(\"maxwidth\", str(flags.get_value('max-length')))\n+\n+ output.xmlelement.set(\"weblate-flags\", flags.format())\n+\n \n @register_exporter\n-class XliffExporter(XMLExporter):\n+class XliffExporter(PoXliffExporter):\n name = 'xliff11'\n content_type = 'application/x-xliff+xml'\n extension = 'xlf'\n@@ -298,6 +322,9 @@\n unit = next(translation.store.translatable_units, None)\n self.use_context = unit is not None and not unit.template.source\n \n+ def store_flags(self, output, flags):\n+ return\n+\n def add_unit(self, unit):\n # We do not store not translated units\n if not unit.translated:\n", "issue": "Download translation & Weblate-specific data\nHello,\nWhen downloading for offline translation, you get the store data but you never get Weblate-specific data such as Suggestions, Weblate Comments, or Weblate source string tags like check-length etc.\nDo you think it would be appropriate to get this data at download\\* so that offline translator has a level of information closer to online translation?\nThanks!\n\n*) I guess all formats have a comment zone where all of this could be fed. \nFor .po, we could be a bit more specific, as Weblate comments and suggestions could qualify as translator comments (#) (rather than extracted comments #.), and for Weblate flags, maybe flags? 
(#,)\n(Or maybe you'd envision that as a separate downloaded file??)\n## \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 2012 - 2019 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n#\n\"\"\"Exporter using translate-toolkit\"\"\"\nfrom __future__ import unicode_literals\n\nfrom django.http import HttpResponse\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\nfrom translate.misc.multistring import multistring\nfrom translate.storage.csvl10n import csvfile\nfrom translate.storage.mo import mofile\nfrom translate.storage.po import pofile\nfrom translate.storage.poxliff import PoXliffFile\nfrom translate.storage.tbx import tbxfile\nfrom translate.storage.tmx import tmxfile\nfrom translate.storage.xliff import xlifffile\n\nimport weblate\nfrom weblate.formats.external import XlsxFormat\nfrom weblate.formats.ttkit import TTKitFormat\nfrom weblate.utils.site import get_site_url\n\n# Map to remove control chars except newlines and tabs\n_CHARMAP = dict.fromkeys(\n x for x in range(32) if x not in (9, 10, 13)\n)\n\nEXPORTERS = {}\n\n\ndef register_exporter(exporter):\n \"\"\"Register an exporter.\"\"\"\n EXPORTERS[exporter.name] = exporter\n return exporter\n\n\ndef get_exporter(name):\n \"\"\"Return registered exporter\"\"\"\n return EXPORTERS[name]\n\n\ndef list_exporters(translation):\n return [\n {'name': x.name, 'verbose': x.verbose}\n for x in sorted(EXPORTERS.values(), key=lambda x: x.name)\n if x.supports(translation)\n ]\n\n\nclass BaseExporter(object):\n content_type = 'text/plain'\n extension = 'txt'\n name = ''\n verbose = ''\n set_id = False\n\n def __init__(self, project=None, language=None, url=None,\n translation=None, fieldnames=None):\n if translation is not None:\n self.plural = translation.plural\n self.project = translation.component.project\n self.language = translation.language\n self.url = get_site_url(translation.get_absolute_url())\n else:\n self.project = project\n self.language = language\n self.plural = language.plural\n self.url = url\n self.fieldnames = fieldnames\n\n @staticmethod\n def supports(translation):\n return True\n\n @cached_property\n def storage(self):\n storage = self.get_storage()\n storage.setsourcelanguage(\n self.project.source_language.code\n )\n storage.settargetlanguage(\n self.language.code\n )\n return storage\n\n def string_filter(self, text):\n return text\n\n def handle_plurals(self, plurals):\n if len(plurals) == 1:\n return self.string_filter(plurals[0])\n return multistring(\n [self.string_filter(plural) for plural in plurals]\n )\n\n def get_storage(self):\n raise NotImplementedError()\n\n def add(self, unit, word):\n unit.target = word\n\n def add_dictionary(self, word):\n \"\"\"Add dictionary word\"\"\"\n unit = 
self.storage.UnitClass(self.string_filter(word.source))\n self.add(unit, self.string_filter(word.target))\n self.storage.addunit(unit)\n\n def add_units(self, units):\n for unit in units.iterator():\n self.add_unit(unit)\n\n def add_unit(self, unit):\n output = self.storage.UnitClass(\n self.handle_plurals(unit.get_source_plurals())\n )\n self.add(output, self.handle_plurals(unit.get_target_plurals()))\n # Location needs to be set prior to ID to avoid overwrite\n # on some formats (eg. xliff)\n for location in unit.location.split():\n if location:\n output.addlocation(location)\n\n # Store context as context and ID\n context = self.string_filter(unit.context)\n if context:\n output.setcontext(context)\n if self.set_id:\n output.setid(context)\n elif self.set_id:\n # Use checksum based ID on formats requiring it\n output.setid(unit.checksum)\n\n # Store note\n note = self.string_filter(unit.comment)\n if note:\n output.addnote(note, origin='developer')\n\n # Set type comment (for Gettext)\n if hasattr(output, 'settypecomment'):\n for flag in unit.flags.split(','):\n if flag:\n output.settypecomment(flag)\n\n # Store fuzzy flag\n if unit.fuzzy:\n output.markfuzzy(True)\n\n self.storage.addunit(output)\n\n def get_response(self, filetemplate='{project}-{language}.{extension}'):\n filename = filetemplate.format(\n project=self.project.slug,\n language=self.language.code,\n extension=self.extension\n )\n\n response = HttpResponse(\n content_type='{0}; charset=utf-8'.format(self.content_type)\n )\n response['Content-Disposition'] = 'attachment; filename={0}'.format(\n filename\n )\n\n # Save to response\n response.write(self.serialize())\n\n return response\n\n def serialize(self):\n \"\"\"Return storage content\"\"\"\n return TTKitFormat.serialize(self.storage)\n\n\n@register_exporter\nclass PoExporter(BaseExporter):\n name = 'po'\n content_type = 'text/x-po'\n extension = 'po'\n verbose = _('gettext PO')\n _storage = pofile\n\n def get_storage(self):\n store = self._storage()\n plural = self.plural\n\n # Set po file header\n store.updateheader(\n add=True,\n language=self.language.code,\n x_generator='Weblate {0}'.format(weblate.VERSION),\n project_id_version='{0} ({1})'.format(\n self.language.name, self.project.name\n ),\n plural_forms=plural.plural_form,\n language_team='{0} <{1}>'.format(\n self.language.name,\n self.url\n )\n )\n return store\n\n\nclass XMLExporter(BaseExporter):\n \"\"\"Wrapper for XML based exporters to strip control chars\"\"\"\n\n def string_filter(self, text):\n return text.translate(_CHARMAP)\n\n def get_storage(self):\n raise NotImplementedError()\n\n def add(self, unit, word):\n unit.settarget(word, self.language.code)\n\n\n@register_exporter\nclass PoXliffExporter(XMLExporter):\n name = 'xliff'\n content_type = 'application/x-xliff+xml'\n extension = 'xlf'\n set_id = True\n verbose = _('XLIFF with gettext extensions')\n\n def get_storage(self):\n return PoXliffFile()\n\n\n@register_exporter\nclass XliffExporter(XMLExporter):\n name = 'xliff11'\n content_type = 'application/x-xliff+xml'\n extension = 'xlf'\n set_id = True\n verbose = _('XLIFF 1.1')\n\n def get_storage(self):\n return xlifffile()\n\n\n@register_exporter\nclass TBXExporter(XMLExporter):\n name = 'tbx'\n content_type = 'application/x-tbx'\n extension = 'tbx'\n verbose = _('TBX')\n\n def get_storage(self):\n return tbxfile()\n\n\n@register_exporter\nclass TMXExporter(XMLExporter):\n name = 'tmx'\n content_type = 'application/x-tmx'\n extension = 'tmx'\n verbose = _('TMX')\n\n def get_storage(self):\n 
return tmxfile()\n\n\n@register_exporter\nclass MoExporter(PoExporter):\n name = 'mo'\n content_type = 'application/x-gettext-catalog'\n extension = 'mo'\n verbose = _('gettext MO')\n _storage = mofile\n\n def __init__(self, project=None, language=None, url=None,\n translation=None, fieldnames=None):\n super(MoExporter, self).__init__(\n project, language, url, translation, fieldnames\n )\n # Detect storage properties\n self.monolingual = False\n self.use_context = False\n if translation:\n self.monolingual = translation.component.has_template()\n if self.monolingual:\n unit = next(translation.store.translatable_units, None)\n self.use_context = unit is not None and not unit.template.source\n\n def add_unit(self, unit):\n # We do not store not translated units\n if not unit.translated:\n return\n # Parse properties from unit\n if self.monolingual:\n if self.use_context:\n source = ''\n context = unit.context\n else:\n source = unit.context\n context = ''\n else:\n source = self.handle_plurals(unit.get_source_plurals())\n context = unit.context\n # Actually create the unit and set attributes\n output = self.storage.UnitClass(source)\n output.target = self.handle_plurals(unit.get_target_plurals())\n if context:\n # The setcontext doesn't work on mounit\n output.msgctxt = [context]\n # Add unit to the storage\n self.storage.addunit(output)\n\n @staticmethod\n def supports(translation):\n return translation.component.file_format == 'po'\n\n\n@register_exporter\nclass CSVExporter(BaseExporter):\n name = 'csv'\n content_type = 'text/csv'\n extension = 'csv'\n verbose = _('CSV')\n\n def get_storage(self):\n return csvfile(fieldnames=self.fieldnames)\n\n def string_filter(self, text):\n \"\"\"Avoid Excel interpreting text as formula.\n\n This is really bad idea, implemented in Excel, as this change leads\n to displaying additional ' in all other tools, but this seems to be\n what most people have gotten used to. Hopefully these chars are not\n widely used at first position of translatable strings, so that harm is\n reduced.\n \"\"\"\n if text and text[0] in ('=', '+', '-', '@', '|', '%'):\n return \"'{0}'\".format(text.replace('|', '\\\\|'))\n return text\n\n\n@register_exporter\nclass XlsxExporter(BaseExporter):\n name = 'xlsx'\n content_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n extension = 'xlsx'\n verbose = _('Excel Open XML')\n\n def get_storage(self):\n return csvfile(fieldnames=self.fieldnames)\n\n def serialize(self):\n \"\"\"Return storage content\"\"\"\n return XlsxFormat.serialize(self.storage)\n", "path": "weblate/formats/exporters.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 2012 - 2019 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <https://www.gnu.org/licenses/>.\n#\n\"\"\"Exporter using translate-toolkit\"\"\"\nfrom __future__ import unicode_literals\n\nfrom django.http import HttpResponse\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\nfrom translate.misc.multistring import multistring\nfrom translate.storage.csvl10n import csvfile\nfrom translate.storage.mo import mofile\nfrom translate.storage.po import pofile\nfrom translate.storage.poxliff import PoXliffFile\nfrom translate.storage.tbx import tbxfile\nfrom translate.storage.tmx import tmxfile\nfrom translate.storage.xliff import xlifffile\n\nimport weblate\nfrom weblate.formats.external import XlsxFormat\nfrom weblate.formats.ttkit import TTKitFormat\nfrom weblate.utils.site import get_site_url\n\n# Map to remove control chars except newlines and tabs\n_CHARMAP = dict.fromkeys(\n x for x in range(32) if x not in (9, 10, 13)\n)\n\nEXPORTERS = {}\n\n\ndef register_exporter(exporter):\n \"\"\"Register an exporter.\"\"\"\n EXPORTERS[exporter.name] = exporter\n return exporter\n\n\ndef get_exporter(name):\n \"\"\"Return registered exporter\"\"\"\n return EXPORTERS[name]\n\n\ndef list_exporters(translation):\n return [\n {'name': x.name, 'verbose': x.verbose}\n for x in sorted(EXPORTERS.values(), key=lambda x: x.name)\n if x.supports(translation)\n ]\n\n\nclass BaseExporter(object):\n content_type = 'text/plain'\n extension = 'txt'\n name = ''\n verbose = ''\n set_id = False\n\n def __init__(self, project=None, language=None, url=None,\n translation=None, fieldnames=None):\n if translation is not None:\n self.plural = translation.plural\n self.project = translation.component.project\n self.language = translation.language\n self.url = get_site_url(translation.get_absolute_url())\n else:\n self.project = project\n self.language = language\n self.plural = language.plural\n self.url = url\n self.fieldnames = fieldnames\n\n @staticmethod\n def supports(translation):\n return True\n\n @cached_property\n def storage(self):\n storage = self.get_storage()\n storage.setsourcelanguage(\n self.project.source_language.code\n )\n storage.settargetlanguage(\n self.language.code\n )\n return storage\n\n def string_filter(self, text):\n return text\n\n def handle_plurals(self, plurals):\n if len(plurals) == 1:\n return self.string_filter(plurals[0])\n return multistring(\n [self.string_filter(plural) for plural in plurals]\n )\n\n def get_storage(self):\n raise NotImplementedError()\n\n def add(self, unit, word):\n unit.target = word\n\n def add_dictionary(self, word):\n \"\"\"Add dictionary word\"\"\"\n unit = self.storage.UnitClass(self.string_filter(word.source))\n self.add(unit, self.string_filter(word.target))\n self.storage.addunit(unit)\n\n def add_units(self, units):\n for unit in units.iterator():\n self.add_unit(unit)\n\n def add_unit(self, unit):\n output = self.storage.UnitClass(\n self.handle_plurals(unit.get_source_plurals())\n )\n self.add(output, self.handle_plurals(unit.get_target_plurals()))\n # Location needs to be set prior to ID to avoid overwrite\n # on some formats (eg. 
xliff)\n for location in unit.location.split():\n if location:\n output.addlocation(location)\n\n # Store context as context and ID\n context = self.string_filter(unit.context)\n if context:\n output.setcontext(context)\n if self.set_id:\n output.setid(context)\n elif self.set_id:\n # Use checksum based ID on formats requiring it\n output.setid(unit.checksum)\n\n # Store note\n note = self.string_filter(unit.comment)\n if note:\n output.addnote(note, origin='developer')\n # In Weblate context\n note = self.string_filter(unit.source_info.context)\n if context:\n output.addnote(note, origin='developer')\n # Comments\n for comment in unit.get_comments():\n output.addnote(comment.comment, origin='translator')\n # Suggestions\n for suggestion in unit.suggestions:\n output.addnote(\n 'Suggested in Weblate: {}'.format(suggestion.target),\n origin='translator'\n )\n\n # Store flags\n if unit.all_flags:\n self.store_flags(output, unit.all_flags)\n\n # Store fuzzy flag\n if unit.fuzzy:\n output.markfuzzy(True)\n\n self.storage.addunit(output)\n\n def get_response(self, filetemplate='{project}-{language}.{extension}'):\n filename = filetemplate.format(\n project=self.project.slug,\n language=self.language.code,\n extension=self.extension\n )\n\n response = HttpResponse(\n content_type='{0}; charset=utf-8'.format(self.content_type)\n )\n response['Content-Disposition'] = 'attachment; filename={0}'.format(\n filename\n )\n\n # Save to response\n response.write(self.serialize())\n\n return response\n\n def serialize(self):\n \"\"\"Return storage content\"\"\"\n return TTKitFormat.serialize(self.storage)\n\n def store_flags(self, output, flags):\n return\n\n\n@register_exporter\nclass PoExporter(BaseExporter):\n name = 'po'\n content_type = 'text/x-po'\n extension = 'po'\n verbose = _('gettext PO')\n _storage = pofile\n\n def store_flags(self, output, flags):\n for flag in flags.items():\n output.settypecomment(flag)\n\n def get_storage(self):\n store = self._storage()\n plural = self.plural\n\n # Set po file header\n store.updateheader(\n add=True,\n language=self.language.code,\n x_generator='Weblate {0}'.format(weblate.VERSION),\n project_id_version='{0} ({1})'.format(\n self.language.name, self.project.name\n ),\n plural_forms=plural.plural_form,\n language_team='{0} <{1}>'.format(\n self.language.name,\n self.url\n )\n )\n return store\n\n\nclass XMLExporter(BaseExporter):\n \"\"\"Wrapper for XML based exporters to strip control chars\"\"\"\n\n def string_filter(self, text):\n return text.translate(_CHARMAP)\n\n def get_storage(self):\n raise NotImplementedError()\n\n def add(self, unit, word):\n unit.settarget(word, self.language.code)\n\n\n@register_exporter\nclass PoXliffExporter(XMLExporter):\n name = 'xliff'\n content_type = 'application/x-xliff+xml'\n extension = 'xlf'\n set_id = True\n verbose = _('XLIFF with gettext extensions')\n\n def get_storage(self):\n return PoXliffFile()\n\n def store_flags(self, output, flags):\n if flags.has_value('max-length'):\n output.xmlelement.set(\"maxwidth\", str(flags.get_value('max-length')))\n\n output.xmlelement.set(\"weblate-flags\", flags.format())\n\n\n@register_exporter\nclass XliffExporter(PoXliffExporter):\n name = 'xliff11'\n content_type = 'application/x-xliff+xml'\n extension = 'xlf'\n set_id = True\n verbose = _('XLIFF 1.1')\n\n def get_storage(self):\n return xlifffile()\n\n\n@register_exporter\nclass TBXExporter(XMLExporter):\n name = 'tbx'\n content_type = 'application/x-tbx'\n extension = 'tbx'\n verbose = _('TBX')\n\n def 
get_storage(self):\n return tbxfile()\n\n\n@register_exporter\nclass TMXExporter(XMLExporter):\n name = 'tmx'\n content_type = 'application/x-tmx'\n extension = 'tmx'\n verbose = _('TMX')\n\n def get_storage(self):\n return tmxfile()\n\n\n@register_exporter\nclass MoExporter(PoExporter):\n name = 'mo'\n content_type = 'application/x-gettext-catalog'\n extension = 'mo'\n verbose = _('gettext MO')\n _storage = mofile\n\n def __init__(self, project=None, language=None, url=None,\n translation=None, fieldnames=None):\n super(MoExporter, self).__init__(\n project, language, url, translation, fieldnames\n )\n # Detect storage properties\n self.monolingual = False\n self.use_context = False\n if translation:\n self.monolingual = translation.component.has_template()\n if self.monolingual:\n unit = next(translation.store.translatable_units, None)\n self.use_context = unit is not None and not unit.template.source\n\n def store_flags(self, output, flags):\n return\n\n def add_unit(self, unit):\n # We do not store not translated units\n if not unit.translated:\n return\n # Parse properties from unit\n if self.monolingual:\n if self.use_context:\n source = ''\n context = unit.context\n else:\n source = unit.context\n context = ''\n else:\n source = self.handle_plurals(unit.get_source_plurals())\n context = unit.context\n # Actually create the unit and set attributes\n output = self.storage.UnitClass(source)\n output.target = self.handle_plurals(unit.get_target_plurals())\n if context:\n # The setcontext doesn't work on mounit\n output.msgctxt = [context]\n # Add unit to the storage\n self.storage.addunit(output)\n\n @staticmethod\n def supports(translation):\n return translation.component.file_format == 'po'\n\n\n@register_exporter\nclass CSVExporter(BaseExporter):\n name = 'csv'\n content_type = 'text/csv'\n extension = 'csv'\n verbose = _('CSV')\n\n def get_storage(self):\n return csvfile(fieldnames=self.fieldnames)\n\n def string_filter(self, text):\n \"\"\"Avoid Excel interpreting text as formula.\n\n This is really bad idea, implemented in Excel, as this change leads\n to displaying additional ' in all other tools, but this seems to be\n what most people have gotten used to. Hopefully these chars are not\n widely used at first position of translatable strings, so that harm is\n reduced.\n \"\"\"\n if text and text[0] in ('=', '+', '-', '@', '|', '%'):\n return \"'{0}'\".format(text.replace('|', '\\\\|'))\n return text\n\n\n@register_exporter\nclass XlsxExporter(BaseExporter):\n name = 'xlsx'\n content_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n extension = 'xlsx'\n verbose = _('Excel Open XML')\n\n def get_storage(self):\n return csvfile(fieldnames=self.fieldnames)\n\n def serialize(self):\n \"\"\"Return storage content\"\"\"\n return XlsxFormat.serialize(self.storage)\n", "path": "weblate/formats/exporters.py"}]} | 3,829 | 637 |
gh_patches_debug_32036 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-851 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve errors logging
Right now errors are being caught by GraphQL core and sent into the `errors` field of the GraphQL response. Unfortunately that makes things harder to debug locally; we should print better exceptions locally by using one of the following options:
```python
def process_result(
self, request: HttpRequest, result: ExecutionResult
) -> GraphQLHTTPResponse:
data: GraphQLHTTPResponse = {"data": result.data}
for error in result.errors:
actual_error = error.original_error or error
traceback.print_exception(
type(actual_error), actual_error, actual_error.__traceback__
)
return data
```
```python
def process_result(self, request, result):
data = super().process_result(request, result)
# Log errors
if result.errors:
for error in result.errors:
try:
if isinstance(error, GraphQLError) and error.original_error:
raise error.original_error
else:
raise error
except Exception as error:
logger.exception(error)
return data
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/schema/schema.py`
Content:
```
1 from typing import Any, Dict, List, Optional, Sequence, Type, Union
2
3 from graphql import (
4 ExecutionContext as GraphQLExecutionContext,
5 GraphQLSchema,
6 get_introspection_query,
7 parse,
8 validate_schema,
9 )
10 from graphql.subscription import subscribe
11 from graphql.type.directives import specified_directives
12
13 from strawberry.custom_scalar import ScalarDefinition
14 from strawberry.enum import EnumDefinition
15 from strawberry.extensions import Extension
16 from strawberry.schema.schema_converter import GraphQLCoreConverter
17 from strawberry.types import ExecutionResult
18 from strawberry.types.types import TypeDefinition
19 from strawberry.union import StrawberryUnion
20
21 from ..middleware import DirectivesMiddleware, Middleware
22 from ..printer import print_schema
23 from .execute import execute, execute_sync
24
25
26 class Schema:
27 def __init__(
28 self,
29 # TODO: can we make sure we only allow to pass something that has been decorated?
30 query: Type,
31 mutation: Optional[Type] = None,
32 subscription: Optional[Type] = None,
33 directives=(),
34 types=(),
35 extensions: Sequence[Type[Extension]] = (),
36 execution_context_class: Optional[Type[GraphQLExecutionContext]] = None,
37 ):
38 self.extensions = extensions
39 self.execution_context_class = execution_context_class
40 self.schema_converter = GraphQLCoreConverter()
41
42 query_type = self.schema_converter.from_object_type(query)
43 mutation_type = (
44 self.schema_converter.from_object_type(mutation) if mutation else None
45 )
46 subscription_type = (
47 self.schema_converter.from_object_type(subscription)
48 if subscription
49 else None
50 )
51
52 self.middleware: List[Middleware] = [DirectivesMiddleware(directives)]
53
54 directives = [
55 self.schema_converter.from_directive(directive.directive_definition)
56 for directive in directives
57 ]
58
59 self._schema = GraphQLSchema(
60 query=query_type,
61 mutation=mutation_type,
62 subscription=subscription_type if subscription else None,
63 directives=specified_directives + directives,
64 types=list(map(self.schema_converter.from_object_type, types)),
65 )
66
67 # Validate schema early because we want developers to know about
68 # possible issues as soon as possible
69 errors = validate_schema(self._schema)
70 if errors:
71 formatted_errors = "\n\n".join(f"❌ {error.message}" for error in errors)
72 raise ValueError(f"Invalid Schema. Errors:\n\n{formatted_errors}")
73
74 self.query = self.schema_converter.type_map[query_type.name]
75
76 def get_type_by_name(
77 self, name: str
78 ) -> Optional[
79 Union[TypeDefinition, ScalarDefinition, EnumDefinition, StrawberryUnion]
80 ]:
81 if name in self.schema_converter.type_map:
82 return self.schema_converter.type_map[name].definition
83
84 return None
85
86 async def execute(
87 self,
88 query: str,
89 variable_values: Optional[Dict[str, Any]] = None,
90 context_value: Optional[Any] = None,
91 root_value: Optional[Any] = None,
92 operation_name: Optional[str] = None,
93 validate_queries: bool = True,
94 ) -> ExecutionResult:
95 result = await execute(
96 self._schema,
97 query,
98 variable_values=variable_values,
99 root_value=root_value,
100 context_value=context_value,
101 operation_name=operation_name,
102 additional_middlewares=self.middleware,
103 extensions=self.extensions,
104 execution_context_class=self.execution_context_class,
105 validate_queries=validate_queries,
106 )
107
108 return ExecutionResult(
109 data=result.data,
110 errors=result.errors,
111 extensions=result.extensions,
112 )
113
114 def execute_sync(
115 self,
116 query: str,
117 variable_values: Optional[Dict[str, Any]] = None,
118 context_value: Optional[Any] = None,
119 root_value: Optional[Any] = None,
120 operation_name: Optional[str] = None,
121 validate_queries: bool = True,
122 ) -> ExecutionResult:
123 result = execute_sync(
124 self._schema,
125 query,
126 variable_values=variable_values,
127 root_value=root_value,
128 context_value=context_value,
129 operation_name=operation_name,
130 additional_middlewares=self.middleware,
131 extensions=self.extensions,
132 execution_context_class=self.execution_context_class,
133 validate_queries=validate_queries,
134 )
135
136 return ExecutionResult(
137 data=result.data,
138 errors=result.errors,
139 extensions=result.extensions,
140 )
141
142 async def subscribe(
143 self,
144 query: str,
145 variable_values: Optional[Dict[str, Any]] = None,
146 context_value: Optional[Any] = None,
147 root_value: Optional[Any] = None,
148 operation_name: Optional[str] = None,
149 ):
150 return await subscribe(
151 self._schema,
152 parse(query),
153 root_value=root_value,
154 context_value=context_value,
155 variable_values=variable_values,
156 operation_name=operation_name,
157 )
158
159 def as_str(self) -> str:
160 return print_schema(self)
161
162 __str__ = as_str
163
164 def introspect(self) -> Dict[str, Any]:
165 """Return the introspection query result for the current schema
166
167 Raises:
168 ValueError: If the introspection query fails due to an invalid schema
169 """
170 introspection = self.execute_sync(get_introspection_query())
171 if introspection.errors or not introspection.data:
172 raise ValueError(f"Invalid Schema. Errors {introspection.errors!r}")
173
174 return introspection.data
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/schema/schema.py b/strawberry/schema/schema.py
--- a/strawberry/schema/schema.py
+++ b/strawberry/schema/schema.py
@@ -1,3 +1,4 @@
+import logging
from typing import Any, Dict, List, Optional, Sequence, Type, Union
from graphql import (
@@ -7,6 +8,7 @@
parse,
validate_schema,
)
+from graphql.error import GraphQLError
from graphql.subscription import subscribe
from graphql.type.directives import specified_directives
@@ -23,6 +25,9 @@
from .execute import execute, execute_sync
+logger = logging.getLogger("strawberry.execution")
+
+
class Schema:
def __init__(
self,
@@ -83,6 +88,11 @@
return None
+ def process_errors(self, errors: List[GraphQLError]) -> None:
+ for error in errors:
+ actual_error = error.original_error or error
+ logger.error(actual_error, exc_info=actual_error)
+
async def execute(
self,
query: str,
@@ -105,6 +115,9 @@
validate_queries=validate_queries,
)
+ if result.errors:
+ self.process_errors(result.errors)
+
return ExecutionResult(
data=result.data,
errors=result.errors,
@@ -133,6 +146,9 @@
validate_queries=validate_queries,
)
+ if result.errors:
+ self.process_errors(result.errors)
+
return ExecutionResult(
data=result.data,
errors=result.errors,
| {"golden_diff": "diff --git a/strawberry/schema/schema.py b/strawberry/schema/schema.py\n--- a/strawberry/schema/schema.py\n+++ b/strawberry/schema/schema.py\n@@ -1,3 +1,4 @@\n+import logging\n from typing import Any, Dict, List, Optional, Sequence, Type, Union\n \n from graphql import (\n@@ -7,6 +8,7 @@\n parse,\n validate_schema,\n )\n+from graphql.error import GraphQLError\n from graphql.subscription import subscribe\n from graphql.type.directives import specified_directives\n \n@@ -23,6 +25,9 @@\n from .execute import execute, execute_sync\n \n \n+logger = logging.getLogger(\"strawberry.execution\")\n+\n+\n class Schema:\n def __init__(\n self,\n@@ -83,6 +88,11 @@\n \n return None\n \n+ def process_errors(self, errors: List[GraphQLError]) -> None:\n+ for error in errors:\n+ actual_error = error.original_error or error\n+ logger.error(actual_error, exc_info=actual_error)\n+\n async def execute(\n self,\n query: str,\n@@ -105,6 +115,9 @@\n validate_queries=validate_queries,\n )\n \n+ if result.errors:\n+ self.process_errors(result.errors)\n+\n return ExecutionResult(\n data=result.data,\n errors=result.errors,\n@@ -133,6 +146,9 @@\n validate_queries=validate_queries,\n )\n \n+ if result.errors:\n+ self.process_errors(result.errors)\n+\n return ExecutionResult(\n data=result.data,\n errors=result.errors,\n", "issue": "Improve errors logging\nRight now errors are being caught by GraphQL core and sent into the `errors` fields of the GraphQL response. Unfortunately that makes things harder to debug locally; we should print better exceptions locally, by using one of the following options:\r\n\r\n```python\r\n def process_result(\r\n self, request: HttpRequest, result: ExecutionResult\r\n ) -> GraphQLHTTPResponse:\r\n data: GraphQLHTTPResponse = {\"data\": result.data}\r\n\r\n for error in result.errors:\r\n actual_error = error.original_error or error\r\n\r\n traceback.print_exception(\r\n type(actual_error), actual_error, actual_error.__traceback__\r\n )\r\n\r\n return data\r\n```\r\n\r\n```python\r\n def process_result(self, request, result):\r\n data = super().process_result(request, result)\r\n\r\n # Log errors\r\n if result.errors:\r\n for error in result.errors:\r\n try:\r\n if isinstance(error, GraphQLError) and error.original_error:\r\n raise error.original_error\r\n else:\r\n raise error\r\n except Exception as error:\r\n logger.exception(error)\r\n\r\n return data\r\n```\n", "before_files": [{"content": "from typing import Any, Dict, List, Optional, Sequence, Type, Union\n\nfrom graphql import (\n ExecutionContext as GraphQLExecutionContext,\n GraphQLSchema,\n get_introspection_query,\n parse,\n validate_schema,\n)\nfrom graphql.subscription import subscribe\nfrom graphql.type.directives import specified_directives\n\nfrom strawberry.custom_scalar import ScalarDefinition\nfrom strawberry.enum import EnumDefinition\nfrom strawberry.extensions import Extension\nfrom strawberry.schema.schema_converter import GraphQLCoreConverter\nfrom strawberry.types import ExecutionResult\nfrom strawberry.types.types import TypeDefinition\nfrom strawberry.union import StrawberryUnion\n\nfrom ..middleware import DirectivesMiddleware, Middleware\nfrom ..printer import print_schema\nfrom .execute import execute, execute_sync\n\n\nclass Schema:\n def __init__(\n self,\n # TODO: can we make sure we only allow to pass something that has been decorated?\n query: Type,\n mutation: Optional[Type] = None,\n subscription: Optional[Type] = None,\n directives=(),\n types=(),\n extensions: 
Sequence[Type[Extension]] = (),\n execution_context_class: Optional[Type[GraphQLExecutionContext]] = None,\n ):\n self.extensions = extensions\n self.execution_context_class = execution_context_class\n self.schema_converter = GraphQLCoreConverter()\n\n query_type = self.schema_converter.from_object_type(query)\n mutation_type = (\n self.schema_converter.from_object_type(mutation) if mutation else None\n )\n subscription_type = (\n self.schema_converter.from_object_type(subscription)\n if subscription\n else None\n )\n\n self.middleware: List[Middleware] = [DirectivesMiddleware(directives)]\n\n directives = [\n self.schema_converter.from_directive(directive.directive_definition)\n for directive in directives\n ]\n\n self._schema = GraphQLSchema(\n query=query_type,\n mutation=mutation_type,\n subscription=subscription_type if subscription else None,\n directives=specified_directives + directives,\n types=list(map(self.schema_converter.from_object_type, types)),\n )\n\n # Validate schema early because we want developers to know about\n # possible issues as soon as possible\n errors = validate_schema(self._schema)\n if errors:\n formatted_errors = \"\\n\\n\".join(f\"\u274c {error.message}\" for error in errors)\n raise ValueError(f\"Invalid Schema. Errors:\\n\\n{formatted_errors}\")\n\n self.query = self.schema_converter.type_map[query_type.name]\n\n def get_type_by_name(\n self, name: str\n ) -> Optional[\n Union[TypeDefinition, ScalarDefinition, EnumDefinition, StrawberryUnion]\n ]:\n if name in self.schema_converter.type_map:\n return self.schema_converter.type_map[name].definition\n\n return None\n\n async def execute(\n self,\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n validate_queries: bool = True,\n ) -> ExecutionResult:\n result = await execute(\n self._schema,\n query,\n variable_values=variable_values,\n root_value=root_value,\n context_value=context_value,\n operation_name=operation_name,\n additional_middlewares=self.middleware,\n extensions=self.extensions,\n execution_context_class=self.execution_context_class,\n validate_queries=validate_queries,\n )\n\n return ExecutionResult(\n data=result.data,\n errors=result.errors,\n extensions=result.extensions,\n )\n\n def execute_sync(\n self,\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n validate_queries: bool = True,\n ) -> ExecutionResult:\n result = execute_sync(\n self._schema,\n query,\n variable_values=variable_values,\n root_value=root_value,\n context_value=context_value,\n operation_name=operation_name,\n additional_middlewares=self.middleware,\n extensions=self.extensions,\n execution_context_class=self.execution_context_class,\n validate_queries=validate_queries,\n )\n\n return ExecutionResult(\n data=result.data,\n errors=result.errors,\n extensions=result.extensions,\n )\n\n async def subscribe(\n self,\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n ):\n return await subscribe(\n self._schema,\n parse(query),\n root_value=root_value,\n context_value=context_value,\n variable_values=variable_values,\n operation_name=operation_name,\n )\n\n def as_str(self) -> str:\n return print_schema(self)\n\n __str__ = as_str\n\n 
def introspect(self) -> Dict[str, Any]:\n \"\"\"Return the introspection query result for the current schema\n\n Raises:\n ValueError: If the introspection query fails due to an invalid schema\n \"\"\"\n introspection = self.execute_sync(get_introspection_query())\n if introspection.errors or not introspection.data:\n raise ValueError(f\"Invalid Schema. Errors {introspection.errors!r}\")\n\n return introspection.data\n", "path": "strawberry/schema/schema.py"}], "after_files": [{"content": "import logging\nfrom typing import Any, Dict, List, Optional, Sequence, Type, Union\n\nfrom graphql import (\n ExecutionContext as GraphQLExecutionContext,\n GraphQLSchema,\n get_introspection_query,\n parse,\n validate_schema,\n)\nfrom graphql.error import GraphQLError\nfrom graphql.subscription import subscribe\nfrom graphql.type.directives import specified_directives\n\nfrom strawberry.custom_scalar import ScalarDefinition\nfrom strawberry.enum import EnumDefinition\nfrom strawberry.extensions import Extension\nfrom strawberry.schema.schema_converter import GraphQLCoreConverter\nfrom strawberry.types import ExecutionResult\nfrom strawberry.types.types import TypeDefinition\nfrom strawberry.union import StrawberryUnion\n\nfrom ..middleware import DirectivesMiddleware, Middleware\nfrom ..printer import print_schema\nfrom .execute import execute, execute_sync\n\n\nlogger = logging.getLogger(\"strawberry.execution\")\n\n\nclass Schema:\n def __init__(\n self,\n # TODO: can we make sure we only allow to pass something that has been decorated?\n query: Type,\n mutation: Optional[Type] = None,\n subscription: Optional[Type] = None,\n directives=(),\n types=(),\n extensions: Sequence[Type[Extension]] = (),\n execution_context_class: Optional[Type[GraphQLExecutionContext]] = None,\n ):\n self.extensions = extensions\n self.execution_context_class = execution_context_class\n self.schema_converter = GraphQLCoreConverter()\n\n query_type = self.schema_converter.from_object_type(query)\n mutation_type = (\n self.schema_converter.from_object_type(mutation) if mutation else None\n )\n subscription_type = (\n self.schema_converter.from_object_type(subscription)\n if subscription\n else None\n )\n\n self.middleware: List[Middleware] = [DirectivesMiddleware(directives)]\n\n directives = [\n self.schema_converter.from_directive(directive.directive_definition)\n for directive in directives\n ]\n\n self._schema = GraphQLSchema(\n query=query_type,\n mutation=mutation_type,\n subscription=subscription_type if subscription else None,\n directives=specified_directives + directives,\n types=list(map(self.schema_converter.from_object_type, types)),\n )\n\n # Validate schema early because we want developers to know about\n # possible issues as soon as possible\n errors = validate_schema(self._schema)\n if errors:\n formatted_errors = \"\\n\\n\".join(f\"\u274c {error.message}\" for error in errors)\n raise ValueError(f\"Invalid Schema. 
Errors:\\n\\n{formatted_errors}\")\n\n self.query = self.schema_converter.type_map[query_type.name]\n\n def get_type_by_name(\n self, name: str\n ) -> Optional[\n Union[TypeDefinition, ScalarDefinition, EnumDefinition, StrawberryUnion]\n ]:\n if name in self.schema_converter.type_map:\n return self.schema_converter.type_map[name].definition\n\n return None\n\n def process_errors(self, errors: List[GraphQLError]) -> None:\n for error in errors:\n actual_error = error.original_error or error\n logger.error(actual_error, exc_info=actual_error)\n\n async def execute(\n self,\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n validate_queries: bool = True,\n ) -> ExecutionResult:\n result = await execute(\n self._schema,\n query,\n variable_values=variable_values,\n root_value=root_value,\n context_value=context_value,\n operation_name=operation_name,\n additional_middlewares=self.middleware,\n extensions=self.extensions,\n execution_context_class=self.execution_context_class,\n validate_queries=validate_queries,\n )\n\n if result.errors:\n self.process_errors(result.errors)\n\n return ExecutionResult(\n data=result.data,\n errors=result.errors,\n extensions=result.extensions,\n )\n\n def execute_sync(\n self,\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n validate_queries: bool = True,\n ) -> ExecutionResult:\n result = execute_sync(\n self._schema,\n query,\n variable_values=variable_values,\n root_value=root_value,\n context_value=context_value,\n operation_name=operation_name,\n additional_middlewares=self.middleware,\n extensions=self.extensions,\n execution_context_class=self.execution_context_class,\n validate_queries=validate_queries,\n )\n\n if result.errors:\n self.process_errors(result.errors)\n\n return ExecutionResult(\n data=result.data,\n errors=result.errors,\n extensions=result.extensions,\n )\n\n async def subscribe(\n self,\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n ):\n return await subscribe(\n self._schema,\n parse(query),\n root_value=root_value,\n context_value=context_value,\n variable_values=variable_values,\n operation_name=operation_name,\n )\n\n def as_str(self) -> str:\n return print_schema(self)\n\n __str__ = as_str\n\n def introspect(self) -> Dict[str, Any]:\n \"\"\"Return the introspection query result for the current schema\n\n Raises:\n ValueError: If the introspection query fails due to an invalid schema\n \"\"\"\n introspection = self.execute_sync(get_introspection_query())\n if introspection.errors or not introspection.data:\n raise ValueError(f\"Invalid Schema. Errors {introspection.errors!r}\")\n\n return introspection.data\n", "path": "strawberry/schema/schema.py"}]} | 2,023 | 353 |
gh_patches_debug_7799 | rasdani/github-patches | git_diff | e2nIEE__pandapower-1329 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'plot_gens' unexpected in simple_plot()
It seems that `plot_gens` is an argument in `simple_plot()`, but it is not documented. When attempting to set `plot_gens` when calling `simple_plot()`, I get the following error:
> TypeError: simple_plot() got an unexpected keyword argument 'plot_gens'
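For reference, a minimal snippet that triggers the error (a sketch only; it assumes one of the example networks bundled with pandapower, e.g. `mv_oberrhein`):
```
import pandapower.networks as nw
from pandapower.plotting import simple_plot

net = nw.mv_oberrhein()  # any example network will do
# on pandapower 2.7.0 this raises:
# TypeError: simple_plot() got an unexpected keyword argument 'plot_gens'
simple_plot(net, plot_gens=True)
```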
Version: 2.7.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandapower/plotting/simple_plot.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
4 # and Energy System Technology (IEE), Kassel. All rights reserved.
5
6
7 import matplotlib.pyplot as plt
8
9 from pandapower.plotting.plotting_toolbox import get_collection_sizes
10 from pandapower.plotting.collections import create_bus_collection, create_line_collection, \
11 create_trafo_collection, create_trafo3w_collection, \
12 create_line_switch_collection, draw_collections, create_bus_bus_switch_collection, create_sgen_collection, \
13 create_gen_collection, create_load_collection
14 from pandapower.plotting.generic_geodata import create_generic_coordinates
15
16 try:
17 import pplog as logging
18 except ImportError:
19 import logging
20
21 logger = logging.getLogger(__name__)
22
23
24 def simple_plot(net, respect_switches=False, line_width=1.0, bus_size=1.0, ext_grid_size=1.0,
25 trafo_size=1.0, plot_loads=False, plot_gens=False, plot_sgens=False, load_size=1.0, gen_size=1.0, sgen_size=1.0,
26 switch_size=2.0, switch_distance=1.0, plot_line_switches=False, scale_size=True,
27 bus_color="b", line_color='grey', trafo_color='k', ext_grid_color='y',
28 switch_color='k', library="igraph", show_plot=True, ax=None):
29 """
30 Plots a pandapower network as simple as possible. If no geodata is available, artificial
31 geodata is generated. For advanced plotting see the tutorial
32
33 INPUT:
34 **net** - The pandapower format network.
35
36 OPTIONAL:
37 **respect_switches** (bool, False) - Respect switches if artificial geodata is created.
38 This Flag is ignored if plot_line_switches is True
39
40 **line_width** (float, 1.0) - width of lines
41
42 **bus_size** (float, 1.0) - Relative size of buses to plot.
43 The value bus_size is multiplied with mean_distance_between_buses, which equals the
44 distance between
45 the max geoocord and the min divided by 200.
46 mean_distance_between_buses = sum((net['bus_geodata'].max() - net['bus_geodata'].min()) / 200)
47
48 **ext_grid_size** (float, 1.0) - Relative size of ext_grids to plot. See bus sizes for details.
49 Note: ext_grids are plottet as rectangles
50
51 **trafo_size** (float, 1.0) - Relative size of trafos to plot.
52
53 **plot_loads** (bool, False) - Flag to decide whether load symbols should be drawn.
54
55 **plot_sgens** (bool, False) - Flag to decide whether sgen symbols should be drawn.
56
57 **load_size** (float, 1.0) - Relative size of loads to plot.
58
59 **sgen_size** (float, 1.0) - Relative size of sgens to plot.
60
61 **switch_size** (float, 2.0) - Relative size of switches to plot. See bus size for details
62
63 **switch_distance** (float, 1.0) - Relative distance of the switch to its corresponding \
64 bus. See bus size for details
65
66 **plot_line_switches** (bool, False) - Flag if line switches are plotted
67
68 **scale_size** (bool, True) - Flag if bus_size, ext_grid_size, bus_size- and distance \
69 will be scaled with respect to grid mean distances
70
71 **bus_color** (String, colors[0]) - Bus Color. Init as first value of color palette. Usually colors[0] = "b".
72
73 **line_color** (String, 'grey') - Line Color. Init is grey
74
75 **trafo_color** (String, 'k') - Trafo Color. Init is black
76
77 **ext_grid_color** (String, 'y') - External Grid Color. Init is yellow
78
79 **switch_color** (String, 'k') - Switch Color. Init is black
80
81 **library** (String, "igraph") - library name to create generic coordinates (case of
82 missing geodata). "igraph" to use igraph package or "networkx" to use networkx package.
83
84 **show_plot** (bool, True) - Shows plot at the end of plotting
85
86 **ax** (object, None) - matplotlib axis to plot to
87
88 OUTPUT:
89 **ax** - axes of figure
90 """
91 # don't hide lines if switches are plotted
92 if plot_line_switches:
93 respect_switches = False
94
95 # create geocoord if none are available
96 if len(net.line_geodata) == 0 and len(net.bus_geodata) == 0:
97 logger.warning("No or insufficient geodata available --> Creating artificial coordinates." +
98 " This may take some time")
99 create_generic_coordinates(net, respect_switches=respect_switches, library=library)
100
101 if scale_size:
102 # if scale_size -> calc size from distance between min and max geocoord
103 sizes = get_collection_sizes(net, bus_size, ext_grid_size, trafo_size,
104 load_size, sgen_size, switch_size, switch_distance)
105 bus_size = sizes["bus"]
106 ext_grid_size = sizes["ext_grid"]
107 trafo_size = sizes["trafo"]
108 sgen_size = sizes["sgen"]
109 load_size = sizes["load"]
110 switch_size = sizes["switch"]
111 switch_distance = sizes["switch_distance"]
112
113 # create bus collections to plot
114 bc = create_bus_collection(net, net.bus.index, size=bus_size, color=bus_color, zorder=10)
115
116 # if bus geodata is available, but no line geodata
117 use_bus_geodata = len(net.line_geodata) == 0
118 in_service_lines = net.line[net.line.in_service].index
119 nogolines = set(net.switch.element[(net.switch.et == "l") & (net.switch.closed == 0)]) \
120 if respect_switches else set()
121 plot_lines = in_service_lines.difference(nogolines)
122
123 # create line collections
124 lc = create_line_collection(net, plot_lines, color=line_color, linewidths=line_width,
125 use_bus_geodata=use_bus_geodata)
126 collections = [bc, lc]
127
128 # create ext_grid collections
129 eg_buses_with_geo_coordinates = set(net.ext_grid.bus.values) & set(net.bus_geodata.index)
130 if len(eg_buses_with_geo_coordinates) > 0:
131 sc = create_bus_collection(net, eg_buses_with_geo_coordinates, patch_type="rect",
132 size=ext_grid_size, color=ext_grid_color, zorder=11)
133 collections.append(sc)
134
135 # create trafo collection if trafo is available
136 trafo_buses_with_geo_coordinates = [t for t, trafo in net.trafo.iterrows()
137 if trafo.hv_bus in net.bus_geodata.index and
138 trafo.lv_bus in net.bus_geodata.index]
139 if len(trafo_buses_with_geo_coordinates) > 0:
140 tc = create_trafo_collection(net, trafo_buses_with_geo_coordinates,
141 color=trafo_color, size=trafo_size)
142 collections.append(tc)
143
144 # create trafo3w collection if trafo3w is available
145 trafo3w_buses_with_geo_coordinates = [
146 t for t, trafo3w in net.trafo3w.iterrows() if trafo3w.hv_bus in net.bus_geodata.index and
147 trafo3w.mv_bus in net.bus_geodata.index and trafo3w.lv_bus in net.bus_geodata.index]
148 if len(trafo3w_buses_with_geo_coordinates) > 0:
149 tc = create_trafo3w_collection(net, trafo3w_buses_with_geo_coordinates,
150 color=trafo_color)
151 collections.append(tc)
152
153 if plot_line_switches and len(net.switch):
154 sc = create_line_switch_collection(
155 net, size=switch_size, distance_to_bus=switch_distance,
156 use_line_geodata=not use_bus_geodata, zorder=12, color=switch_color)
157 collections.append(sc)
158
159 if plot_sgens and len(net.sgen):
160 sgc = create_sgen_collection(net, size=sgen_size)
161 collections.append(sgc)
162 if plot_gens and len(net.gen):
163 gc = create_gen_collection(net, size=gen_size)
164 collections.append(gc)
165 if plot_loads and len(net.load):
166 lc = create_load_collection(net, size=load_size)
167 collections.append(lc)
168
169 if len(net.switch):
170 bsc = create_bus_bus_switch_collection(net, size=switch_size)
171 collections.append(bsc)
172
173 ax = draw_collections(collections, ax=ax)
174 if show_plot:
175 plt.show()
176 return ax
177
178
179 if __name__ == "__main__":
180 import pandapower.networks as nw
181
182 net = nw.case145()
183 # net = nw.create_cigre_network_mv()
184 # net = nw.mv_oberrhein()
185 simple_plot(net, bus_size=0.4)
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pandapower/plotting/simple_plot.py b/pandapower/plotting/simple_plot.py
--- a/pandapower/plotting/simple_plot.py
+++ b/pandapower/plotting/simple_plot.py
@@ -51,6 +51,8 @@
**trafo_size** (float, 1.0) - Relative size of trafos to plot.
**plot_loads** (bool, False) - Flag to decide whether load symbols should be drawn.
+
+ **plot_gens** (bool, False) - Flag to decide whether gen symbols should be drawn.
**plot_sgens** (bool, False) - Flag to decide whether sgen symbols should be drawn.
| {"golden_diff": "diff --git a/pandapower/plotting/simple_plot.py b/pandapower/plotting/simple_plot.py\n--- a/pandapower/plotting/simple_plot.py\n+++ b/pandapower/plotting/simple_plot.py\n@@ -51,6 +51,8 @@\n **trafo_size** (float, 1.0) - Relative size of trafos to plot.\n \n **plot_loads** (bool, False) - Flag to decide whether load symbols should be drawn.\n+ \n+ **plot_gens** (bool, False) - Flag to decide whether gen symbols should be drawn.\n \n **plot_sgens** (bool, False) - Flag to decide whether sgen symbols should be drawn.\n", "issue": "'plot_gens' unexpected in simple_plot()\nIt seems that `plot_gens` is an argument in `simple_plot()`, but is not documented. When attempting to set `plot_gens` when calling `simple_plot()`, I get the following error:\r\n\r\n> TypeError: simple_plot() got an unexpected keyword argument 'plot_gens'\r\n\r\nVersion: 2.7.0\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\nimport matplotlib.pyplot as plt\n\nfrom pandapower.plotting.plotting_toolbox import get_collection_sizes\nfrom pandapower.plotting.collections import create_bus_collection, create_line_collection, \\\n create_trafo_collection, create_trafo3w_collection, \\\n create_line_switch_collection, draw_collections, create_bus_bus_switch_collection, create_sgen_collection, \\\n create_gen_collection, create_load_collection\nfrom pandapower.plotting.generic_geodata import create_generic_coordinates\n\ntry:\n import pplog as logging\nexcept ImportError:\n import logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef simple_plot(net, respect_switches=False, line_width=1.0, bus_size=1.0, ext_grid_size=1.0,\n trafo_size=1.0, plot_loads=False, plot_gens=False, plot_sgens=False, load_size=1.0, gen_size=1.0, sgen_size=1.0,\n switch_size=2.0, switch_distance=1.0, plot_line_switches=False, scale_size=True,\n bus_color=\"b\", line_color='grey', trafo_color='k', ext_grid_color='y',\n switch_color='k', library=\"igraph\", show_plot=True, ax=None):\n \"\"\"\n Plots a pandapower network as simple as possible. If no geodata is available, artificial\n geodata is generated. For advanced plotting see the tutorial\n\n INPUT:\n **net** - The pandapower format network.\n\n OPTIONAL:\n **respect_switches** (bool, False) - Respect switches if artificial geodata is created.\n This Flag is ignored if plot_line_switches is True\n\n **line_width** (float, 1.0) - width of lines\n\n **bus_size** (float, 1.0) - Relative size of buses to plot.\n The value bus_size is multiplied with mean_distance_between_buses, which equals the\n distance between\n the max geoocord and the min divided by 200.\n mean_distance_between_buses = sum((net['bus_geodata'].max() - net['bus_geodata'].min()) / 200)\n\n **ext_grid_size** (float, 1.0) - Relative size of ext_grids to plot. See bus sizes for details.\n Note: ext_grids are plottet as rectangles\n\n **trafo_size** (float, 1.0) - Relative size of trafos to plot.\n\n **plot_loads** (bool, False) - Flag to decide whether load symbols should be drawn.\n\n **plot_sgens** (bool, False) - Flag to decide whether sgen symbols should be drawn.\n\n **load_size** (float, 1.0) - Relative size of loads to plot.\n\n **sgen_size** (float, 1.0) - Relative size of sgens to plot.\n\n **switch_size** (float, 2.0) - Relative size of switches to plot. 
See bus size for details\n\n **switch_distance** (float, 1.0) - Relative distance of the switch to its corresponding \\\n bus. See bus size for details\n\n **plot_line_switches** (bool, False) - Flag if line switches are plotted\n\n **scale_size** (bool, True) - Flag if bus_size, ext_grid_size, bus_size- and distance \\\n will be scaled with respect to grid mean distances\n\n **bus_color** (String, colors[0]) - Bus Color. Init as first value of color palette. Usually colors[0] = \"b\".\n\n **line_color** (String, 'grey') - Line Color. Init is grey\n\n **trafo_color** (String, 'k') - Trafo Color. Init is black\n\n **ext_grid_color** (String, 'y') - External Grid Color. Init is yellow\n\n **switch_color** (String, 'k') - Switch Color. Init is black\n\n **library** (String, \"igraph\") - library name to create generic coordinates (case of\n missing geodata). \"igraph\" to use igraph package or \"networkx\" to use networkx package.\n\n **show_plot** (bool, True) - Shows plot at the end of plotting\n\n **ax** (object, None) - matplotlib axis to plot to\n\n OUTPUT:\n **ax** - axes of figure\n \"\"\"\n # don't hide lines if switches are plotted\n if plot_line_switches:\n respect_switches = False\n\n # create geocoord if none are available\n if len(net.line_geodata) == 0 and len(net.bus_geodata) == 0:\n logger.warning(\"No or insufficient geodata available --> Creating artificial coordinates.\" +\n \" This may take some time\")\n create_generic_coordinates(net, respect_switches=respect_switches, library=library)\n\n if scale_size:\n # if scale_size -> calc size from distance between min and max geocoord\n sizes = get_collection_sizes(net, bus_size, ext_grid_size, trafo_size,\n load_size, sgen_size, switch_size, switch_distance)\n bus_size = sizes[\"bus\"]\n ext_grid_size = sizes[\"ext_grid\"]\n trafo_size = sizes[\"trafo\"]\n sgen_size = sizes[\"sgen\"]\n load_size = sizes[\"load\"]\n switch_size = sizes[\"switch\"]\n switch_distance = sizes[\"switch_distance\"]\n\n # create bus collections to plot\n bc = create_bus_collection(net, net.bus.index, size=bus_size, color=bus_color, zorder=10)\n\n # if bus geodata is available, but no line geodata\n use_bus_geodata = len(net.line_geodata) == 0\n in_service_lines = net.line[net.line.in_service].index\n nogolines = set(net.switch.element[(net.switch.et == \"l\") & (net.switch.closed == 0)]) \\\n if respect_switches else set()\n plot_lines = in_service_lines.difference(nogolines)\n\n # create line collections\n lc = create_line_collection(net, plot_lines, color=line_color, linewidths=line_width,\n use_bus_geodata=use_bus_geodata)\n collections = [bc, lc]\n\n # create ext_grid collections\n eg_buses_with_geo_coordinates = set(net.ext_grid.bus.values) & set(net.bus_geodata.index)\n if len(eg_buses_with_geo_coordinates) > 0:\n sc = create_bus_collection(net, eg_buses_with_geo_coordinates, patch_type=\"rect\",\n size=ext_grid_size, color=ext_grid_color, zorder=11)\n collections.append(sc)\n\n # create trafo collection if trafo is available\n trafo_buses_with_geo_coordinates = [t for t, trafo in net.trafo.iterrows()\n if trafo.hv_bus in net.bus_geodata.index and\n trafo.lv_bus in net.bus_geodata.index]\n if len(trafo_buses_with_geo_coordinates) > 0:\n tc = create_trafo_collection(net, trafo_buses_with_geo_coordinates,\n color=trafo_color, size=trafo_size)\n collections.append(tc)\n\n # create trafo3w collection if trafo3w is available\n trafo3w_buses_with_geo_coordinates = [\n t for t, trafo3w in net.trafo3w.iterrows() if trafo3w.hv_bus in 
net.bus_geodata.index and\n trafo3w.mv_bus in net.bus_geodata.index and trafo3w.lv_bus in net.bus_geodata.index]\n if len(trafo3w_buses_with_geo_coordinates) > 0:\n tc = create_trafo3w_collection(net, trafo3w_buses_with_geo_coordinates,\n color=trafo_color)\n collections.append(tc)\n\n if plot_line_switches and len(net.switch):\n sc = create_line_switch_collection(\n net, size=switch_size, distance_to_bus=switch_distance,\n use_line_geodata=not use_bus_geodata, zorder=12, color=switch_color)\n collections.append(sc)\n\n if plot_sgens and len(net.sgen):\n sgc = create_sgen_collection(net, size=sgen_size)\n collections.append(sgc)\n if plot_gens and len(net.gen):\n gc = create_gen_collection(net, size=gen_size)\n collections.append(gc)\n if plot_loads and len(net.load):\n lc = create_load_collection(net, size=load_size)\n collections.append(lc)\n\n if len(net.switch):\n bsc = create_bus_bus_switch_collection(net, size=switch_size)\n collections.append(bsc)\n\n ax = draw_collections(collections, ax=ax)\n if show_plot:\n plt.show()\n return ax\n\n\nif __name__ == \"__main__\":\n import pandapower.networks as nw\n\n net = nw.case145()\n # net = nw.create_cigre_network_mv()\n # net = nw.mv_oberrhein()\n simple_plot(net, bus_size=0.4)\n", "path": "pandapower/plotting/simple_plot.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\n\nimport matplotlib.pyplot as plt\n\nfrom pandapower.plotting.plotting_toolbox import get_collection_sizes\nfrom pandapower.plotting.collections import create_bus_collection, create_line_collection, \\\n create_trafo_collection, create_trafo3w_collection, \\\n create_line_switch_collection, draw_collections, create_bus_bus_switch_collection, create_sgen_collection, \\\n create_gen_collection, create_load_collection\nfrom pandapower.plotting.generic_geodata import create_generic_coordinates\n\ntry:\n import pplog as logging\nexcept ImportError:\n import logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef simple_plot(net, respect_switches=False, line_width=1.0, bus_size=1.0, ext_grid_size=1.0,\n trafo_size=1.0, plot_loads=False, plot_gens=False, plot_sgens=False, load_size=1.0, gen_size=1.0, sgen_size=1.0,\n switch_size=2.0, switch_distance=1.0, plot_line_switches=False, scale_size=True,\n bus_color=\"b\", line_color='grey', trafo_color='k', ext_grid_color='y',\n switch_color='k', library=\"igraph\", show_plot=True, ax=None):\n \"\"\"\n Plots a pandapower network as simple as possible. If no geodata is available, artificial\n geodata is generated. For advanced plotting see the tutorial\n\n INPUT:\n **net** - The pandapower format network.\n\n OPTIONAL:\n **respect_switches** (bool, False) - Respect switches if artificial geodata is created.\n This Flag is ignored if plot_line_switches is True\n\n **line_width** (float, 1.0) - width of lines\n\n **bus_size** (float, 1.0) - Relative size of buses to plot.\n The value bus_size is multiplied with mean_distance_between_buses, which equals the\n distance between\n the max geoocord and the min divided by 200.\n mean_distance_between_buses = sum((net['bus_geodata'].max() - net['bus_geodata'].min()) / 200)\n\n **ext_grid_size** (float, 1.0) - Relative size of ext_grids to plot. 
See bus sizes for details.\n Note: ext_grids are plottet as rectangles\n\n **trafo_size** (float, 1.0) - Relative size of trafos to plot.\n\n **plot_loads** (bool, False) - Flag to decide whether load symbols should be drawn.\n \n **plot_gens** (bool, False) - Flag to decide whether gen symbols should be drawn.\n\n **plot_sgens** (bool, False) - Flag to decide whether sgen symbols should be drawn.\n\n **load_size** (float, 1.0) - Relative size of loads to plot.\n\n **sgen_size** (float, 1.0) - Relative size of sgens to plot.\n\n **switch_size** (float, 2.0) - Relative size of switches to plot. See bus size for details\n\n **switch_distance** (float, 1.0) - Relative distance of the switch to its corresponding \\\n bus. See bus size for details\n\n **plot_line_switches** (bool, False) - Flag if line switches are plotted\n\n **scale_size** (bool, True) - Flag if bus_size, ext_grid_size, bus_size- and distance \\\n will be scaled with respect to grid mean distances\n\n **bus_color** (String, colors[0]) - Bus Color. Init as first value of color palette. Usually colors[0] = \"b\".\n\n **line_color** (String, 'grey') - Line Color. Init is grey\n\n **trafo_color** (String, 'k') - Trafo Color. Init is black\n\n **ext_grid_color** (String, 'y') - External Grid Color. Init is yellow\n\n **switch_color** (String, 'k') - Switch Color. Init is black\n\n **library** (String, \"igraph\") - library name to create generic coordinates (case of\n missing geodata). \"igraph\" to use igraph package or \"networkx\" to use networkx package.\n\n **show_plot** (bool, True) - Shows plot at the end of plotting\n\n **ax** (object, None) - matplotlib axis to plot to\n\n OUTPUT:\n **ax** - axes of figure\n \"\"\"\n # don't hide lines if switches are plotted\n if plot_line_switches:\n respect_switches = False\n\n # create geocoord if none are available\n if len(net.line_geodata) == 0 and len(net.bus_geodata) == 0:\n logger.warning(\"No or insufficient geodata available --> Creating artificial coordinates.\" +\n \" This may take some time\")\n create_generic_coordinates(net, respect_switches=respect_switches, library=library)\n\n if scale_size:\n # if scale_size -> calc size from distance between min and max geocoord\n sizes = get_collection_sizes(net, bus_size, ext_grid_size, trafo_size,\n load_size, sgen_size, switch_size, switch_distance)\n bus_size = sizes[\"bus\"]\n ext_grid_size = sizes[\"ext_grid\"]\n trafo_size = sizes[\"trafo\"]\n sgen_size = sizes[\"sgen\"]\n load_size = sizes[\"load\"]\n switch_size = sizes[\"switch\"]\n switch_distance = sizes[\"switch_distance\"]\n\n # create bus collections to plot\n bc = create_bus_collection(net, net.bus.index, size=bus_size, color=bus_color, zorder=10)\n\n # if bus geodata is available, but no line geodata\n use_bus_geodata = len(net.line_geodata) == 0\n in_service_lines = net.line[net.line.in_service].index\n nogolines = set(net.switch.element[(net.switch.et == \"l\") & (net.switch.closed == 0)]) \\\n if respect_switches else set()\n plot_lines = in_service_lines.difference(nogolines)\n\n # create line collections\n lc = create_line_collection(net, plot_lines, color=line_color, linewidths=line_width,\n use_bus_geodata=use_bus_geodata)\n collections = [bc, lc]\n\n # create ext_grid collections\n eg_buses_with_geo_coordinates = set(net.ext_grid.bus.values) & set(net.bus_geodata.index)\n if len(eg_buses_with_geo_coordinates) > 0:\n sc = create_bus_collection(net, eg_buses_with_geo_coordinates, patch_type=\"rect\",\n size=ext_grid_size, color=ext_grid_color, zorder=11)\n 
collections.append(sc)\n\n # create trafo collection if trafo is available\n trafo_buses_with_geo_coordinates = [t for t, trafo in net.trafo.iterrows()\n if trafo.hv_bus in net.bus_geodata.index and\n trafo.lv_bus in net.bus_geodata.index]\n if len(trafo_buses_with_geo_coordinates) > 0:\n tc = create_trafo_collection(net, trafo_buses_with_geo_coordinates,\n color=trafo_color, size=trafo_size)\n collections.append(tc)\n\n # create trafo3w collection if trafo3w is available\n trafo3w_buses_with_geo_coordinates = [\n t for t, trafo3w in net.trafo3w.iterrows() if trafo3w.hv_bus in net.bus_geodata.index and\n trafo3w.mv_bus in net.bus_geodata.index and trafo3w.lv_bus in net.bus_geodata.index]\n if len(trafo3w_buses_with_geo_coordinates) > 0:\n tc = create_trafo3w_collection(net, trafo3w_buses_with_geo_coordinates,\n color=trafo_color)\n collections.append(tc)\n\n if plot_line_switches and len(net.switch):\n sc = create_line_switch_collection(\n net, size=switch_size, distance_to_bus=switch_distance,\n use_line_geodata=not use_bus_geodata, zorder=12, color=switch_color)\n collections.append(sc)\n\n if plot_sgens and len(net.sgen):\n sgc = create_sgen_collection(net, size=sgen_size)\n collections.append(sgc)\n if plot_gens and len(net.gen):\n gc = create_gen_collection(net, size=gen_size)\n collections.append(gc)\n if plot_loads and len(net.load):\n lc = create_load_collection(net, size=load_size)\n collections.append(lc)\n\n if len(net.switch):\n bsc = create_bus_bus_switch_collection(net, size=switch_size)\n collections.append(bsc)\n\n ax = draw_collections(collections, ax=ax)\n if show_plot:\n plt.show()\n return ax\n\n\nif __name__ == \"__main__\":\n import pandapower.networks as nw\n\n net = nw.case145()\n # net = nw.create_cigre_network_mv()\n # net = nw.mv_oberrhein()\n simple_plot(net, bus_size=0.4)\n", "path": "pandapower/plotting/simple_plot.py"}]} | 2,840 | 159 |
gh_patches_debug_13840 | rasdani/github-patches | git_diff | conan-io__conan-center-index-636 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] libx264/20190605: build fail compiler.version setting required for vcvars not defined
### Package and Environment Details
* Package Name/Version: **libx264/20190605**
* Operating System+version: **Windows 10**
* Compiler+version: **Mingw GCC 8.1**
* Docker image: **conanio/gcc8.1**
* Conan version: **conan 1.21.0**
* Python version: **Python 3.7.5**
### Conan profile
```
Configuration:
[settings]
arch=x86_64
arch_build=x86_64
build_type=Debug
compiler=gcc
compiler.cppstd=14
compiler.exception=seh
compiler.libcxx=libstdc++11
compiler.threads=posix
compiler.version=8.1
os=Windows
os_build=Windows
[options]
[build_requires]
[env]
CONAN_CMAKE_GENERATOR=MinGW Makefiles
```
```
[requires]
libx264/20190605
```
### Steps to reproduce
Changing `libx264/20190605@bincrafters/stable` to `libx264/20190605` makes the build fail.
It fails with:
```
ConanException: compiler.version setting required for vcvars not defined
```
This happens because the new `conanfile.py` calls `tools.vcvars` without first checking `_is_msvc`, so in an environment without Visual Studio `tools.vcvars` raises this error.
### Proposed fix
In line 94, change it to:
```
def build(self):
if self._is_msvc:
with tools.vcvars(self.settings):
autotools = self._configure_autotools()
autotools.make()
else:
autotools = self._configure_autotools()
autotools.make()
def package(self):
if self._is_msvc:
with tools.vcvars(self.settings):
autotools = self._configure_autotools()
autotools.install()
else:
autotools = self._configure_autotools()
autotools.install()
self.copy(pattern="COPYING", src=self._source_subfolder, dst='licenses')
tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig'))
```
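For reference, the same guard can also be written more compactly with a conditional context manager. This is only a sketch and assumes Conan 1.x, where `tools.no_op()` is available:
```
def build(self):
    # enter the vcvars environment only when building with Visual Studio,
    # otherwise use a do-nothing context manager
    with tools.vcvars(self.settings) if self._is_msvc else tools.no_op():
        autotools = self._configure_autotools()
        autotools.make()

def package(self):
    with tools.vcvars(self.settings) if self._is_msvc else tools.no_op():
        autotools = self._configure_autotools()
        autotools.install()
    self.copy(pattern="COPYING", src=self._source_subfolder, dst='licenses')
    tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig'))
```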
### Logs
<details><summary>Click to expand log</summary>
```
libx264/20190605: Applying build-requirement: nasm/2.14
libx264/20190605: Applying build-requirement: msys2/20190524
libx264/20190605: WARN: Build folder is dirty, removing it: C:\Users\Home\.conan\data\libx264\20190605\_\_\build\1a9b746db3232276ada4cf64b16512ba901fcf09
libx264/20190605: Copying sources to build folder
libx264/20190605: Building your package in C:\Users\Home\.conan\data\libx264\20190605\_\_\build\1a9b746db3232276ada4cf64b16512ba901fcf09
libx264/20190605: Generator txt created conanbuildinfo.txt
libx264/20190605: Calling build()
libx264/20190605:
libx264/20190605: ERROR: Package '1a9b746db3232276ada4cf64b16512ba901fcf09' build failed
libx264/20190605: WARN: Build folder C:\Users\Home\.conan\data\libx264\20190605\_\_\build\1a9b746db3232276ada4cf64b16512ba901fcf09
ERROR: libx264/20190605: Error in build() method, line 95
with tools.vcvars(self.settings):
ConanException: compiler.version setting required for vcvars not defined
```
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/libx264/all/conanfile.py`
Content:
```
1 from conans import ConanFile, tools, AutoToolsBuildEnvironment
2 import os
3
4
5 class LibX264Conan(ConanFile):
6 name = "libx264"
7 url = "https://github.com/conan-io/conan-center-index"
8 homepage = "https://www.videolan.org/developers/x264.html"
9 description = "x264 is a free software library and application for encoding video streams into the " \
10 "H.264/MPEG-4 AVC compression format"
11 topics = ("conan", "libx264", "video", "encoding")
12 license = "GPL-2.0"
13 settings = "os", "arch", "compiler", "build_type"
14 options = {"shared": [True, False], "fPIC": [True, False], "bit_depth": [8, 10, "all"]}
15 default_options = {'shared': False, 'fPIC': True, 'bit_depth': 'all'}
16 build_requires = "nasm/2.13.02"
17 _override_env = {}
18 _autotools = None
19
20 @property
21 def _is_mingw(self):
22 return self.settings.os == "Windows" and self.settings.compiler == 'gcc'
23
24 @property
25 def _is_msvc(self):
26 return self.settings.compiler == 'Visual Studio'
27
28 @property
29 def _source_subfolder(self):
30 return "source_subfolder"
31
32 def build_requirements(self):
33 if "CONAN_BASH_PATH" not in os.environ and tools.os_info.is_windows:
34 self.build_requires("msys2/20190524")
35
36 def config_options(self):
37 if self.settings.os == 'Windows':
38 del self.options.fPIC
39
40 def configure(self):
41 del self.settings.compiler.libcxx
42 del self.settings.compiler.cppstd
43
44 def source(self):
45 tools.get(**self.conan_data["sources"][self.version])
46 extracted_dir = 'x264-snapshot-%s-2245' % self.version
47 os.rename(extracted_dir, self._source_subfolder)
48
49 @property
50 def env(self):
51 ret = super(LibX264Conan, self).env
52 ret.update(self._override_env)
53 return ret
54
55 def _configure_autotools(self):
56 if not self._autotools:
57 prefix = tools.unix_path(self.package_folder)
58 args = ['--disable-cli', '--prefix={}'.format(prefix)]
59 if self.options.shared:
60 args.append('--enable-shared')
61 else:
62 args.append('--enable-static')
63 if self.settings.os != 'Windows' and self.options.fPIC:
64 args.append('--enable-pic')
65 if self.settings.build_type == 'Debug':
66 args.append('--enable-debug')
67 args.append('--bit-depth=%s' % str(self.options.bit_depth))
68
69 if tools.cross_building(self.settings):
70 if self.settings.os == "Android":
71 # the as of ndk does not work well for building libx264
72 self._override_env["AS"] = os.environ["CC"]
73 ndk_root = tools.unix_path(os.environ["NDK_ROOT"])
74 arch = {'armv7': 'arm',
75 'armv8': 'aarch64',
76 'x86': 'i686',
77 'x86_64': 'x86_64'}.get(str(self.settings.arch))
78 abi = 'androideabi' if self.settings.arch == 'armv7' else 'android'
79 cross_prefix = "%s/bin/%s-linux-%s-" % (ndk_root, arch, abi)
80 args.append('--cross-prefix=%s' % cross_prefix)
81
82 if self._is_msvc:
83 self._override_env['CC'] = 'cl'
84 self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
85 if self._is_msvc:
86 self._autotools.flags.append('-%s' % str(self.settings.compiler.runtime))
87 # cannot open program database ... if multiple CL.EXE write to the same .PDB file, please use /FS
88 self._autotools.flags.append('-FS')
89 self._autotools.configure(args=args, build=False, vars=self._override_env, configure_dir=self._source_subfolder)
90 return self._autotools
91
92 def build(self):
93 with tools.vcvars(self.settings):
94 autotools = self._configure_autotools()
95 autotools.make()
96
97 def package(self):
98 with tools.vcvars(self.settings):
99 autotools = self._configure_autotools()
100 autotools.install()
101 self.copy(pattern="COPYING", src=self._source_subfolder, dst='licenses')
102 tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig'))
103
104 def package_info(self):
105 if self._is_msvc:
106 self.cpp_info.libs = ['libx264.dll.lib' if self.options.shared else 'libx264']
107 if self.options.shared:
108 self.cpp_info.defines.append("X264_API_IMPORTS")
109 elif self._is_mingw:
110 self.cpp_info.libs = ['x264.dll' if self.options.shared else 'x264']
111 else:
112 self.cpp_info.libs = ['x264']
113 if self.settings.os == "Linux":
114 self.cpp_info.system_libs.extend(['dl', 'pthread', 'm'])
115 elif self.settings.os == "Android":
116 self.cpp_info.system_libs.extend(['dl', 'm'])
117 self.cpp_info.names['pkg_config'] = 'x264'
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/libx264/all/conanfile.py b/recipes/libx264/all/conanfile.py
--- a/recipes/libx264/all/conanfile.py
+++ b/recipes/libx264/all/conanfile.py
@@ -90,12 +90,12 @@
return self._autotools
def build(self):
- with tools.vcvars(self.settings):
+ with tools.vcvars(self.settings) if self._is_msvc else tools.no_op():
autotools = self._configure_autotools()
autotools.make()
def package(self):
- with tools.vcvars(self.settings):
+ with tools.vcvars(self.settings) if self._is_msvc else tools.no_op():
autotools = self._configure_autotools()
autotools.install()
self.copy(pattern="COPYING", src=self._source_subfolder, dst='licenses')
| {"golden_diff": "diff --git a/recipes/libx264/all/conanfile.py b/recipes/libx264/all/conanfile.py\n--- a/recipes/libx264/all/conanfile.py\n+++ b/recipes/libx264/all/conanfile.py\n@@ -90,12 +90,12 @@\n return self._autotools\n \n def build(self):\n- with tools.vcvars(self.settings):\n+ with tools.vcvars(self.settings) if self._is_msvc else tools.no_op():\n autotools = self._configure_autotools()\n autotools.make()\n \n def package(self):\n- with tools.vcvars(self.settings):\n+ with tools.vcvars(self.settings) if self._is_msvc else tools.no_op():\n autotools = self._configure_autotools()\n autotools.install()\n self.copy(pattern=\"COPYING\", src=self._source_subfolder, dst='licenses')\n", "issue": "[package] libx264/20190605: build fail compiler.version setting required for vcvars not defined\n\r\n### Package and Environment Details\r\n * Package Name/Version: **libx264/20190605**\r\n * Operating System+version: **Windows 10**\r\n * Compiler+version: **Mingw GCC 8.1**\r\n * Docker image: **conanio/gcc8.1**\r\n * Conan version: **conan 1.21.0**\r\n * Python version: **Python 3.7.5**\r\n\r\n\r\n### Conan profile\r\n```\r\nConfiguration:\r\n[settings]\r\narch=x86_64\r\narch_build=x86_64\r\nbuild_type=Debug\r\ncompiler=gcc\r\ncompiler.cppstd=14\r\ncompiler.exception=seh\r\ncompiler.libcxx=libstdc++11\r\ncompiler.threads=posix\r\ncompiler.version=8.1\r\nos=Windows\r\nos_build=Windows\r\n[options]\r\n[build_requires]\r\n[env]\r\nCONAN_CMAKE_GENERATOR=MinGW Makefiles\r\n```\r\n\r\n```\r\n[requires]\r\nlibx264/20190605\r\n```\r\n\r\n\r\n### Steps to reproduce\r\nChange `libx264/20190605@bincrafters/stable` to `libx264/20190605` build fail.\r\nFailed in\r\n\r\n```\r\nConanException: compiler.version setting required for vcvars not defined\r\n```\r\nBecause of new `conanfile.py` without first determining whether it is `_is_msvc`.\r\nIn no `Visual Studio` environment, `tools.vcvars` due to the error.\r\n\r\n\r\n### Fixed method\r\nin line 94, change to:\r\n```\r\n def build(self):\r\n if self._is_msvc:\r\n with tools.vcvars(self.settings):\r\n autotools = self._configure_autotools()\r\n autotools.make()\r\n else:\r\n autotools = self._configure_autotools()\r\n autotools.make()\r\n\r\n def package(self):\r\n if self._is_msvc:\r\n with tools.vcvars(self.settings):\r\n autotools = self._configure_autotools()\r\n autotools.install()\r\n else:\r\n autotools = self._configure_autotools()\r\n autotools.install()\r\n self.copy(pattern=\"COPYING\", src=self._source_subfolder, dst='licenses')\r\n tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig'))\r\n```\r\n\r\n\r\n### Logs\r\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nlibx264/20190605: Applying build-requirement: nasm/2.14\r\nlibx264/20190605: Applying build-requirement: msys2/20190524\r\nlibx264/20190605: WARN: Build folder is dirty, removing it: C:\\Users\\Home\\.conan\\data\\libx264\\20190605\\_\\_\\build\\1a9b746db3232276ada4cf64b16512ba901fcf09\r\nlibx264/20190605: Copying sources to build folder\r\nlibx264/20190605: Building your package in C:\\Users\\Home\\.conan\\data\\libx264\\20190605\\_\\_\\build\\1a9b746db3232276ada4cf64b16512ba901fcf09\r\nlibx264/20190605: Generator txt created conanbuildinfo.txt\r\nlibx264/20190605: Calling build()\r\nlibx264/20190605: \r\nlibx264/20190605: ERROR: Package '1a9b746db3232276ada4cf64b16512ba901fcf09' build failed\r\nlibx264/20190605: WARN: Build folder C:\\Users\\Home\\.conan\\data\\libx264\\20190605\\_\\_\\build\\1a9b746db3232276ada4cf64b16512ba901fcf09\r\nERROR: libx264/20190605: 
Error in build() method, line 95\r\n\twith tools.vcvars(self.settings):\r\n\tConanException: compiler.version setting required for vcvars not defined\r\n\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "from conans import ConanFile, tools, AutoToolsBuildEnvironment\nimport os\n\n\nclass LibX264Conan(ConanFile):\n name = \"libx264\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://www.videolan.org/developers/x264.html\"\n description = \"x264 is a free software library and application for encoding video streams into the \" \\\n \"H.264/MPEG-4 AVC compression format\"\n topics = (\"conan\", \"libx264\", \"video\", \"encoding\")\n license = \"GPL-2.0\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False], \"bit_depth\": [8, 10, \"all\"]}\n default_options = {'shared': False, 'fPIC': True, 'bit_depth': 'all'}\n build_requires = \"nasm/2.13.02\"\n _override_env = {}\n _autotools = None\n\n @property\n def _is_mingw(self):\n return self.settings.os == \"Windows\" and self.settings.compiler == 'gcc'\n\n @property\n def _is_msvc(self):\n return self.settings.compiler == 'Visual Studio'\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def build_requirements(self):\n if \"CONAN_BASH_PATH\" not in os.environ and tools.os_info.is_windows:\n self.build_requires(\"msys2/20190524\")\n\n def config_options(self):\n if self.settings.os == 'Windows':\n del self.options.fPIC\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = 'x264-snapshot-%s-2245' % self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n @property\n def env(self):\n ret = super(LibX264Conan, self).env\n ret.update(self._override_env)\n return ret\n\n def _configure_autotools(self):\n if not self._autotools:\n prefix = tools.unix_path(self.package_folder)\n args = ['--disable-cli', '--prefix={}'.format(prefix)]\n if self.options.shared:\n args.append('--enable-shared')\n else:\n args.append('--enable-static')\n if self.settings.os != 'Windows' and self.options.fPIC:\n args.append('--enable-pic')\n if self.settings.build_type == 'Debug':\n args.append('--enable-debug')\n args.append('--bit-depth=%s' % str(self.options.bit_depth))\n\n if tools.cross_building(self.settings):\n if self.settings.os == \"Android\":\n # the as of ndk does not work well for building libx264\n self._override_env[\"AS\"] = os.environ[\"CC\"]\n ndk_root = tools.unix_path(os.environ[\"NDK_ROOT\"])\n arch = {'armv7': 'arm',\n 'armv8': 'aarch64',\n 'x86': 'i686',\n 'x86_64': 'x86_64'}.get(str(self.settings.arch))\n abi = 'androideabi' if self.settings.arch == 'armv7' else 'android'\n cross_prefix = \"%s/bin/%s-linux-%s-\" % (ndk_root, arch, abi)\n args.append('--cross-prefix=%s' % cross_prefix)\n\n if self._is_msvc:\n self._override_env['CC'] = 'cl'\n self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n if self._is_msvc:\n self._autotools.flags.append('-%s' % str(self.settings.compiler.runtime))\n # cannot open program database ... 
if multiple CL.EXE write to the same .PDB file, please use /FS\n self._autotools.flags.append('-FS')\n self._autotools.configure(args=args, build=False, vars=self._override_env, configure_dir=self._source_subfolder)\n return self._autotools\n\n def build(self):\n with tools.vcvars(self.settings):\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n with tools.vcvars(self.settings):\n autotools = self._configure_autotools()\n autotools.install()\n self.copy(pattern=\"COPYING\", src=self._source_subfolder, dst='licenses')\n tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig'))\n\n def package_info(self):\n if self._is_msvc:\n self.cpp_info.libs = ['libx264.dll.lib' if self.options.shared else 'libx264']\n if self.options.shared:\n self.cpp_info.defines.append(\"X264_API_IMPORTS\")\n elif self._is_mingw:\n self.cpp_info.libs = ['x264.dll' if self.options.shared else 'x264']\n else:\n self.cpp_info.libs = ['x264']\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend(['dl', 'pthread', 'm'])\n elif self.settings.os == \"Android\":\n self.cpp_info.system_libs.extend(['dl', 'm'])\n self.cpp_info.names['pkg_config'] = 'x264'\n", "path": "recipes/libx264/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, tools, AutoToolsBuildEnvironment\nimport os\n\n\nclass LibX264Conan(ConanFile):\n name = \"libx264\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://www.videolan.org/developers/x264.html\"\n description = \"x264 is a free software library and application for encoding video streams into the \" \\\n \"H.264/MPEG-4 AVC compression format\"\n topics = (\"conan\", \"libx264\", \"video\", \"encoding\")\n license = \"GPL-2.0\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False], \"bit_depth\": [8, 10, \"all\"]}\n default_options = {'shared': False, 'fPIC': True, 'bit_depth': 'all'}\n build_requires = \"nasm/2.13.02\"\n _override_env = {}\n _autotools = None\n\n @property\n def _is_mingw(self):\n return self.settings.os == \"Windows\" and self.settings.compiler == 'gcc'\n\n @property\n def _is_msvc(self):\n return self.settings.compiler == 'Visual Studio'\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def build_requirements(self):\n if \"CONAN_BASH_PATH\" not in os.environ and tools.os_info.is_windows:\n self.build_requires(\"msys2/20190524\")\n\n def config_options(self):\n if self.settings.os == 'Windows':\n del self.options.fPIC\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = 'x264-snapshot-%s-2245' % self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n @property\n def env(self):\n ret = super(LibX264Conan, self).env\n ret.update(self._override_env)\n return ret\n\n def _configure_autotools(self):\n if not self._autotools:\n prefix = tools.unix_path(self.package_folder)\n args = ['--disable-cli', '--prefix={}'.format(prefix)]\n if self.options.shared:\n args.append('--enable-shared')\n else:\n args.append('--enable-static')\n if self.settings.os != 'Windows' and self.options.fPIC:\n args.append('--enable-pic')\n if self.settings.build_type == 'Debug':\n args.append('--enable-debug')\n args.append('--bit-depth=%s' % str(self.options.bit_depth))\n\n if tools.cross_building(self.settings):\n if self.settings.os == 
\"Android\":\n # the as of ndk does not work well for building libx264\n self._override_env[\"AS\"] = os.environ[\"CC\"]\n ndk_root = tools.unix_path(os.environ[\"NDK_ROOT\"])\n arch = {'armv7': 'arm',\n 'armv8': 'aarch64',\n 'x86': 'i686',\n 'x86_64': 'x86_64'}.get(str(self.settings.arch))\n abi = 'androideabi' if self.settings.arch == 'armv7' else 'android'\n cross_prefix = \"%s/bin/%s-linux-%s-\" % (ndk_root, arch, abi)\n args.append('--cross-prefix=%s' % cross_prefix)\n\n if self._is_msvc:\n self._override_env['CC'] = 'cl'\n self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n if self._is_msvc:\n self._autotools.flags.append('-%s' % str(self.settings.compiler.runtime))\n # cannot open program database ... if multiple CL.EXE write to the same .PDB file, please use /FS\n self._autotools.flags.append('-FS')\n self._autotools.configure(args=args, build=False, vars=self._override_env, configure_dir=self._source_subfolder)\n return self._autotools\n\n def build(self):\n with tools.vcvars(self.settings) if self._is_msvc else tools.no_op():\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n with tools.vcvars(self.settings) if self._is_msvc else tools.no_op():\n autotools = self._configure_autotools()\n autotools.install()\n self.copy(pattern=\"COPYING\", src=self._source_subfolder, dst='licenses')\n tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig'))\n\n def package_info(self):\n if self._is_msvc:\n self.cpp_info.libs = ['libx264.dll.lib' if self.options.shared else 'libx264']\n if self.options.shared:\n self.cpp_info.defines.append(\"X264_API_IMPORTS\")\n elif self._is_mingw:\n self.cpp_info.libs = ['x264.dll' if self.options.shared else 'x264']\n else:\n self.cpp_info.libs = ['x264']\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.extend(['dl', 'pthread', 'm'])\n elif self.settings.os == \"Android\":\n self.cpp_info.system_libs.extend(['dl', 'm'])\n self.cpp_info.names['pkg_config'] = 'x264'\n", "path": "recipes/libx264/all/conanfile.py"}]} | 2,825 | 210 |
gh_patches_debug_1871 | rasdani/github-patches | git_diff | fidals__shopelectro-200 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SE yml fix delivery 3k -> 5k
[origin trello task](https://trello.com/c/LyLVDakS/298-se-%D0%BF%D1%80%D0%B0%D0%B2%D1%8C-yml)
We suddenly discovered that the feed reports incorrect delivery info (in the `<sales_notes>` tag).
Fix it to 5k.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/settings/base.py`
Content:
```
1 """
2 Django settings for shopelectro project.
3
4 Generated by 'django-admin startproject' using Django 1.9.5.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/1.9/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/1.9/ref/settings/
11 """
12
13 import os
14 from datetime import datetime
15
16 import dj_database_url
17
18 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
19 BASE_DIR = os.path.dirname(os.path.dirname(
20 os.path.dirname(os.path.abspath(__file__))))
21
22 # Quick-start development settings - unsuitable for production
23 # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
24
25 # SECURITY WARNING: keep the secret key used in production secret!
26 SECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')
27
28 # SECURITY WARNING: don't run with debug turned on in production!
29 DEBUG = True
30
31 # http://bit.ly/sorl-thumbnail-docs
32 THUMBNAIL_DEBUG = False
33
34 ALLOWED_HOSTS = ['*']
35
36 if os.environ.get('TEST_ENV', False):
37 # disable https in CI
38 # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header
39 SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')
40
41 # Enable in frame loading for Ya.Metric
42 # https://docs.djangoproject.com/es/1.10/ref/clickjacking/
43 # https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page
44 X_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'
45
46 # Application definition
47 INSTALLED_APPS = [
48 # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover
49 'django.contrib.contenttypes',
50 'django.contrib.auth',
51 'django.contrib.messages',
52 'django.contrib.redirects',
53 'django.contrib.sessions',
54 'django.contrib.sitemaps',
55 'django.contrib.sites',
56 'django.contrib.staticfiles',
57 'django.contrib.humanize',
58 'django_user_agents',
59 'generic_admin',
60 'django.contrib.admin.apps.SimpleAdminConfig',
61 'debug_toolbar',
62 'mptt',
63 'widget_tweaks',
64 'sorl.thumbnail',
65 'images',
66 'pages',
67 'catalog',
68 'ecommerce',
69 'shopelectro',
70 ]
71
72 MIDDLEWARE = [
73 'django.middleware.security.SecurityMiddleware',
74 'django.contrib.sessions.middleware.SessionMiddleware',
75 'django.middleware.common.CommonMiddleware',
76 'django.middleware.csrf.CsrfViewMiddleware',
77 'django.contrib.auth.middleware.AuthenticationMiddleware',
78 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
79 'django.contrib.messages.middleware.MessageMiddleware',
80 'django.middleware.clickjacking.XFrameOptionsMiddleware',
81 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',
82 'django.middleware.locale.LocaleMiddleware',
83 'django_user_agents.middleware.UserAgentMiddleware',
84 'debug_toolbar.middleware.DebugToolbarMiddleware',
85 ]
86
87 ROOT_URLCONF = 'shopelectro.urls'
88
89 TEMPLATES = [
90 {
91 'BACKEND': 'django.template.backends.django.DjangoTemplates',
92 'DIRS': [os.path.join(BASE_DIR, 'templates')],
93 'APP_DIRS': True,
94 'OPTIONS': {
95 'context_processors': [
96 'django.template.context_processors.debug',
97 'django.template.context_processors.media',
98 'django.template.context_processors.request',
99 'django.template.context_processors.static',
100 'django.contrib.auth.context_processors.auth',
101 'django.contrib.messages.context_processors.messages',
102 'ecommerce.context_processors.cart',
103 'shopelectro.context_processors.shop',
104 ],
105 },
106 },
107 ]
108
109 WSGI_APPLICATION = 'shopelectro.wsgi.application'
110
111 # Password validation
112 # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
113
114 AUTH_PASSWORD_VALIDATORS = [
115 {
116 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
117 },
118 {
119 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
120 },
121 {
122 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
123 },
124 {
125 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
126 },
127 ]
128
129 # Internationalization
130 # https://docs.djangoproject.com/en/1.9/topics/i18n/
131
132 LOCALE_NAME = 'en_US'
133 TIME_ZONE = 'UTC'
134
135 USE_I18N = True
136 USE_L10N = True
137 USE_TZ = True
138
139 LOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]
140 FORMAT_MODULE_PATH = [
141 'shopelectro.formats',
142 ]
143
144 # Static files (CSS, JavaScript, Images)
145 # https://docs.djangoproject.com/en/1.9/howto/static-files/
146 STATIC_URL = '/static/'
147 STATIC_ROOT = os.path.join(BASE_DIR, 'static')
148 ASSETS_DIR = os.path.join(BASE_DIR, 'assets')
149
150 STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
151
152 STATICFILES_DIRS = [
153 os.path.join(BASE_DIR, 'front/build'),
154 ASSETS_DIR,
155 ]
156
157 MEDIA_URL = '/media/'
158 MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
159
160 # It is fake-url. Correct url will be created on `docker-compose up` stage from `docker/.env`
161 DATABASE_URL = 'postgres://user:pass@db_name/table'
162 DATABASES = {
163 'default': dj_database_url.config(
164 env='DATABASE_URL',
165 default=DATABASE_URL,
166 )
167 }
168
169 LOGGING = {
170 'version': 1,
171 'disable_existing_loggers': False,
172 'handlers': {
173 'console': {
174 'class': 'logging.StreamHandler',
175 },
176 },
177 'loggers': {
178 'django': {
179 'handlers': ['console'],
180 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
181 },
182 },
183 }
184
185 SITE_CREATED = datetime(2013, 1, 1)
186
187 LOCALHOST = 'http://127.0.0.1:8000/'
188 BASE_URL = 'https://www.shopelectro.ru'
189
190 PLACEHOLDER_IMAGE = 'images/logo.png'
191 PLACEHOLDER_ALT = 'Логотип компании Shopelectro'
192
193 # Autocomplete and search settings
194 SEARCH_SEE_ALL_LABEL = 'Смотреть все результаты'
195
196 # For sitemaps and sites framework
197 SITE_ID = 1
198 SITE_DOMAIN_NAME = 'www.shopelectro.ru'
199
200 # Used to retrieve instances in ecommerce.Cart
201 CART_ID = 'cart'
202
203 # Used to define choices attr in definition of Order.payment_type field
204 PAYMENT_OPTIONS = (
205 ('cash', 'Наличные'),
206 ('cashless', 'Безналичные и денежные переводы'),
207 ('AC', 'Банковская карта'),
208 ('PC', 'Яндекс.Деньги'),
209 ('GP', 'Связной (терминал)'),
210 ('AB', 'Альфа-Клик'),
211 )
212
213 # It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`
214 YANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')
215
216 # Used for order's email in ecommerce app
217 FAKE_ORDER_NUMBER = 6000
218
219 # Subjects for different types of emails sent from SE.
220 EMAIL_SUBJECTS = {
221 'call': 'Обратный звонок',
222 'order': 'Заказ №{0.fake_order_number}',
223 'yandex_order': 'Заказ №{0.fake_order_number} | Яндекс.Касса',
224 'one_click': 'Заказ в один клик №{0.fake_order_number}',
225 'ya_feedback_request': 'Оцените нас на Яндекс.Маркете',
226 }
227
228 # Email configs
229 # It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`
230 EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')
231 EMAIL_HOST_USER = '[email protected]'
232 EMAIL_USE_TLS = True
233 EMAIL_HOST = 'smtp.yandex.ru'
234 EMAIL_PORT = 587
235 EMAIL_SENDER = '[email protected]'
236 EMAIL_RECIPIENT = '[email protected]'
237 SHOP_EMAIL = '[email protected]'
238
239 # FTP configs
240 FTP_USER = os.environ.get('FTP_USER', 'user')
241 FTP_PASS = os.environ.get('FTP_PASS', 'pass')
242 FTP_IP = os.environ.get('FTP_IP', '0.0.0.0')
243
244 # Used in admin image uploads
245 MODEL_TYPES = {
246 'Product': {
247 'app_name': 'shopelectro',
248 'dir_name': 'products',
249 },
250 'Category': {
251 'app_name': 'shopelectro',
252 'dir_name': 'categories',
253 }
254 }
255
256 # This need for using {% debug %} variable in templates.
257 INTERNAL_IPS = (
258 '127.0.0.1',
259 )
260
261 TOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]
262
263 SHOP = {
264 'id': '69886',
265 'scid': '64788',
266 'success_url': BASE_URL + '/shop/order-success/',
267 'fail_url': BASE_URL + '/',
268 'cps_phone': '+78124163200',
269 'cps_email': '[email protected]',
270 'local_delivery_cost': 300,
271 'local_delivery_cost_threshold': 3000,
272 }
273
274 # used in data-migrations and tests
275 CUSTOM_PAGES = {
276 'index': {
277 'slug': '',
278 'name': 'Интернет-магазин элементов питания "ShopElectro"',
279 'menu_title': 'Главная',
280 'title': 'Интернет-магазин Элементов питания с доставкой по России',
281 },
282 'sitemap': {
283 'slug': 'sitemap',
284 'h1': 'Карта сайта',
285 'name': 'Карта сайта',
286 },
287 'order': {
288 'slug': 'order',
289 'name': 'Оформление заказа',
290 'title': 'Корзина Интернет-магазин shopelectro.ru Санкт-Петербург',
291 },
292 'search': {
293 'slug': 'search',
294 'name': 'Результаты поиска',
295 },
296 'catalog': {
297 'slug': 'catalog',
298 'name': 'Каталог товаров',
299 'menu_title': 'Каталог',
300 },
301 'order_success': {
302 'slug': 'order-success',
303 'name': 'Заказ принят',
304 }
305 }
306
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/settings/base.py b/shopelectro/settings/base.py
--- a/shopelectro/settings/base.py
+++ b/shopelectro/settings/base.py
@@ -268,7 +268,7 @@
'cps_phone': '+78124163200',
'cps_email': '[email protected]',
'local_delivery_cost': 300,
- 'local_delivery_cost_threshold': 3000,
+ 'local_delivery_cost_threshold': 5000,
}
# used in data-migrations and tests
| {"golden_diff": "diff --git a/shopelectro/settings/base.py b/shopelectro/settings/base.py\n--- a/shopelectro/settings/base.py\n+++ b/shopelectro/settings/base.py\n@@ -268,7 +268,7 @@\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n- 'local_delivery_cost_threshold': 3000,\n+ 'local_delivery_cost_threshold': 5000,\n }\n \n # used in data-migrations and tests\n", "issue": "SE yml fix delivery 3k -> 5k\n[origin trello task](https://trello.com/c/LyLVDakS/298-se-%D0%BF%D1%80%D0%B0%D0%B2%D1%8C-yml)\r\n\r\n\u0412\u043d\u0435\u0437\u0430\u043f\u043d\u043e \u043e\u0431\u043d\u043e\u0440\u0443\u0436\u0438\u043b\u0438, \u0447\u0442\u043e \u0447\u0435\u0440\u0435\u0437 \u0444\u0438\u0434 \u043f\u0435\u0440\u0435\u0434\u0430\u0435\u0442\u0441\u044f( \u0432 \u0442\u0435\u0433\u0435 <sales_notes>) \u043d\u0435\u0432\u0435\u0440\u043d\u0430\u044f \u0438\u043d\u0444\u0430 \u043e \u0434\u043e\u0441\u0442\u0430\u0432\u043a\u0435.\r\n\u0418\u0441\u043f\u0440\u0430\u0432\u044c \u043d\u0430 5\u043a.\n", "before_files": [{"content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\nimport dj_database_url\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'images',\n 'pages',\n 'catalog',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 
'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front/build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# It is fake-url. 
Correct url will be created on `docker-compose up` stage from `docker/.env`\nDATABASE_URL = 'postgres://user:pass@db_name/table'\nDATABASES = {\n 'default': dj_database_url.config(\n env='DATABASE_URL',\n default=DATABASE_URL,\n )\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = '\u041b\u043e\u0433\u043e\u0442\u0438\u043f \u043a\u043e\u043c\u043f\u0430\u043d\u0438\u0438 Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = '\u0421\u043c\u043e\u0442\u0440\u0435\u0442\u044c \u0432\u0441\u0435 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u044b'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', '\u041d\u0430\u043b\u0438\u0447\u043d\u044b\u0435'),\n ('cashless', '\u0411\u0435\u0437\u043d\u0430\u043b\u0438\u0447\u043d\u044b\u0435 \u0438 \u0434\u0435\u043d\u0435\u0436\u043d\u044b\u0435 \u043f\u0435\u0440\u0435\u0432\u043e\u0434\u044b'),\n ('AC', '\u0411\u0430\u043d\u043a\u043e\u0432\u0441\u043a\u0430\u044f \u043a\u0430\u0440\u0442\u0430'),\n ('PC', '\u042f\u043d\u0434\u0435\u043a\u0441.\u0414\u0435\u043d\u044c\u0433\u0438'),\n ('GP', '\u0421\u0432\u044f\u0437\u043d\u043e\u0439 (\u0442\u0435\u0440\u043c\u0438\u043d\u0430\u043b)'),\n ('AB', '\u0410\u043b\u044c\u0444\u0430-\u041a\u043b\u0438\u043a'),\n)\n\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': '\u041e\u0431\u0440\u0430\u0442\u043d\u044b\u0439 \u0437\u0432\u043e\u043d\u043e\u043a',\n 'order': '\u0417\u0430\u043a\u0430\u0437 \u2116{0.fake_order_number}',\n 'yandex_order': '\u0417\u0430\u043a\u0430\u0437 \u2116{0.fake_order_number} | \u042f\u043d\u0434\u0435\u043a\u0441.\u041a\u0430\u0441\u0441\u0430',\n 'one_click': '\u0417\u0430\u043a\u0430\u0437 \u0432 \u043e\u0434\u0438\u043d \u043a\u043b\u0438\u043a \u2116{0.fake_order_number}',\n 'ya_feedback_request': '\u041e\u0446\u0435\u043d\u0438\u0442\u0435 \u043d\u0430\u0441 \u043d\u0430 \u042f\u043d\u0434\u0435\u043a\u0441.\u041c\u0430\u0440\u043a\u0435\u0442\u0435',\n}\n\n# Email configs\n# It is fake-pass. 
Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENT = '[email protected]'\nSHOP_EMAIL = '[email protected]'\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 3000,\n}\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': '\u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d \u044d\u043b\u0435\u043c\u0435\u043d\u0442\u043e\u0432 \u043f\u0438\u0442\u0430\u043d\u0438\u044f \"ShopElectro\"',\n 'menu_title': '\u0413\u043b\u0430\u0432\u043d\u0430\u044f',\n 'title': '\u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d \u042d\u043b\u0435\u043c\u0435\u043d\u0442\u043e\u0432 \u043f\u0438\u0442\u0430\u043d\u0438\u044f \u0441 \u0434\u043e\u0441\u0442\u0430\u0432\u043a\u043e\u0439 \u043f\u043e \u0420\u043e\u0441\u0441\u0438\u0438',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': '\u041a\u0430\u0440\u0442\u0430 \u0441\u0430\u0439\u0442\u0430',\n 'name': '\u041a\u0430\u0440\u0442\u0430 \u0441\u0430\u0439\u0442\u0430',\n },\n 'order': {\n 'slug': 'order',\n 'name': '\u041e\u0444\u043e\u0440\u043c\u043b\u0435\u043d\u0438\u0435 \u0437\u0430\u043a\u0430\u0437\u0430',\n 'title': '\u041a\u043e\u0440\u0437\u0438\u043d\u0430 \u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d shopelectro.ru \u0421\u0430\u043d\u043a\u0442-\u041f\u0435\u0442\u0435\u0440\u0431\u0443\u0440\u0433',\n },\n 'search': {\n 'slug': 'search',\n 'name': '\u0420\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u044b \u043f\u043e\u0438\u0441\u043a\u0430',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': '\u041a\u0430\u0442\u0430\u043b\u043e\u0433 \u0442\u043e\u0432\u0430\u0440\u043e\u0432',\n 'menu_title': '\u041a\u0430\u0442\u0430\u043b\u043e\u0433',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': '\u0417\u0430\u043a\u0430\u0437 \u043f\u0440\u0438\u043d\u044f\u0442',\n }\n}\n", "path": "shopelectro/settings/base.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for shopelectro project.\n\nGenerated by 'django-admin startproject' using Django 1.9.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\n\nimport os\nfrom datetime import datetime\n\nimport dj_database_url\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(\n 
os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get('SECRET_KEY', 'so_secret_key')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# http://bit.ly/sorl-thumbnail-docs\nTHUMBNAIL_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nif os.environ.get('TEST_ENV', False):\n # disable https in CI\n # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\n SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'http')\n\n# Enable in frame loading for Ya.Metric\n# https://docs.djangoproject.com/es/1.10/ref/clickjacking/\n# https://yandex.ru/support/metrika/general/counter-webvisor.xml#download-page\nX_FRAME_OPTIONS = 'ALLOW-FROM http://webvisor.com'\n\n# Application definition\nINSTALLED_APPS = [\n # https://docs.djangoproject.com/en/1.9/ref/contrib/admin/#django.contrib.admin.autodiscover\n 'django.contrib.contenttypes',\n 'django.contrib.auth',\n 'django.contrib.messages',\n 'django.contrib.redirects',\n 'django.contrib.sessions',\n 'django.contrib.sitemaps',\n 'django.contrib.sites',\n 'django.contrib.staticfiles',\n 'django.contrib.humanize',\n 'django_user_agents',\n 'generic_admin',\n 'django.contrib.admin.apps.SimpleAdminConfig',\n 'debug_toolbar',\n 'mptt',\n 'widget_tweaks',\n 'sorl.thumbnail',\n 'images',\n 'pages',\n 'catalog',\n 'ecommerce',\n 'shopelectro',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.contrib.redirects.middleware.RedirectFallbackMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_user_agents.middleware.UserAgentMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n]\n\nROOT_URLCONF = 'shopelectro.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.media',\n 'django.template.context_processors.request',\n 'django.template.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'ecommerce.context_processors.cart',\n 'shopelectro.context_processors.shop',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'shopelectro.wsgi.application'\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLOCALE_NAME = 'en_US'\nTIME_ZONE = 
'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, 'shopelectro/locale')]\nFORMAT_MODULE_PATH = [\n 'shopelectro.formats',\n]\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nASSETS_DIR = os.path.join(BASE_DIR, 'assets')\n\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\nSTATICFILES_DIRS = [\n os.path.join(BASE_DIR, 'front/build'),\n ASSETS_DIR,\n]\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# It is fake-url. Correct url will be created on `docker-compose up` stage from `docker/.env`\nDATABASE_URL = 'postgres://user:pass@db_name/table'\nDATABASES = {\n 'default': dj_database_url.config(\n env='DATABASE_URL',\n default=DATABASE_URL,\n )\n}\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nSITE_CREATED = datetime(2013, 1, 1)\n\nLOCALHOST = 'http://127.0.0.1:8000/'\nBASE_URL = 'https://www.shopelectro.ru'\n\nPLACEHOLDER_IMAGE = 'images/logo.png'\nPLACEHOLDER_ALT = '\u041b\u043e\u0433\u043e\u0442\u0438\u043f \u043a\u043e\u043c\u043f\u0430\u043d\u0438\u0438 Shopelectro'\n\n# Autocomplete and search settings\nSEARCH_SEE_ALL_LABEL = '\u0421\u043c\u043e\u0442\u0440\u0435\u0442\u044c \u0432\u0441\u0435 \u0440\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u044b'\n\n# For sitemaps and sites framework\nSITE_ID = 1\nSITE_DOMAIN_NAME = 'www.shopelectro.ru'\n\n# Used to retrieve instances in ecommerce.Cart\nCART_ID = 'cart'\n\n# Used to define choices attr in definition of Order.payment_type field\nPAYMENT_OPTIONS = (\n ('cash', '\u041d\u0430\u043b\u0438\u0447\u043d\u044b\u0435'),\n ('cashless', '\u0411\u0435\u0437\u043d\u0430\u043b\u0438\u0447\u043d\u044b\u0435 \u0438 \u0434\u0435\u043d\u0435\u0436\u043d\u044b\u0435 \u043f\u0435\u0440\u0435\u0432\u043e\u0434\u044b'),\n ('AC', '\u0411\u0430\u043d\u043a\u043e\u0432\u0441\u043a\u0430\u044f \u043a\u0430\u0440\u0442\u0430'),\n ('PC', '\u042f\u043d\u0434\u0435\u043a\u0441.\u0414\u0435\u043d\u044c\u0433\u0438'),\n ('GP', '\u0421\u0432\u044f\u0437\u043d\u043e\u0439 (\u0442\u0435\u0440\u043c\u0438\u043d\u0430\u043b)'),\n ('AB', '\u0410\u043b\u044c\u0444\u0430-\u041a\u043b\u0438\u043a'),\n)\n\n# It is fake-pass. Correct pass will be created on `docker-compose up` stage from `docker/.env`\nYANDEX_SHOP_PASS = os.environ.get('YANDEX_SHOP_PASS', 'so_secret_pass')\n\n# Used for order's email in ecommerce app\nFAKE_ORDER_NUMBER = 6000\n\n# Subjects for different types of emails sent from SE.\nEMAIL_SUBJECTS = {\n 'call': '\u041e\u0431\u0440\u0430\u0442\u043d\u044b\u0439 \u0437\u0432\u043e\u043d\u043e\u043a',\n 'order': '\u0417\u0430\u043a\u0430\u0437 \u2116{0.fake_order_number}',\n 'yandex_order': '\u0417\u0430\u043a\u0430\u0437 \u2116{0.fake_order_number} | \u042f\u043d\u0434\u0435\u043a\u0441.\u041a\u0430\u0441\u0441\u0430',\n 'one_click': '\u0417\u0430\u043a\u0430\u0437 \u0432 \u043e\u0434\u0438\u043d \u043a\u043b\u0438\u043a \u2116{0.fake_order_number}',\n 'ya_feedback_request': '\u041e\u0446\u0435\u043d\u0438\u0442\u0435 \u043d\u0430\u0441 \u043d\u0430 \u042f\u043d\u0434\u0435\u043a\u0441.\u041c\u0430\u0440\u043a\u0435\u0442\u0435',\n}\n\n# Email configs\n# It is fake-pass. 
Correct pass will be created on `docker-compose up` stage from `docker/.env`\nEMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD', 'so_secret_pass')\nEMAIL_HOST_USER = '[email protected]'\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.yandex.ru'\nEMAIL_PORT = 587\nEMAIL_SENDER = '[email protected]'\nEMAIL_RECIPIENT = '[email protected]'\nSHOP_EMAIL = '[email protected]'\n\n# FTP configs\nFTP_USER = os.environ.get('FTP_USER', 'user')\nFTP_PASS = os.environ.get('FTP_PASS', 'pass')\nFTP_IP = os.environ.get('FTP_IP', '0.0.0.0')\n\n# Used in admin image uploads\nMODEL_TYPES = {\n 'Product': {\n 'app_name': 'shopelectro',\n 'dir_name': 'products',\n },\n 'Category': {\n 'app_name': 'shopelectro',\n 'dir_name': 'categories',\n }\n}\n\n# This need for using {% debug %} variable in templates.\nINTERNAL_IPS = (\n '127.0.0.1',\n)\n\nTOP_PRODUCTS = [291, 438, 1137, 2166, 2725, 2838, 3288, 3884, 3959, 2764]\n\nSHOP = {\n 'id': '69886',\n 'scid': '64788',\n 'success_url': BASE_URL + '/shop/order-success/',\n 'fail_url': BASE_URL + '/',\n 'cps_phone': '+78124163200',\n 'cps_email': '[email protected]',\n 'local_delivery_cost': 300,\n 'local_delivery_cost_threshold': 5000,\n}\n\n# used in data-migrations and tests\nCUSTOM_PAGES = {\n 'index': {\n 'slug': '',\n 'name': '\u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d \u044d\u043b\u0435\u043c\u0435\u043d\u0442\u043e\u0432 \u043f\u0438\u0442\u0430\u043d\u0438\u044f \"ShopElectro\"',\n 'menu_title': '\u0413\u043b\u0430\u0432\u043d\u0430\u044f',\n 'title': '\u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d \u042d\u043b\u0435\u043c\u0435\u043d\u0442\u043e\u0432 \u043f\u0438\u0442\u0430\u043d\u0438\u044f \u0441 \u0434\u043e\u0441\u0442\u0430\u0432\u043a\u043e\u0439 \u043f\u043e \u0420\u043e\u0441\u0441\u0438\u0438',\n },\n 'sitemap': {\n 'slug': 'sitemap',\n 'h1': '\u041a\u0430\u0440\u0442\u0430 \u0441\u0430\u0439\u0442\u0430',\n 'name': '\u041a\u0430\u0440\u0442\u0430 \u0441\u0430\u0439\u0442\u0430',\n },\n 'order': {\n 'slug': 'order',\n 'name': '\u041e\u0444\u043e\u0440\u043c\u043b\u0435\u043d\u0438\u0435 \u0437\u0430\u043a\u0430\u0437\u0430',\n 'title': '\u041a\u043e\u0440\u0437\u0438\u043d\u0430 \u0418\u043d\u0442\u0435\u0440\u043d\u0435\u0442-\u043c\u0430\u0433\u0430\u0437\u0438\u043d shopelectro.ru \u0421\u0430\u043d\u043a\u0442-\u041f\u0435\u0442\u0435\u0440\u0431\u0443\u0440\u0433',\n },\n 'search': {\n 'slug': 'search',\n 'name': '\u0420\u0435\u0437\u0443\u043b\u044c\u0442\u0430\u0442\u044b \u043f\u043e\u0438\u0441\u043a\u0430',\n },\n 'catalog': {\n 'slug': 'catalog',\n 'name': '\u041a\u0430\u0442\u0430\u043b\u043e\u0433 \u0442\u043e\u0432\u0430\u0440\u043e\u0432',\n 'menu_title': '\u041a\u0430\u0442\u0430\u043b\u043e\u0433',\n },\n 'order_success': {\n 'slug': 'order-success',\n 'name': '\u0417\u0430\u043a\u0430\u0437 \u043f\u0440\u0438\u043d\u044f\u0442',\n }\n}\n", "path": "shopelectro/settings/base.py"}]} | 3,552 | 136 |
gh_patches_debug_10792 | rasdani/github-patches | git_diff | AnalogJ__lexicon-1100 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
transip provider needs requests>=2.27.0 in dependency spec
The update of the transip provider from #1086 included in 3.9.2 uses `from requests.exceptions import JSONDecodeError` which was first included in requests 2.27.0 but the requirements spec only requires `>=2`:
https://github.com/AnalogJ/lexicon/blob/d8b26bcfffd484f34444c91ceb3ced4a2583bd18/pyproject.toml#L43
This is passed directly through to the requirement spec in the wheel:
```
Requires-Dist: requests (>=2)
```
And means that someone who upgrades dns-lexicon from a previous version using pip will get an import error.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lexicon/providers/transip.py`
Content:
```
1 """Module provider for TransIP"""
2 import binascii
3 import json
4 import logging
5 import uuid
6 from base64 import b64decode, b64encode
7 from typing import Any, Dict, List, Optional
8
9 import requests
10 from cryptography.hazmat.backends import default_backend
11 from cryptography.hazmat.primitives import hashes
12 from cryptography.hazmat.primitives.asymmetric import padding
13 from cryptography.hazmat.primitives.serialization import load_pem_private_key
14 from requests.exceptions import JSONDecodeError
15
16 from lexicon.exceptions import LexiconError
17 from lexicon.providers.base import Provider as BaseProvider
18
19 LOGGER = logging.getLogger(__name__)
20
21 NAMESERVER_DOMAINS: List[str] = []
22
23 API_BASE_URL = "https://api.transip.nl/v6"
24
25
26 def provider_parser(subparser):
27 """Configure provider parser for TransIP"""
28 subparser.add_argument(
29 "--auth-username", help="specify username for authentication"
30 )
31 subparser.add_argument(
32 "--auth-api-key",
33 help="specify the private key to use for API authentication, in PEM format: can be either "
34 "the path of the key file (eg. /tmp/key.pem) or the base64 encoded content of this "
35 "file prefixed by 'base64::' (eg. base64::eyJhbGciOyJ...)",
36 )
37 subparser.add_argument(
38 "--auth-key-is-global",
39 action="store_true",
40 help="set this flag is the private key used is a global key with no IP whitelist restriction",
41 )
42
43
44 class Provider(BaseProvider):
45 """
46 Provider class for TransIP
47
48 provider_options can be overwritten by a Provider to setup custom defaults.
49 They will be overwritten by any options set via the CLI or Env.
50 order is:
51
52 """
53
54 def __init__(self, config):
55 super(Provider, self).__init__(config)
56 self.provider_name = "transip"
57 self.domain_id = None
58
59 private_key_conf = self._get_provider_option("auth_api_key")
60 if private_key_conf.startswith("base64::"):
61 private_key_bytes = b64decode(private_key_conf.replace("base64::", ""))
62 else:
63 with open(
64 private_key_conf,
65 "rb",
66 ) as file:
67 private_key_bytes = file.read()
68
69 self.private_key = load_pem_private_key(private_key_bytes, password=None)
70 self.token: str
71
72 def _authenticate(self):
73 request_body = {
74 "login": self._get_provider_option("auth_username"),
75 "nonce": uuid.uuid4().hex,
76 "global_key": self._get_provider_option("auth_key_is_global") or False,
77 }
78
79 request_body_bytes = json.dumps(request_body).encode()
80
81 signature = self.private_key.sign(
82 request_body_bytes,
83 padding.PKCS1v15(),
84 hashes.SHA512(),
85 )
86
87 headers = {"Signature": b64encode(signature).decode()}
88
89 response = requests.request(
90 "POST", f"{API_BASE_URL}/auth", json=request_body, headers=headers
91 )
92 response.raise_for_status()
93
94 self.token = response.json()["token"]
95
96 data = self._get(f"/domains/{self.domain}")
97
98 self.domain_id = data["domain"]["authCode"]
99
100 def _create_record(self, rtype: str, name: str, content: str) -> bool:
101 if not rtype or not name or not content:
102 raise Exception(
103 "Error, rtype, name and content are mandatory to create a record."
104 )
105
106 identifier = Provider._identifier(
107 {"type": rtype, "name": self._full_name(name), "content": content}
108 )
109
110 if any(
111 record
112 for record in self._list_records(rtype=rtype, name=name, content=content)
113 if record["id"] == identifier
114 ):
115 LOGGER.debug("create_record (ignored, duplicate): %s", identifier)
116 return True
117
118 data = {
119 "dnsEntry": {
120 "type": rtype,
121 "name": self._relative_name(name),
122 "content": content,
123 "expire": self._get_lexicon_option("ttl"),
124 },
125 }
126
127 self._post(f"/domains/{self.domain}/dns", data=data)
128
129 LOGGER.debug("create_record: %s", identifier)
130
131 return True
132
133 def _list_records(
134 self,
135 rtype: Optional[str] = None,
136 name: Optional[str] = None,
137 content: Optional[str] = None,
138 ) -> List[Dict[str, Any]]:
139 data = self._get(f"/domains/{self.domain}/dns")
140
141 records = []
142 for entry in data["dnsEntries"]:
143 record = {
144 "type": entry["type"],
145 "name": self._full_name(entry["name"]),
146 "ttl": entry["expire"],
147 "content": entry["content"],
148 }
149 record["id"] = Provider._identifier(record)
150 records.append(record)
151
152 if rtype:
153 records = [record for record in records if record["type"] == rtype]
154 if name:
155 records = [
156 record for record in records if record["name"] == self._full_name(name)
157 ]
158 if content:
159 records = [record for record in records if record["content"] == content]
160
161 LOGGER.debug("list_records: %s", records)
162
163 return records
164
165 def _update_record(
166 self,
167 identifier: Optional[str] = None,
168 rtype: Optional[str] = None,
169 name: Optional[str] = None,
170 content: Optional[str] = None,
171 ) -> bool:
172 if not identifier and (not rtype or not name):
173 raise Exception("Error, identifier or rtype+name parameters are required.")
174
175 if identifier:
176 records = self._list_records()
177 records_to_update = [
178 record for record in records if record["id"] == identifier
179 ]
180 else:
181 records_to_update = self._list_records(rtype=rtype, name=name)
182
183 if not records_to_update:
184 raise Exception(
185 f"Error, could not find a record for given identifier: {identifier}"
186 )
187
188 if len(records_to_update) > 1:
189 LOGGER.warning(
190 "Warning, multiple records found for given parameters, "
191 "only first one will be updated: %s",
192 records_to_update,
193 )
194
195 record = records_to_update[0]
196
197 # TransIP API is not designed to update one record out of several records
198 # matching the same type+name (eg. multi-valued TXT entries).
199 # To circumvent the limitation, we remove first the record to update, then
200 # recreate it with the updated content.
201
202 data = {
203 "dnsEntry": {
204 "type": record["type"],
205 "name": self._relative_name(record["name"]),
206 "content": record["content"],
207 "expire": record["ttl"],
208 },
209 }
210
211 self._request("DELETE", f"/domains/{self.domain}/dns", data=data)
212
213 data["dnsEntry"]["content"] = content
214
215 self._post(f"/domains/{self.domain}/dns", data=data)
216
217 LOGGER.debug("update_record: %s", record["id"])
218
219 return True
220
221 def _delete_record(
222 self,
223 identifier: Optional[str] = None,
224 rtype: Optional[str] = None,
225 name: Optional[str] = None,
226 content: Optional[str] = None,
227 ) -> bool:
228 if identifier:
229 records = self._list_records()
230 records = [record for record in records if record["id"] == identifier]
231
232 if not records:
233 raise LexiconError(
234 f"Could not find a record matching the identifier provider: {identifier}"
235 )
236 else:
237 records = self._list_records(rtype, name, content)
238
239 for record in records:
240 data = {
241 "dnsEntry": {
242 "type": record["type"],
243 "name": self._relative_name(record["name"]),
244 "content": record["content"],
245 "expire": record["ttl"],
246 },
247 }
248
249 self._request("DELETE", f"/domains/{self.domain}/dns", data=data)
250
251 LOGGER.debug("delete_records: %s %s %s %s", identifier, rtype, name, content)
252
253 return True
254
255 def _request(
256 self,
257 action: str = "GET",
258 url: str = "/",
259 data: Optional[Dict] = None,
260 query_params: Optional[Dict] = None,
261 ) -> Optional[Dict[str, Any]]:
262 response = requests.request(
263 action,
264 f"{API_BASE_URL}{url}",
265 params=query_params,
266 json=data,
267 headers={"Authorization": f"Bearer {self.token}"},
268 )
269
270 response.raise_for_status()
271
272 try:
273 return response.json()
274 except JSONDecodeError:
275 return None
276
277 @staticmethod
278 def _identifier(record):
279 digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
280 digest.update(("type=" + record.get("type", "") + ",").encode("utf-8"))
281 digest.update(("name=" + record.get("name", "") + ",").encode("utf-8"))
282 digest.update(("content=" + record.get("content", "") + ",").encode("utf-8"))
283
284 return binascii.hexlify(digest.finalize()).decode("utf-8")[0:7]
285
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lexicon/providers/transip.py b/lexicon/providers/transip.py
--- a/lexicon/providers/transip.py
+++ b/lexicon/providers/transip.py
@@ -11,7 +11,11 @@
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.serialization import load_pem_private_key
-from requests.exceptions import JSONDecodeError
+
+try:
+ from simplejson import JSONDecodeError
+except ImportError:
+ from json import JSONDecodeError # type: ignore[misc]
from lexicon.exceptions import LexiconError
from lexicon.providers.base import Provider as BaseProvider
| {"golden_diff": "diff --git a/lexicon/providers/transip.py b/lexicon/providers/transip.py\n--- a/lexicon/providers/transip.py\n+++ b/lexicon/providers/transip.py\n@@ -11,7 +11,11 @@\n from cryptography.hazmat.primitives import hashes\n from cryptography.hazmat.primitives.asymmetric import padding\n from cryptography.hazmat.primitives.serialization import load_pem_private_key\n-from requests.exceptions import JSONDecodeError\n+\n+try:\n+ from simplejson import JSONDecodeError\n+except ImportError:\n+ from json import JSONDecodeError # type: ignore[misc]\n \n from lexicon.exceptions import LexiconError\n from lexicon.providers.base import Provider as BaseProvider\n", "issue": "transip provider needs requests>=2.27.0 in dependency spec\nThe update of the transip provider from #1086 included in 3.9.2 uses `from requests.exceptions import JSONDecodeError` which was first included in requests 2.27.0 but the requirements spec only requires `>=2`:\r\nhttps://github.com/AnalogJ/lexicon/blob/d8b26bcfffd484f34444c91ceb3ced4a2583bd18/pyproject.toml#L43\r\n\r\nThis is passed directly through to the requirement spec in the wheel:\r\n```\r\nRequires-Dist: requests (>=2)\r\n```\r\n\r\nAnd means that someone who upgrades dns-lexicon from a previous version using pip will get an import error.\n", "before_files": [{"content": "\"\"\"Module provider for TransIP\"\"\"\nimport binascii\nimport json\nimport logging\nimport uuid\nfrom base64 import b64decode, b64encode\nfrom typing import Any, Dict, List, Optional\n\nimport requests\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives.serialization import load_pem_private_key\nfrom requests.exceptions import JSONDecodeError\n\nfrom lexicon.exceptions import LexiconError\nfrom lexicon.providers.base import Provider as BaseProvider\n\nLOGGER = logging.getLogger(__name__)\n\nNAMESERVER_DOMAINS: List[str] = []\n\nAPI_BASE_URL = \"https://api.transip.nl/v6\"\n\n\ndef provider_parser(subparser):\n \"\"\"Configure provider parser for TransIP\"\"\"\n subparser.add_argument(\n \"--auth-username\", help=\"specify username for authentication\"\n )\n subparser.add_argument(\n \"--auth-api-key\",\n help=\"specify the private key to use for API authentication, in PEM format: can be either \"\n \"the path of the key file (eg. /tmp/key.pem) or the base64 encoded content of this \"\n \"file prefixed by 'base64::' (eg. 
base64::eyJhbGciOyJ...)\",\n )\n subparser.add_argument(\n \"--auth-key-is-global\",\n action=\"store_true\",\n help=\"set this flag is the private key used is a global key with no IP whitelist restriction\",\n )\n\n\nclass Provider(BaseProvider):\n \"\"\"\n Provider class for TransIP\n\n provider_options can be overwritten by a Provider to setup custom defaults.\n They will be overwritten by any options set via the CLI or Env.\n order is:\n\n \"\"\"\n\n def __init__(self, config):\n super(Provider, self).__init__(config)\n self.provider_name = \"transip\"\n self.domain_id = None\n\n private_key_conf = self._get_provider_option(\"auth_api_key\")\n if private_key_conf.startswith(\"base64::\"):\n private_key_bytes = b64decode(private_key_conf.replace(\"base64::\", \"\"))\n else:\n with open(\n private_key_conf,\n \"rb\",\n ) as file:\n private_key_bytes = file.read()\n\n self.private_key = load_pem_private_key(private_key_bytes, password=None)\n self.token: str\n\n def _authenticate(self):\n request_body = {\n \"login\": self._get_provider_option(\"auth_username\"),\n \"nonce\": uuid.uuid4().hex,\n \"global_key\": self._get_provider_option(\"auth_key_is_global\") or False,\n }\n\n request_body_bytes = json.dumps(request_body).encode()\n\n signature = self.private_key.sign(\n request_body_bytes,\n padding.PKCS1v15(),\n hashes.SHA512(),\n )\n\n headers = {\"Signature\": b64encode(signature).decode()}\n\n response = requests.request(\n \"POST\", f\"{API_BASE_URL}/auth\", json=request_body, headers=headers\n )\n response.raise_for_status()\n\n self.token = response.json()[\"token\"]\n\n data = self._get(f\"/domains/{self.domain}\")\n\n self.domain_id = data[\"domain\"][\"authCode\"]\n\n def _create_record(self, rtype: str, name: str, content: str) -> bool:\n if not rtype or not name or not content:\n raise Exception(\n \"Error, rtype, name and content are mandatory to create a record.\"\n )\n\n identifier = Provider._identifier(\n {\"type\": rtype, \"name\": self._full_name(name), \"content\": content}\n )\n\n if any(\n record\n for record in self._list_records(rtype=rtype, name=name, content=content)\n if record[\"id\"] == identifier\n ):\n LOGGER.debug(\"create_record (ignored, duplicate): %s\", identifier)\n return True\n\n data = {\n \"dnsEntry\": {\n \"type\": rtype,\n \"name\": self._relative_name(name),\n \"content\": content,\n \"expire\": self._get_lexicon_option(\"ttl\"),\n },\n }\n\n self._post(f\"/domains/{self.domain}/dns\", data=data)\n\n LOGGER.debug(\"create_record: %s\", identifier)\n\n return True\n\n def _list_records(\n self,\n rtype: Optional[str] = None,\n name: Optional[str] = None,\n content: Optional[str] = None,\n ) -> List[Dict[str, Any]]:\n data = self._get(f\"/domains/{self.domain}/dns\")\n\n records = []\n for entry in data[\"dnsEntries\"]:\n record = {\n \"type\": entry[\"type\"],\n \"name\": self._full_name(entry[\"name\"]),\n \"ttl\": entry[\"expire\"],\n \"content\": entry[\"content\"],\n }\n record[\"id\"] = Provider._identifier(record)\n records.append(record)\n\n if rtype:\n records = [record for record in records if record[\"type\"] == rtype]\n if name:\n records = [\n record for record in records if record[\"name\"] == self._full_name(name)\n ]\n if content:\n records = [record for record in records if record[\"content\"] == content]\n\n LOGGER.debug(\"list_records: %s\", records)\n\n return records\n\n def _update_record(\n self,\n identifier: Optional[str] = None,\n rtype: Optional[str] = None,\n name: Optional[str] = None,\n content: Optional[str] = 
None,\n ) -> bool:\n if not identifier and (not rtype or not name):\n raise Exception(\"Error, identifier or rtype+name parameters are required.\")\n\n if identifier:\n records = self._list_records()\n records_to_update = [\n record for record in records if record[\"id\"] == identifier\n ]\n else:\n records_to_update = self._list_records(rtype=rtype, name=name)\n\n if not records_to_update:\n raise Exception(\n f\"Error, could not find a record for given identifier: {identifier}\"\n )\n\n if len(records_to_update) > 1:\n LOGGER.warning(\n \"Warning, multiple records found for given parameters, \"\n \"only first one will be updated: %s\",\n records_to_update,\n )\n\n record = records_to_update[0]\n\n # TransIP API is not designed to update one record out of several records\n # matching the same type+name (eg. multi-valued TXT entries).\n # To circumvent the limitation, we remove first the record to update, then\n # recreate it with the updated content.\n\n data = {\n \"dnsEntry\": {\n \"type\": record[\"type\"],\n \"name\": self._relative_name(record[\"name\"]),\n \"content\": record[\"content\"],\n \"expire\": record[\"ttl\"],\n },\n }\n\n self._request(\"DELETE\", f\"/domains/{self.domain}/dns\", data=data)\n\n data[\"dnsEntry\"][\"content\"] = content\n\n self._post(f\"/domains/{self.domain}/dns\", data=data)\n\n LOGGER.debug(\"update_record: %s\", record[\"id\"])\n\n return True\n\n def _delete_record(\n self,\n identifier: Optional[str] = None,\n rtype: Optional[str] = None,\n name: Optional[str] = None,\n content: Optional[str] = None,\n ) -> bool:\n if identifier:\n records = self._list_records()\n records = [record for record in records if record[\"id\"] == identifier]\n\n if not records:\n raise LexiconError(\n f\"Could not find a record matching the identifier provider: {identifier}\"\n )\n else:\n records = self._list_records(rtype, name, content)\n\n for record in records:\n data = {\n \"dnsEntry\": {\n \"type\": record[\"type\"],\n \"name\": self._relative_name(record[\"name\"]),\n \"content\": record[\"content\"],\n \"expire\": record[\"ttl\"],\n },\n }\n\n self._request(\"DELETE\", f\"/domains/{self.domain}/dns\", data=data)\n\n LOGGER.debug(\"delete_records: %s %s %s %s\", identifier, rtype, name, content)\n\n return True\n\n def _request(\n self,\n action: str = \"GET\",\n url: str = \"/\",\n data: Optional[Dict] = None,\n query_params: Optional[Dict] = None,\n ) -> Optional[Dict[str, Any]]:\n response = requests.request(\n action,\n f\"{API_BASE_URL}{url}\",\n params=query_params,\n json=data,\n headers={\"Authorization\": f\"Bearer {self.token}\"},\n )\n\n response.raise_for_status()\n\n try:\n return response.json()\n except JSONDecodeError:\n return None\n\n @staticmethod\n def _identifier(record):\n digest = hashes.Hash(hashes.SHA256(), backend=default_backend())\n digest.update((\"type=\" + record.get(\"type\", \"\") + \",\").encode(\"utf-8\"))\n digest.update((\"name=\" + record.get(\"name\", \"\") + \",\").encode(\"utf-8\"))\n digest.update((\"content=\" + record.get(\"content\", \"\") + \",\").encode(\"utf-8\"))\n\n return binascii.hexlify(digest.finalize()).decode(\"utf-8\")[0:7]\n", "path": "lexicon/providers/transip.py"}], "after_files": [{"content": "\"\"\"Module provider for TransIP\"\"\"\nimport binascii\nimport json\nimport logging\nimport uuid\nfrom base64 import b64decode, b64encode\nfrom typing import Any, Dict, List, Optional\n\nimport requests\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import 
hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives.serialization import load_pem_private_key\n\ntry:\n from simplejson import JSONDecodeError\nexcept ImportError:\n from json import JSONDecodeError # type: ignore[misc]\n\nfrom lexicon.exceptions import LexiconError\nfrom lexicon.providers.base import Provider as BaseProvider\n\nLOGGER = logging.getLogger(__name__)\n\nNAMESERVER_DOMAINS: List[str] = []\n\nAPI_BASE_URL = \"https://api.transip.nl/v6\"\n\n\ndef provider_parser(subparser):\n \"\"\"Configure provider parser for TransIP\"\"\"\n subparser.add_argument(\n \"--auth-username\", help=\"specify username for authentication\"\n )\n subparser.add_argument(\n \"--auth-api-key\",\n help=\"specify the private key to use for API authentication, in PEM format: can be either \"\n \"the path of the key file (eg. /tmp/key.pem) or the base64 encoded content of this \"\n \"file prefixed by 'base64::' (eg. base64::eyJhbGciOyJ...)\",\n )\n subparser.add_argument(\n \"--auth-key-is-global\",\n action=\"store_true\",\n help=\"set this flag is the private key used is a global key with no IP whitelist restriction\",\n )\n\n\nclass Provider(BaseProvider):\n \"\"\"\n Provider class for TransIP\n\n provider_options can be overwritten by a Provider to setup custom defaults.\n They will be overwritten by any options set via the CLI or Env.\n order is:\n\n \"\"\"\n\n def __init__(self, config):\n super(Provider, self).__init__(config)\n self.provider_name = \"transip\"\n self.domain_id = None\n\n private_key_conf = self._get_provider_option(\"auth_api_key\")\n if private_key_conf.startswith(\"base64::\"):\n private_key_bytes = b64decode(private_key_conf.replace(\"base64::\", \"\"))\n else:\n with open(\n private_key_conf,\n \"rb\",\n ) as file:\n private_key_bytes = file.read()\n\n self.private_key = load_pem_private_key(private_key_bytes, password=None)\n self.token: str\n\n def _authenticate(self):\n request_body = {\n \"login\": self._get_provider_option(\"auth_username\"),\n \"nonce\": uuid.uuid4().hex,\n \"global_key\": self._get_provider_option(\"auth_key_is_global\") or False,\n }\n\n request_body_bytes = json.dumps(request_body).encode()\n\n signature = self.private_key.sign(\n request_body_bytes,\n padding.PKCS1v15(),\n hashes.SHA512(),\n )\n\n headers = {\"Signature\": b64encode(signature).decode()}\n\n response = requests.request(\n \"POST\", f\"{API_BASE_URL}/auth\", json=request_body, headers=headers\n )\n response.raise_for_status()\n\n self.token = response.json()[\"token\"]\n\n data = self._get(f\"/domains/{self.domain}\")\n\n self.domain_id = data[\"domain\"][\"authCode\"]\n\n def _create_record(self, rtype: str, name: str, content: str) -> bool:\n if not rtype or not name or not content:\n raise Exception(\n \"Error, rtype, name and content are mandatory to create a record.\"\n )\n\n identifier = Provider._identifier(\n {\"type\": rtype, \"name\": self._full_name(name), \"content\": content}\n )\n\n if any(\n record\n for record in self._list_records(rtype=rtype, name=name, content=content)\n if record[\"id\"] == identifier\n ):\n LOGGER.debug(\"create_record (ignored, duplicate): %s\", identifier)\n return True\n\n data = {\n \"dnsEntry\": {\n \"type\": rtype,\n \"name\": self._relative_name(name),\n \"content\": content,\n \"expire\": self._get_lexicon_option(\"ttl\"),\n },\n }\n\n self._post(f\"/domains/{self.domain}/dns\", data=data)\n\n LOGGER.debug(\"create_record: %s\", identifier)\n\n return True\n\n def _list_records(\n self,\n 
rtype: Optional[str] = None,\n name: Optional[str] = None,\n content: Optional[str] = None,\n ) -> List[Dict[str, Any]]:\n data = self._get(f\"/domains/{self.domain}/dns\")\n\n records = []\n for entry in data[\"dnsEntries\"]:\n record = {\n \"type\": entry[\"type\"],\n \"name\": self._full_name(entry[\"name\"]),\n \"ttl\": entry[\"expire\"],\n \"content\": entry[\"content\"],\n }\n record[\"id\"] = Provider._identifier(record)\n records.append(record)\n\n if rtype:\n records = [record for record in records if record[\"type\"] == rtype]\n if name:\n records = [\n record for record in records if record[\"name\"] == self._full_name(name)\n ]\n if content:\n records = [record for record in records if record[\"content\"] == content]\n\n LOGGER.debug(\"list_records: %s\", records)\n\n return records\n\n def _update_record(\n self,\n identifier: Optional[str] = None,\n rtype: Optional[str] = None,\n name: Optional[str] = None,\n content: Optional[str] = None,\n ) -> bool:\n if not identifier and (not rtype or not name):\n raise Exception(\"Error, identifier or rtype+name parameters are required.\")\n\n if identifier:\n records = self._list_records()\n records_to_update = [\n record for record in records if record[\"id\"] == identifier\n ]\n else:\n records_to_update = self._list_records(rtype=rtype, name=name)\n\n if not records_to_update:\n raise Exception(\n f\"Error, could not find a record for given identifier: {identifier}\"\n )\n\n if len(records_to_update) > 1:\n LOGGER.warning(\n \"Warning, multiple records found for given parameters, \"\n \"only first one will be updated: %s\",\n records_to_update,\n )\n\n record = records_to_update[0]\n\n # TransIP API is not designed to update one record out of several records\n # matching the same type+name (eg. 
multi-valued TXT entries).\n # To circumvent the limitation, we remove first the record to update, then\n # recreate it with the updated content.\n\n data = {\n \"dnsEntry\": {\n \"type\": record[\"type\"],\n \"name\": self._relative_name(record[\"name\"]),\n \"content\": record[\"content\"],\n \"expire\": record[\"ttl\"],\n },\n }\n\n self._request(\"DELETE\", f\"/domains/{self.domain}/dns\", data=data)\n\n data[\"dnsEntry\"][\"content\"] = content\n\n self._post(f\"/domains/{self.domain}/dns\", data=data)\n\n LOGGER.debug(\"update_record: %s\", record[\"id\"])\n\n return True\n\n def _delete_record(\n self,\n identifier: Optional[str] = None,\n rtype: Optional[str] = None,\n name: Optional[str] = None,\n content: Optional[str] = None,\n ) -> bool:\n if identifier:\n records = self._list_records()\n records = [record for record in records if record[\"id\"] == identifier]\n\n if not records:\n raise LexiconError(\n f\"Could not find a record matching the identifier provider: {identifier}\"\n )\n else:\n records = self._list_records(rtype, name, content)\n\n for record in records:\n data = {\n \"dnsEntry\": {\n \"type\": record[\"type\"],\n \"name\": self._relative_name(record[\"name\"]),\n \"content\": record[\"content\"],\n \"expire\": record[\"ttl\"],\n },\n }\n\n self._request(\"DELETE\", f\"/domains/{self.domain}/dns\", data=data)\n\n LOGGER.debug(\"delete_records: %s %s %s %s\", identifier, rtype, name, content)\n\n return True\n\n def _request(\n self,\n action: str = \"GET\",\n url: str = \"/\",\n data: Optional[Dict] = None,\n query_params: Optional[Dict] = None,\n ) -> Optional[Dict[str, Any]]:\n response = requests.request(\n action,\n f\"{API_BASE_URL}{url}\",\n params=query_params,\n json=data,\n headers={\"Authorization\": f\"Bearer {self.token}\"},\n )\n\n response.raise_for_status()\n\n try:\n return response.json()\n except JSONDecodeError:\n return None\n\n @staticmethod\n def _identifier(record):\n digest = hashes.Hash(hashes.SHA256(), backend=default_backend())\n digest.update((\"type=\" + record.get(\"type\", \"\") + \",\").encode(\"utf-8\"))\n digest.update((\"name=\" + record.get(\"name\", \"\") + \",\").encode(\"utf-8\"))\n digest.update((\"content=\" + record.get(\"content\", \"\") + \",\").encode(\"utf-8\"))\n\n return binascii.hexlify(digest.finalize()).decode(\"utf-8\")[0:7]\n", "path": "lexicon/providers/transip.py"}]} | 3,231 | 153 |
gh_patches_debug_33279 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-29682 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
One seemingly random "Unsupported URL", while others work just fine
## Checklist
- [x] I'm reporting a broken site support
- [x] I've verified that I'm running youtube-dl version **2021.06.06**
- [x] I've checked that all provided URLs are alive and playable in a browser
- [x] I've checked that all URLs and arguments with special characters are properly quoted or escaped
- [x] I've searched the bugtracker for similar issues including closed ones
## Verbose log
```
youtube-dl -v -F https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: ['-v', '-F', 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/']
[debug] Encodings: locale cp65001, fs mbcs, out cp65001, pref cp65001
[debug] youtube-dl version 2021.06.06
[debug] Python version 3.4.4 (CPython) - Windows-10-10.0.19041
[debug] exe versions: ffmpeg 4.2.3, ffprobe git-2019-12-17-bd83191
[debug] Proxy map: {}
[generic] 9999a34: Requesting header
WARNING: Falling back on generic information extractor.
[generic] 9999a34: Downloading webpage
[generic] 9999a34: Extracting information
ERROR: Unsupported URL: https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/
Traceback (most recent call last):
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\YoutubeDL.py", line 815, in wrapper
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\YoutubeDL.py", line 836, in __extract_info
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\extractor\common.py", line 534, in extract
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\ytdl-org\tmpkqxnwl31\build\youtube_dl\extractor\generic.py", line 3520, in _real_extract
youtube_dl.utils.UnsupportedError: Unsupported URL: https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/
```
## Description
I'm getting "Unsupported URL" for this single link. Every other URL I've tried from the nhk site (https://www3.nhk.or.jp/nhkworld/), formatted exactly like this one, downloads just fine.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `youtube_dl/extractor/nhk.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import re
4
5 from .common import InfoExtractor
6 from ..utils import urljoin
7
8
9 class NhkBaseIE(InfoExtractor):
10 _API_URL_TEMPLATE = 'https://api.nhk.or.jp/nhkworld/%sod%slist/v7a/%s/%s/%s/all%s.json'
11 _BASE_URL_REGEX = r'https?://www3\.nhk\.or\.jp/nhkworld/(?P<lang>[a-z]{2})/ondemand'
12 _TYPE_REGEX = r'/(?P<type>video|audio)/'
13
14 def _call_api(self, m_id, lang, is_video, is_episode, is_clip):
15 return self._download_json(
16 self._API_URL_TEMPLATE % (
17 'v' if is_video else 'r',
18 'clip' if is_clip else 'esd',
19 'episode' if is_episode else 'program',
20 m_id, lang, '/all' if is_video else ''),
21 m_id, query={'apikey': 'EJfK8jdS57GqlupFgAfAAwr573q01y6k'})['data']['episodes'] or []
22
23 def _extract_episode_info(self, url, episode=None):
24 fetch_episode = episode is None
25 lang, m_type, episode_id = re.match(NhkVodIE._VALID_URL, url).groups()
26 if episode_id.isdigit():
27 episode_id = episode_id[:4] + '-' + episode_id[4:]
28
29 is_video = m_type == 'video'
30 if fetch_episode:
31 episode = self._call_api(
32 episode_id, lang, is_video, True, episode_id[:4] == '9999')[0]
33 title = episode.get('sub_title_clean') or episode['sub_title']
34
35 def get_clean_field(key):
36 return episode.get(key + '_clean') or episode.get(key)
37
38 series = get_clean_field('title')
39
40 thumbnails = []
41 for s, w, h in [('', 640, 360), ('_l', 1280, 720)]:
42 img_path = episode.get('image' + s)
43 if not img_path:
44 continue
45 thumbnails.append({
46 'id': '%dp' % h,
47 'height': h,
48 'width': w,
49 'url': 'https://www3.nhk.or.jp' + img_path,
50 })
51
52 info = {
53 'id': episode_id + '-' + lang,
54 'title': '%s - %s' % (series, title) if series and title else title,
55 'description': get_clean_field('description'),
56 'thumbnails': thumbnails,
57 'series': series,
58 'episode': title,
59 }
60 if is_video:
61 vod_id = episode['vod_id']
62 info.update({
63 '_type': 'url_transparent',
64 'ie_key': 'Piksel',
65 'url': 'https://player.piksel.com/v/refid/nhkworld/prefid/' + vod_id,
66 'id': vod_id,
67 })
68 else:
69 if fetch_episode:
70 audio_path = episode['audio']['audio']
71 info['formats'] = self._extract_m3u8_formats(
72 'https://nhkworld-vh.akamaihd.net/i%s/master.m3u8' % audio_path,
73 episode_id, 'm4a', entry_protocol='m3u8_native',
74 m3u8_id='hls', fatal=False)
75 for f in info['formats']:
76 f['language'] = lang
77 else:
78 info.update({
79 '_type': 'url_transparent',
80 'ie_key': NhkVodIE.ie_key(),
81 'url': url,
82 })
83 return info
84
85
86 class NhkVodIE(NhkBaseIE):
87 _VALID_URL = r'%s%s(?P<id>\d{7}|[^/]+?-\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
88 # Content available only for a limited period of time. Visit
89 # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.
90 _TESTS = [{
91 # video clip
92 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999011/',
93 'md5': '7a90abcfe610ec22a6bfe15bd46b30ca',
94 'info_dict': {
95 'id': 'a95j5iza',
96 'ext': 'mp4',
97 'title': "Dining with the Chef - Chef Saito's Family recipe: MENCHI-KATSU",
98 'description': 'md5:5aee4a9f9d81c26281862382103b0ea5',
99 'timestamp': 1565965194,
100 'upload_date': '20190816',
101 },
102 }, {
103 # audio clip
104 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/r_inventions-20201104-1/',
105 'info_dict': {
106 'id': 'r_inventions-20201104-1-en',
107 'ext': 'm4a',
108 'title': "Japan's Top Inventions - Miniature Video Cameras",
109 'description': 'md5:07ea722bdbbb4936fdd360b6a480c25b',
110 },
111 'params': {
112 # m3u8 download
113 'skip_download': True,
114 },
115 }, {
116 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2015173/',
117 'only_matching': True,
118 }, {
119 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/plugin-20190404-1/',
120 'only_matching': True,
121 }, {
122 'url': 'https://www3.nhk.or.jp/nhkworld/fr/ondemand/audio/plugin-20190404-1/',
123 'only_matching': True,
124 }, {
125 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/j_art-20150903-1/',
126 'only_matching': True,
127 }]
128
129 def _real_extract(self, url):
130 return self._extract_episode_info(url)
131
132
133 class NhkVodProgramIE(NhkBaseIE):
134 _VALID_URL = r'%s/program%s(?P<id>[0-9a-z]+)(?:.+?\btype=(?P<episode_type>clip|(?:radio|tv)Episode))?' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
135 _TESTS = [{
136 # video program episodes
137 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway',
138 'info_dict': {
139 'id': 'japanrailway',
140 'title': 'Japan Railway Journal',
141 },
142 'playlist_mincount': 1,
143 }, {
144 # video program clips
145 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway/?type=clip',
146 'info_dict': {
147 'id': 'japanrailway',
148 'title': 'Japan Railway Journal',
149 },
150 'playlist_mincount': 5,
151 }, {
152 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/10yearshayaomiyazaki/',
153 'only_matching': True,
154 }, {
155 # audio program
156 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/audio/listener/',
157 'only_matching': True,
158 }]
159
160 def _real_extract(self, url):
161 lang, m_type, program_id, episode_type = re.match(self._VALID_URL, url).groups()
162
163 episodes = self._call_api(
164 program_id, lang, m_type == 'video', False, episode_type == 'clip')
165
166 entries = []
167 for episode in episodes:
168 episode_path = episode.get('url')
169 if not episode_path:
170 continue
171 entries.append(self._extract_episode_info(
172 urljoin(url, episode_path), episode))
173
174 program_title = None
175 if entries:
176 program_title = entries[0].get('series')
177
178 return self.playlist_result(entries, program_id, program_title)
179
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/youtube_dl/extractor/nhk.py b/youtube_dl/extractor/nhk.py
--- a/youtube_dl/extractor/nhk.py
+++ b/youtube_dl/extractor/nhk.py
@@ -1,3 +1,4 @@
+# coding: utf-8
from __future__ import unicode_literals
import re
@@ -23,7 +24,7 @@
def _extract_episode_info(self, url, episode=None):
fetch_episode = episode is None
lang, m_type, episode_id = re.match(NhkVodIE._VALID_URL, url).groups()
- if episode_id.isdigit():
+ if len(episode_id) == 7:
episode_id = episode_id[:4] + '-' + episode_id[4:]
is_video = m_type == 'video'
@@ -84,7 +85,8 @@
class NhkVodIE(NhkBaseIE):
- _VALID_URL = r'%s%s(?P<id>\d{7}|[^/]+?-\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
+ # the 7-character IDs can have alphabetic chars too: assume [a-z] rather than just [a-f], eg
+ _VALID_URL = r'%s%s(?P<id>[0-9a-z]{7}|[^/]+?-\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)
# Content available only for a limited period of time. Visit
# https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.
_TESTS = [{
@@ -124,6 +126,19 @@
}, {
'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/j_art-20150903-1/',
'only_matching': True,
+ }, {
+ # video, alphabetic character in ID #29670
+ 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/',
+ 'only_matching': True,
+ 'info_dict': {
+ 'id': 'qfjay6cg',
+ 'ext': 'mp4',
+ 'title': 'DESIGN TALKS plus - Fishermen’s Finery',
+ 'description': 'md5:8a8f958aaafb0d7cb59d38de53f1e448',
+ 'thumbnail': r're:^https?:/(/[a-z0-9.-]+)+\.jpg\?w=1920&h=1080$',
+ 'upload_date': '20210615',
+ 'timestamp': 1623722008,
+ }
}]
def _real_extract(self, url):
| {"golden_diff": "diff --git a/youtube_dl/extractor/nhk.py b/youtube_dl/extractor/nhk.py\n--- a/youtube_dl/extractor/nhk.py\n+++ b/youtube_dl/extractor/nhk.py\n@@ -1,3 +1,4 @@\n+# coding: utf-8\n from __future__ import unicode_literals\n \n import re\n@@ -23,7 +24,7 @@\n def _extract_episode_info(self, url, episode=None):\n fetch_episode = episode is None\n lang, m_type, episode_id = re.match(NhkVodIE._VALID_URL, url).groups()\n- if episode_id.isdigit():\n+ if len(episode_id) == 7:\n episode_id = episode_id[:4] + '-' + episode_id[4:]\n \n is_video = m_type == 'video'\n@@ -84,7 +85,8 @@\n \n \n class NhkVodIE(NhkBaseIE):\n- _VALID_URL = r'%s%s(?P<id>\\d{7}|[^/]+?-\\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)\n+ # the 7-character IDs can have alphabetic chars too: assume [a-z] rather than just [a-f], eg\n+ _VALID_URL = r'%s%s(?P<id>[0-9a-z]{7}|[^/]+?-\\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)\n # Content available only for a limited period of time. Visit\n # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.\n _TESTS = [{\n@@ -124,6 +126,19 @@\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/j_art-20150903-1/',\n 'only_matching': True,\n+ }, {\n+ # video, alphabetic character in ID #29670\n+ 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/',\n+ 'only_matching': True,\n+ 'info_dict': {\n+ 'id': 'qfjay6cg',\n+ 'ext': 'mp4',\n+ 'title': 'DESIGN TALKS plus - Fishermen\u2019s Finery',\n+ 'description': 'md5:8a8f958aaafb0d7cb59d38de53f1e448',\n+ 'thumbnail': r're:^https?:/(/[a-z0-9.-]+)+\\.jpg\\?w=1920&h=1080$',\n+ 'upload_date': '20210615',\n+ 'timestamp': 1623722008,\n+ }\n }]\n \n def _real_extract(self, url):\n", "issue": "One seemingly random \"Unsupported URL\", while others works just fine\n\r\n## Checklist\r\n\r\n- [x] I'm reporting a broken site support\r\n- [x] I've verified that I'm running youtube-dl version **2021.06.06**\r\n- [x] I've checked that all provided URLs are alive and playable in a browser\r\n- [x] I've checked that all URLs and arguments with special characters are properly quoted or escaped\r\n- [x] I've searched the bugtracker for similar issues including closed ones\r\n\r\n\r\n## Verbose log\r\n\r\n\r\n```\r\nyoutube-dl -v -F https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: ['-v', '-F', 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/']\r\n[debug] Encodings: locale cp65001, fs mbcs, out cp65001, pref cp65001\r\n[debug] youtube-dl version 2021.06.06\r\n[debug] Python version 3.4.4 (CPython) - Windows-10-10.0.19041\r\n[debug] exe versions: ffmpeg 4.2.3, ffprobe git-2019-12-17-bd83191\r\n[debug] Proxy map: {}\r\n[generic] 9999a34: Requesting header\r\nWARNING: Falling back on generic information extractor.\r\n[generic] 9999a34: Downloading webpage\r\n[generic] 9999a34: Extracting information\r\nERROR: Unsupported URL: https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\ytdl-org\\tmpkqxnwl31\\build\\youtube_dl\\YoutubeDL.py\", line 815, in wrapper\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\ytdl-org\\tmpkqxnwl31\\build\\youtube_dl\\YoutubeDL.py\", line 836, in __extract_info\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build 
archive\\youtube-dl\\ytdl-org\\tmpkqxnwl31\\build\\youtube_dl\\extractor\\common.py\", line 534, in extract\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\ytdl-org\\tmpkqxnwl31\\build\\youtube_dl\\extractor\\generic.py\", line 3520, in _real_extract\r\nyoutube_dl.utils.UnsupportedError: Unsupported URL: https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/\r\n\r\n```\r\n\r\n\r\n## Description\r\n\r\nI'm getting \" Unsupported URL\" for this single link. Every other url I've tried from the nhk site (https://www3.nhk.or.jp/nhkworld/), formatted exactly like this one, downloads just fine.\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import urljoin\n\n\nclass NhkBaseIE(InfoExtractor):\n _API_URL_TEMPLATE = 'https://api.nhk.or.jp/nhkworld/%sod%slist/v7a/%s/%s/%s/all%s.json'\n _BASE_URL_REGEX = r'https?://www3\\.nhk\\.or\\.jp/nhkworld/(?P<lang>[a-z]{2})/ondemand'\n _TYPE_REGEX = r'/(?P<type>video|audio)/'\n\n def _call_api(self, m_id, lang, is_video, is_episode, is_clip):\n return self._download_json(\n self._API_URL_TEMPLATE % (\n 'v' if is_video else 'r',\n 'clip' if is_clip else 'esd',\n 'episode' if is_episode else 'program',\n m_id, lang, '/all' if is_video else ''),\n m_id, query={'apikey': 'EJfK8jdS57GqlupFgAfAAwr573q01y6k'})['data']['episodes'] or []\n\n def _extract_episode_info(self, url, episode=None):\n fetch_episode = episode is None\n lang, m_type, episode_id = re.match(NhkVodIE._VALID_URL, url).groups()\n if episode_id.isdigit():\n episode_id = episode_id[:4] + '-' + episode_id[4:]\n\n is_video = m_type == 'video'\n if fetch_episode:\n episode = self._call_api(\n episode_id, lang, is_video, True, episode_id[:4] == '9999')[0]\n title = episode.get('sub_title_clean') or episode['sub_title']\n\n def get_clean_field(key):\n return episode.get(key + '_clean') or episode.get(key)\n\n series = get_clean_field('title')\n\n thumbnails = []\n for s, w, h in [('', 640, 360), ('_l', 1280, 720)]:\n img_path = episode.get('image' + s)\n if not img_path:\n continue\n thumbnails.append({\n 'id': '%dp' % h,\n 'height': h,\n 'width': w,\n 'url': 'https://www3.nhk.or.jp' + img_path,\n })\n\n info = {\n 'id': episode_id + '-' + lang,\n 'title': '%s - %s' % (series, title) if series and title else title,\n 'description': get_clean_field('description'),\n 'thumbnails': thumbnails,\n 'series': series,\n 'episode': title,\n }\n if is_video:\n vod_id = episode['vod_id']\n info.update({\n '_type': 'url_transparent',\n 'ie_key': 'Piksel',\n 'url': 'https://player.piksel.com/v/refid/nhkworld/prefid/' + vod_id,\n 'id': vod_id,\n })\n else:\n if fetch_episode:\n audio_path = episode['audio']['audio']\n info['formats'] = self._extract_m3u8_formats(\n 'https://nhkworld-vh.akamaihd.net/i%s/master.m3u8' % audio_path,\n episode_id, 'm4a', entry_protocol='m3u8_native',\n m3u8_id='hls', fatal=False)\n for f in info['formats']:\n f['language'] = lang\n else:\n info.update({\n '_type': 'url_transparent',\n 'ie_key': NhkVodIE.ie_key(),\n 'url': url,\n })\n return info\n\n\nclass NhkVodIE(NhkBaseIE):\n _VALID_URL = r'%s%s(?P<id>\\d{7}|[^/]+?-\\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)\n # Content available only for a limited period of time. 
Visit\n # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.\n _TESTS = [{\n # video clip\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999011/',\n 'md5': '7a90abcfe610ec22a6bfe15bd46b30ca',\n 'info_dict': {\n 'id': 'a95j5iza',\n 'ext': 'mp4',\n 'title': \"Dining with the Chef - Chef Saito's Family recipe: MENCHI-KATSU\",\n 'description': 'md5:5aee4a9f9d81c26281862382103b0ea5',\n 'timestamp': 1565965194,\n 'upload_date': '20190816',\n },\n }, {\n # audio clip\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/r_inventions-20201104-1/',\n 'info_dict': {\n 'id': 'r_inventions-20201104-1-en',\n 'ext': 'm4a',\n 'title': \"Japan's Top Inventions - Miniature Video Cameras\",\n 'description': 'md5:07ea722bdbbb4936fdd360b6a480c25b',\n },\n 'params': {\n # m3u8 download\n 'skip_download': True,\n },\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2015173/',\n 'only_matching': True,\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/plugin-20190404-1/',\n 'only_matching': True,\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/fr/ondemand/audio/plugin-20190404-1/',\n 'only_matching': True,\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/j_art-20150903-1/',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n return self._extract_episode_info(url)\n\n\nclass NhkVodProgramIE(NhkBaseIE):\n _VALID_URL = r'%s/program%s(?P<id>[0-9a-z]+)(?:.+?\\btype=(?P<episode_type>clip|(?:radio|tv)Episode))?' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)\n _TESTS = [{\n # video program episodes\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway',\n 'info_dict': {\n 'id': 'japanrailway',\n 'title': 'Japan Railway Journal',\n },\n 'playlist_mincount': 1,\n }, {\n # video program clips\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway/?type=clip',\n 'info_dict': {\n 'id': 'japanrailway',\n 'title': 'Japan Railway Journal',\n },\n 'playlist_mincount': 5,\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/10yearshayaomiyazaki/',\n 'only_matching': True,\n }, {\n # audio program\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/audio/listener/',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n lang, m_type, program_id, episode_type = re.match(self._VALID_URL, url).groups()\n\n episodes = self._call_api(\n program_id, lang, m_type == 'video', False, episode_type == 'clip')\n\n entries = []\n for episode in episodes:\n episode_path = episode.get('url')\n if not episode_path:\n continue\n entries.append(self._extract_episode_info(\n urljoin(url, episode_path), episode))\n\n program_title = None\n if entries:\n program_title = entries[0].get('series')\n\n return self.playlist_result(entries, program_id, program_title)\n", "path": "youtube_dl/extractor/nhk.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import urljoin\n\n\nclass NhkBaseIE(InfoExtractor):\n _API_URL_TEMPLATE = 'https://api.nhk.or.jp/nhkworld/%sod%slist/v7a/%s/%s/%s/all%s.json'\n _BASE_URL_REGEX = r'https?://www3\\.nhk\\.or\\.jp/nhkworld/(?P<lang>[a-z]{2})/ondemand'\n _TYPE_REGEX = r'/(?P<type>video|audio)/'\n\n def _call_api(self, m_id, lang, is_video, is_episode, is_clip):\n return self._download_json(\n self._API_URL_TEMPLATE % (\n 'v' if is_video else 'r',\n 'clip' if is_clip else 'esd',\n 'episode' if is_episode else 'program',\n 
m_id, lang, '/all' if is_video else ''),\n m_id, query={'apikey': 'EJfK8jdS57GqlupFgAfAAwr573q01y6k'})['data']['episodes'] or []\n\n def _extract_episode_info(self, url, episode=None):\n fetch_episode = episode is None\n lang, m_type, episode_id = re.match(NhkVodIE._VALID_URL, url).groups()\n if len(episode_id) == 7:\n episode_id = episode_id[:4] + '-' + episode_id[4:]\n\n is_video = m_type == 'video'\n if fetch_episode:\n episode = self._call_api(\n episode_id, lang, is_video, True, episode_id[:4] == '9999')[0]\n title = episode.get('sub_title_clean') or episode['sub_title']\n\n def get_clean_field(key):\n return episode.get(key + '_clean') or episode.get(key)\n\n series = get_clean_field('title')\n\n thumbnails = []\n for s, w, h in [('', 640, 360), ('_l', 1280, 720)]:\n img_path = episode.get('image' + s)\n if not img_path:\n continue\n thumbnails.append({\n 'id': '%dp' % h,\n 'height': h,\n 'width': w,\n 'url': 'https://www3.nhk.or.jp' + img_path,\n })\n\n info = {\n 'id': episode_id + '-' + lang,\n 'title': '%s - %s' % (series, title) if series and title else title,\n 'description': get_clean_field('description'),\n 'thumbnails': thumbnails,\n 'series': series,\n 'episode': title,\n }\n if is_video:\n vod_id = episode['vod_id']\n info.update({\n '_type': 'url_transparent',\n 'ie_key': 'Piksel',\n 'url': 'https://player.piksel.com/v/refid/nhkworld/prefid/' + vod_id,\n 'id': vod_id,\n })\n else:\n if fetch_episode:\n audio_path = episode['audio']['audio']\n info['formats'] = self._extract_m3u8_formats(\n 'https://nhkworld-vh.akamaihd.net/i%s/master.m3u8' % audio_path,\n episode_id, 'm4a', entry_protocol='m3u8_native',\n m3u8_id='hls', fatal=False)\n for f in info['formats']:\n f['language'] = lang\n else:\n info.update({\n '_type': 'url_transparent',\n 'ie_key': NhkVodIE.ie_key(),\n 'url': url,\n })\n return info\n\n\nclass NhkVodIE(NhkBaseIE):\n # the 7-character IDs can have alphabetic chars too: assume [a-z] rather than just [a-f], eg\n _VALID_URL = r'%s%s(?P<id>[0-9a-z]{7}|[^/]+?-\\d{8}-[0-9a-z]+)' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)\n # Content available only for a limited period of time. 
Visit\n # https://www3.nhk.or.jp/nhkworld/en/ondemand/ for working samples.\n _TESTS = [{\n # video clip\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999011/',\n 'md5': '7a90abcfe610ec22a6bfe15bd46b30ca',\n 'info_dict': {\n 'id': 'a95j5iza',\n 'ext': 'mp4',\n 'title': \"Dining with the Chef - Chef Saito's Family recipe: MENCHI-KATSU\",\n 'description': 'md5:5aee4a9f9d81c26281862382103b0ea5',\n 'timestamp': 1565965194,\n 'upload_date': '20190816',\n },\n }, {\n # audio clip\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/r_inventions-20201104-1/',\n 'info_dict': {\n 'id': 'r_inventions-20201104-1-en',\n 'ext': 'm4a',\n 'title': \"Japan's Top Inventions - Miniature Video Cameras\",\n 'description': 'md5:07ea722bdbbb4936fdd360b6a480c25b',\n },\n 'params': {\n # m3u8 download\n 'skip_download': True,\n },\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/2015173/',\n 'only_matching': True,\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/plugin-20190404-1/',\n 'only_matching': True,\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/fr/ondemand/audio/plugin-20190404-1/',\n 'only_matching': True,\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/audio/j_art-20150903-1/',\n 'only_matching': True,\n }, {\n # video, alphabetic character in ID #29670\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999a34/',\n 'only_matching': True,\n 'info_dict': {\n 'id': 'qfjay6cg',\n 'ext': 'mp4',\n 'title': 'DESIGN TALKS plus - Fishermen\u2019s Finery',\n 'description': 'md5:8a8f958aaafb0d7cb59d38de53f1e448',\n 'thumbnail': r're:^https?:/(/[a-z0-9.-]+)+\\.jpg\\?w=1920&h=1080$',\n 'upload_date': '20210615',\n 'timestamp': 1623722008,\n }\n }]\n\n def _real_extract(self, url):\n return self._extract_episode_info(url)\n\n\nclass NhkVodProgramIE(NhkBaseIE):\n _VALID_URL = r'%s/program%s(?P<id>[0-9a-z]+)(?:.+?\\btype=(?P<episode_type>clip|(?:radio|tv)Episode))?' % (NhkBaseIE._BASE_URL_REGEX, NhkBaseIE._TYPE_REGEX)\n _TESTS = [{\n # video program episodes\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway',\n 'info_dict': {\n 'id': 'japanrailway',\n 'title': 'Japan Railway Journal',\n },\n 'playlist_mincount': 1,\n }, {\n # video program clips\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/japanrailway/?type=clip',\n 'info_dict': {\n 'id': 'japanrailway',\n 'title': 'Japan Railway Journal',\n },\n 'playlist_mincount': 5,\n }, {\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/video/10yearshayaomiyazaki/',\n 'only_matching': True,\n }, {\n # audio program\n 'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/program/audio/listener/',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n lang, m_type, program_id, episode_type = re.match(self._VALID_URL, url).groups()\n\n episodes = self._call_api(\n program_id, lang, m_type == 'video', False, episode_type == 'clip')\n\n entries = []\n for episode in episodes:\n episode_path = episode.get('url')\n if not episode_path:\n continue\n entries.append(self._extract_episode_info(\n urljoin(url, episode_path), episode))\n\n program_title = None\n if entries:\n program_title = entries[0].get('series')\n\n return self.playlist_result(entries, program_id, program_title)\n", "path": "youtube_dl/extractor/nhk.py"}]} | 3,363 | 680 |
gh_patches_debug_17155 | rasdani/github-patches | git_diff | cloudtools__troposphere-1157 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RDS VPC example instance classes are out of date
https://github.com/cloudtools/troposphere/blob/565247a50bd27a8f45c3e408b466b193473cb9a6/examples/RDS_VPC.py#L67-L76
The current list of instance classes is documented here: https://aws.amazon.com/rds/instance-types/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/RDS_VPC.py`
Content:
```
1 # Converted from RDS_VPC.template located at:
2 # http://aws.amazon.com/cloudformation/aws-cloudformation-templates/
3
4 from troposphere import GetAtt, Join, Output, Parameter, Ref, Template
5 from troposphere.ec2 import SecurityGroup
6 from troposphere.rds import DBInstance, DBSubnetGroup
7
8
9 t = Template()
10
11 t.add_description(
12 "AWS CloudFormation Sample Template VPC_RDS_DB_Instance: Sample template "
13 "showing how to create an RDS DBInstance in an existing Virtual Private "
14 "Cloud (VPC). **WARNING** This template creates an Amazon Relational "
15 "Database Service database instance. You will be billed for the AWS "
16 "resources used if you create a stack from this template.")
17
18 vpcid = t.add_parameter(Parameter(
19 "VpcId",
20 Type="String",
21 Description="VpcId of your existing Virtual Private Cloud (VPC)"
22 ))
23
24 subnet = t.add_parameter(Parameter(
25 "Subnets",
26 Type="CommaDelimitedList",
27 Description=(
28 "The list of SubnetIds, for at least two Availability Zones in the "
29 "region in your Virtual Private Cloud (VPC)")
30 ))
31
32 dbname = t.add_parameter(Parameter(
33 "DBName",
34 Default="MyDatabase",
35 Description="The database name",
36 Type="String",
37 MinLength="1",
38 MaxLength="64",
39 AllowedPattern="[a-zA-Z][a-zA-Z0-9]*",
40 ConstraintDescription=("must begin with a letter and contain only"
41 " alphanumeric characters.")
42 ))
43
44 dbuser = t.add_parameter(Parameter(
45 "DBUser",
46 NoEcho=True,
47 Description="The database admin account username",
48 Type="String",
49 MinLength="1",
50 MaxLength="16",
51 AllowedPattern="[a-zA-Z][a-zA-Z0-9]*",
52 ConstraintDescription=("must begin with a letter and contain only"
53 " alphanumeric characters.")
54 ))
55
56 dbpassword = t.add_parameter(Parameter(
57 "DBPassword",
58 NoEcho=True,
59 Description="The database admin account password",
60 Type="String",
61 MinLength="1",
62 MaxLength="41",
63 AllowedPattern="[a-zA-Z0-9]*",
64 ConstraintDescription="must contain only alphanumeric characters."
65 ))
66
67 dbclass = t.add_parameter(Parameter(
68 "DBClass",
69 Default="db.m1.small",
70 Description="Database instance class",
71 Type="String",
72 AllowedValues=[
73 "db.m1.small", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge",
74 "db.m2.2xlarge", "db.m2.4xlarge"],
75 ConstraintDescription="must select a valid database instance type.",
76 ))
77
78 dballocatedstorage = t.add_parameter(Parameter(
79 "DBAllocatedStorage",
80 Default="5",
81 Description="The size of the database (Gb)",
82 Type="Number",
83 MinValue="5",
84 MaxValue="1024",
85 ConstraintDescription="must be between 5 and 1024Gb.",
86 ))
87
88
89 mydbsubnetgroup = t.add_resource(DBSubnetGroup(
90 "MyDBSubnetGroup",
91 DBSubnetGroupDescription="Subnets available for the RDS DB Instance",
92 SubnetIds=Ref(subnet),
93 ))
94
95 myvpcsecuritygroup = t.add_resource(SecurityGroup(
96 "myVPCSecurityGroup",
97 GroupDescription="Security group for RDS DB Instance.",
98 VpcId=Ref(vpcid)
99 ))
100
101 mydb = t.add_resource(DBInstance(
102 "MyDB",
103 DBName=Ref(dbname),
104 AllocatedStorage=Ref(dballocatedstorage),
105 DBInstanceClass=Ref(dbclass),
106 Engine="MySQL",
107 EngineVersion="5.5",
108 MasterUsername=Ref(dbuser),
109 MasterUserPassword=Ref(dbpassword),
110 DBSubnetGroupName=Ref(mydbsubnetgroup),
111 VPCSecurityGroups=[Ref(myvpcsecuritygroup)],
112 ))
113
114 t.add_output(Output(
115 "JDBCConnectionString",
116 Description="JDBC connection string for database",
117 Value=Join("", [
118 "jdbc:mysql://",
119 GetAtt("MyDB", "Endpoint.Address"),
120 GetAtt("MyDB", "Endpoint.Port"),
121 "/",
122 Ref(dbname)
123 ])
124 ))
125
126 print(t.to_json())
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/RDS_VPC.py b/examples/RDS_VPC.py
--- a/examples/RDS_VPC.py
+++ b/examples/RDS_VPC.py
@@ -70,8 +70,17 @@
Description="Database instance class",
Type="String",
AllowedValues=[
- "db.m1.small", "db.m1.large", "db.m1.xlarge", "db.m2.xlarge",
- "db.m2.2xlarge", "db.m2.4xlarge"],
+ "db.m5.large", "db.m5.xlarge", "db.m5.2xlarge", "db.m5.4xlarge",
+ "db.m5.12xlarge", "db.m5.24xlarge", "db.m4.large", "db.m4.xlarge",
+ "db.m4.2xlarge", "db.m4.4xlarge", "db.m4.10xlarge", "db.m4.16xlarge",
+ "db.r4.large", "db.r4.xlarge", "db.r4.2xlarge", "db.r4.4xlarge",
+ "db.r4.8xlarge", "db.r4.16xlarge", "db.x1e.xlarge", "db.x1e.2xlarge",
+ "db.x1e.4xlarge", "db.x1e.8xlarge", "db.x1e.16xlarge", "db.x1e.32xlarge",
+ "db.x1.16xlarge", "db.x1.32xlarge", "db.r3.large", "db.r3.xlarge",
+ "db.r3.2xlarge", "db.r3.4xlarge", "db.r3.8xlarge", "db.t2.micro",
+ "db.t2.small", "db.t2.medium", "db.t2.large", "db.t2.xlarge",
+ "db.t2.2xlarge"
+ ],
ConstraintDescription="must select a valid database instance type.",
))
| {"golden_diff": "diff --git a/examples/RDS_VPC.py b/examples/RDS_VPC.py\n--- a/examples/RDS_VPC.py\n+++ b/examples/RDS_VPC.py\n@@ -70,8 +70,17 @@\n Description=\"Database instance class\",\n Type=\"String\",\n AllowedValues=[\n- \"db.m1.small\", \"db.m1.large\", \"db.m1.xlarge\", \"db.m2.xlarge\",\n- \"db.m2.2xlarge\", \"db.m2.4xlarge\"],\n+ \"db.m5.large\", \"db.m5.xlarge\", \"db.m5.2xlarge\", \"db.m5.4xlarge\",\n+ \"db.m5.12xlarge\", \"db.m5.24xlarge\", \"db.m4.large\", \"db.m4.xlarge\",\n+ \"db.m4.2xlarge\", \"db.m4.4xlarge\", \"db.m4.10xlarge\", \"db.m4.16xlarge\",\n+ \"db.r4.large\", \"db.r4.xlarge\", \"db.r4.2xlarge\", \"db.r4.4xlarge\",\n+ \"db.r4.8xlarge\", \"db.r4.16xlarge\", \"db.x1e.xlarge\", \"db.x1e.2xlarge\",\n+ \"db.x1e.4xlarge\", \"db.x1e.8xlarge\", \"db.x1e.16xlarge\", \"db.x1e.32xlarge\",\n+ \"db.x1.16xlarge\", \"db.x1.32xlarge\", \"db.r3.large\", \"db.r3.xlarge\",\n+ \"db.r3.2xlarge\", \"db.r3.4xlarge\", \"db.r3.8xlarge\", \"db.t2.micro\",\n+ \"db.t2.small\", \"db.t2.medium\", \"db.t2.large\", \"db.t2.xlarge\",\n+ \"db.t2.2xlarge\"\n+ ],\n ConstraintDescription=\"must select a valid database instance type.\",\n ))\n", "issue": "RDS VPC example instance classes are out of date\nhttps://github.com/cloudtools/troposphere/blob/565247a50bd27a8f45c3e408b466b193473cb9a6/examples/RDS_VPC.py#L67-L76\r\n\r\nThe current list of instance classes is documented here: https://aws.amazon.com/rds/instance-types/\n", "before_files": [{"content": "# Converted from RDS_VPC.template located at:\n# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/\n\nfrom troposphere import GetAtt, Join, Output, Parameter, Ref, Template\nfrom troposphere.ec2 import SecurityGroup\nfrom troposphere.rds import DBInstance, DBSubnetGroup\n\n\nt = Template()\n\nt.add_description(\n \"AWS CloudFormation Sample Template VPC_RDS_DB_Instance: Sample template \"\n \"showing how to create an RDS DBInstance in an existing Virtual Private \"\n \"Cloud (VPC). **WARNING** This template creates an Amazon Relational \"\n \"Database Service database instance. 
You will be billed for the AWS \"\n \"resources used if you create a stack from this template.\")\n\nvpcid = t.add_parameter(Parameter(\n \"VpcId\",\n Type=\"String\",\n Description=\"VpcId of your existing Virtual Private Cloud (VPC)\"\n))\n\nsubnet = t.add_parameter(Parameter(\n \"Subnets\",\n Type=\"CommaDelimitedList\",\n Description=(\n \"The list of SubnetIds, for at least two Availability Zones in the \"\n \"region in your Virtual Private Cloud (VPC)\")\n))\n\ndbname = t.add_parameter(Parameter(\n \"DBName\",\n Default=\"MyDatabase\",\n Description=\"The database name\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"64\",\n AllowedPattern=\"[a-zA-Z][a-zA-Z0-9]*\",\n ConstraintDescription=(\"must begin with a letter and contain only\"\n \" alphanumeric characters.\")\n))\n\ndbuser = t.add_parameter(Parameter(\n \"DBUser\",\n NoEcho=True,\n Description=\"The database admin account username\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"16\",\n AllowedPattern=\"[a-zA-Z][a-zA-Z0-9]*\",\n ConstraintDescription=(\"must begin with a letter and contain only\"\n \" alphanumeric characters.\")\n))\n\ndbpassword = t.add_parameter(Parameter(\n \"DBPassword\",\n NoEcho=True,\n Description=\"The database admin account password\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"41\",\n AllowedPattern=\"[a-zA-Z0-9]*\",\n ConstraintDescription=\"must contain only alphanumeric characters.\"\n))\n\ndbclass = t.add_parameter(Parameter(\n \"DBClass\",\n Default=\"db.m1.small\",\n Description=\"Database instance class\",\n Type=\"String\",\n AllowedValues=[\n \"db.m1.small\", \"db.m1.large\", \"db.m1.xlarge\", \"db.m2.xlarge\",\n \"db.m2.2xlarge\", \"db.m2.4xlarge\"],\n ConstraintDescription=\"must select a valid database instance type.\",\n))\n\ndballocatedstorage = t.add_parameter(Parameter(\n \"DBAllocatedStorage\",\n Default=\"5\",\n Description=\"The size of the database (Gb)\",\n Type=\"Number\",\n MinValue=\"5\",\n MaxValue=\"1024\",\n ConstraintDescription=\"must be between 5 and 1024Gb.\",\n))\n\n\nmydbsubnetgroup = t.add_resource(DBSubnetGroup(\n \"MyDBSubnetGroup\",\n DBSubnetGroupDescription=\"Subnets available for the RDS DB Instance\",\n SubnetIds=Ref(subnet),\n))\n\nmyvpcsecuritygroup = t.add_resource(SecurityGroup(\n \"myVPCSecurityGroup\",\n GroupDescription=\"Security group for RDS DB Instance.\",\n VpcId=Ref(vpcid)\n))\n\nmydb = t.add_resource(DBInstance(\n \"MyDB\",\n DBName=Ref(dbname),\n AllocatedStorage=Ref(dballocatedstorage),\n DBInstanceClass=Ref(dbclass),\n Engine=\"MySQL\",\n EngineVersion=\"5.5\",\n MasterUsername=Ref(dbuser),\n MasterUserPassword=Ref(dbpassword),\n DBSubnetGroupName=Ref(mydbsubnetgroup),\n VPCSecurityGroups=[Ref(myvpcsecuritygroup)],\n))\n\nt.add_output(Output(\n \"JDBCConnectionString\",\n Description=\"JDBC connection string for database\",\n Value=Join(\"\", [\n \"jdbc:mysql://\",\n GetAtt(\"MyDB\", \"Endpoint.Address\"),\n GetAtt(\"MyDB\", \"Endpoint.Port\"),\n \"/\",\n Ref(dbname)\n ])\n))\n\nprint(t.to_json())\n", "path": "examples/RDS_VPC.py"}], "after_files": [{"content": "# Converted from RDS_VPC.template located at:\n# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/\n\nfrom troposphere import GetAtt, Join, Output, Parameter, Ref, Template\nfrom troposphere.ec2 import SecurityGroup\nfrom troposphere.rds import DBInstance, DBSubnetGroup\n\n\nt = Template()\n\nt.add_description(\n \"AWS CloudFormation Sample Template VPC_RDS_DB_Instance: Sample template \"\n \"showing how to create an RDS DBInstance in an existing 
Virtual Private \"\n \"Cloud (VPC). **WARNING** This template creates an Amazon Relational \"\n \"Database Service database instance. You will be billed for the AWS \"\n \"resources used if you create a stack from this template.\")\n\nvpcid = t.add_parameter(Parameter(\n \"VpcId\",\n Type=\"String\",\n Description=\"VpcId of your existing Virtual Private Cloud (VPC)\"\n))\n\nsubnet = t.add_parameter(Parameter(\n \"Subnets\",\n Type=\"CommaDelimitedList\",\n Description=(\n \"The list of SubnetIds, for at least two Availability Zones in the \"\n \"region in your Virtual Private Cloud (VPC)\")\n))\n\ndbname = t.add_parameter(Parameter(\n \"DBName\",\n Default=\"MyDatabase\",\n Description=\"The database name\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"64\",\n AllowedPattern=\"[a-zA-Z][a-zA-Z0-9]*\",\n ConstraintDescription=(\"must begin with a letter and contain only\"\n \" alphanumeric characters.\")\n))\n\ndbuser = t.add_parameter(Parameter(\n \"DBUser\",\n NoEcho=True,\n Description=\"The database admin account username\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"16\",\n AllowedPattern=\"[a-zA-Z][a-zA-Z0-9]*\",\n ConstraintDescription=(\"must begin with a letter and contain only\"\n \" alphanumeric characters.\")\n))\n\ndbpassword = t.add_parameter(Parameter(\n \"DBPassword\",\n NoEcho=True,\n Description=\"The database admin account password\",\n Type=\"String\",\n MinLength=\"1\",\n MaxLength=\"41\",\n AllowedPattern=\"[a-zA-Z0-9]*\",\n ConstraintDescription=\"must contain only alphanumeric characters.\"\n))\n\ndbclass = t.add_parameter(Parameter(\n \"DBClass\",\n Default=\"db.m1.small\",\n Description=\"Database instance class\",\n Type=\"String\",\n AllowedValues=[\n \"db.m5.large\", \"db.m5.xlarge\", \"db.m5.2xlarge\", \"db.m5.4xlarge\",\n \"db.m5.12xlarge\", \"db.m5.24xlarge\", \"db.m4.large\", \"db.m4.xlarge\",\n \"db.m4.2xlarge\", \"db.m4.4xlarge\", \"db.m4.10xlarge\", \"db.m4.16xlarge\",\n \"db.r4.large\", \"db.r4.xlarge\", \"db.r4.2xlarge\", \"db.r4.4xlarge\",\n \"db.r4.8xlarge\", \"db.r4.16xlarge\", \"db.x1e.xlarge\", \"db.x1e.2xlarge\",\n \"db.x1e.4xlarge\", \"db.x1e.8xlarge\", \"db.x1e.16xlarge\", \"db.x1e.32xlarge\",\n \"db.x1.16xlarge\", \"db.x1.32xlarge\", \"db.r3.large\", \"db.r3.xlarge\",\n \"db.r3.2xlarge\", \"db.r3.4xlarge\", \"db.r3.8xlarge\", \"db.t2.micro\",\n \"db.t2.small\", \"db.t2.medium\", \"db.t2.large\", \"db.t2.xlarge\",\n \"db.t2.2xlarge\"\n ],\n ConstraintDescription=\"must select a valid database instance type.\",\n))\n\ndballocatedstorage = t.add_parameter(Parameter(\n \"DBAllocatedStorage\",\n Default=\"5\",\n Description=\"The size of the database (Gb)\",\n Type=\"Number\",\n MinValue=\"5\",\n MaxValue=\"1024\",\n ConstraintDescription=\"must be between 5 and 1024Gb.\",\n))\n\n\nmydbsubnetgroup = t.add_resource(DBSubnetGroup(\n \"MyDBSubnetGroup\",\n DBSubnetGroupDescription=\"Subnets available for the RDS DB Instance\",\n SubnetIds=Ref(subnet),\n))\n\nmyvpcsecuritygroup = t.add_resource(SecurityGroup(\n \"myVPCSecurityGroup\",\n GroupDescription=\"Security group for RDS DB Instance.\",\n VpcId=Ref(vpcid)\n))\n\nmydb = t.add_resource(DBInstance(\n \"MyDB\",\n DBName=Ref(dbname),\n AllocatedStorage=Ref(dballocatedstorage),\n DBInstanceClass=Ref(dbclass),\n Engine=\"MySQL\",\n EngineVersion=\"5.5\",\n MasterUsername=Ref(dbuser),\n MasterUserPassword=Ref(dbpassword),\n DBSubnetGroupName=Ref(mydbsubnetgroup),\n VPCSecurityGroups=[Ref(myvpcsecuritygroup)],\n))\n\nt.add_output(Output(\n \"JDBCConnectionString\",\n Description=\"JDBC 
connection string for database\",\n Value=Join(\"\", [\n \"jdbc:mysql://\",\n GetAtt(\"MyDB\", \"Endpoint.Address\"),\n GetAtt(\"MyDB\", \"Endpoint.Port\"),\n \"/\",\n Ref(dbname)\n ])\n))\n\nprint(t.to_json())\n", "path": "examples/RDS_VPC.py"}]} | 1,546 | 461 |
gh_patches_debug_7269 | rasdani/github-patches | git_diff | urllib3__urllib3-2711 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`format` session fails to run locally on Windows platform
### Subject
When I ran `nox -rs format` to format the files, the process terminated due to a `FileNotFoundError` raised at line 139 of `noxfile.py`:

### Environment
Windows 10
### Steps to Reproduce
Just run `nox -rs format` on a Windows machine.
I suspect the created `process` could not find the files properly, and my workaround is to use `session.run()` instead:
```python
@nox.session()
def format(session: nox.Session) -> None:
"""Run code formatters."""
session.install("pre-commit")
session.run("pre-commit", "--version")
# process = subprocess.run(
# ["pre-commit", "run", "--all-files"],
# env=session.env,
# text=True,
# stdout=subprocess.PIPE,
# stderr=subprocess.STDOUT,
# )
# # Ensure that pre-commit itself ran successfully
# assert process.returncode in (0, 1)
session.run("pre-commit", "run", "--all-files") # my workaround
lint(session)
```
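If keeping `subprocess` is preferred, here is a minimal, untested sketch (assuming the nox virtualenv's scripts directory is on `session.env['PATH']`; the helper name is illustrative, not part of the repo) that resolves the executable path explicitly so Windows can locate `pre-commit.exe`:
```python
import shutil
import subprocess


def run_pre_commit(env):
    # Resolve the executable against the session's PATH so Windows finds
    # the venv's pre-commit.exe instead of raising FileNotFoundError.
    exe = shutil.which("pre-commit", path=env.get("PATH"))
    process = subprocess.run(
        [exe or "pre-commit", "run", "--all-files"],
        env=env,
        text=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    return process.returncode
```
Resolving the path up front likely helps because, with an argument list and no shell, Windows looks up the executable using the parent process's environment rather than the `env` passed to `subprocess.run`.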
Perhaps this failure is due to my improper environment setup for the repo. If so, please correct me.
Thanks.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `noxfile.py`
Content:
```
1 import os
2 import shutil
3 import subprocess
4
5 import nox
6
7 SOURCE_FILES = [
8 "docs/",
9 "dummyserver/",
10 "src/",
11 "test/",
12 "noxfile.py",
13 "setup.py",
14 ]
15
16
17 def tests_impl(
18 session: nox.Session,
19 extras: str = "socks,secure,brotli,zstd",
20 byte_string_comparisons: bool = True,
21 ) -> None:
22 # Install deps and the package itself.
23 session.install("-r", "dev-requirements.txt")
24 session.install(f".[{extras}]")
25
26 # Show the pip version.
27 session.run("pip", "--version")
28 # Print the Python version and bytesize.
29 session.run("python", "--version")
30 session.run("python", "-c", "import struct; print(struct.calcsize('P') * 8)")
31 # Print OpenSSL information.
32 session.run("python", "-m", "OpenSSL.debug")
33
34 # Inspired from https://hynek.me/articles/ditch-codecov-python/
35 # We use parallel mode and then combine in a later CI step
36 session.run(
37 "python",
38 *(("-bb",) if byte_string_comparisons else ()),
39 "-m",
40 "coverage",
41 "run",
42 "--parallel-mode",
43 "-m",
44 "pytest",
45 "-r",
46 "a",
47 f"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}",
48 "--tb=native",
49 "--no-success-flaky-report",
50 *(session.posargs or ("test/",)),
51 env={"PYTHONWARNINGS": "always::DeprecationWarning"},
52 )
53
54
55 @nox.session(python=["3.7", "3.8", "3.9", "3.10", "3.11", "pypy"])
56 def test(session: nox.Session) -> None:
57 tests_impl(session)
58
59
60 @nox.session(python=["2.7"])
61 def unsupported_setup_py(session: nox.Session) -> None:
62 # Can't check both returncode and output with session.run
63 process = subprocess.run(
64 ["python", "setup.py", "install"],
65 env={**session.env},
66 text=True,
67 capture_output=True,
68 )
69 assert process.returncode == 1
70 print(process.stderr)
71 assert "Please use `python -m pip install .` instead." in process.stderr
72
73
74 @nox.session(python=["3"])
75 def test_brotlipy(session: nox.Session) -> None:
76 """Check that if 'brotlipy' is installed instead of 'brotli' or
77 'brotlicffi' that we still don't blow up.
78 """
79 session.install("brotlipy")
80 tests_impl(session, extras="socks,secure", byte_string_comparisons=False)
81
82
83 def git_clone(session: nox.Session, git_url: str) -> None:
84 session.run("git", "clone", "--depth", "1", git_url, external=True)
85
86
87 @nox.session()
88 def downstream_botocore(session: nox.Session) -> None:
89 root = os.getcwd()
90 tmp_dir = session.create_tmp()
91
92 session.cd(tmp_dir)
93 git_clone(session, "https://github.com/boto/botocore")
94 session.chdir("botocore")
95 session.run(
96 "git",
97 "apply",
98 f"{root}/ci/0001-Mark-100-Continue-tests-as-failing.patch",
99 external=True,
100 )
101 session.run("git", "rev-parse", "HEAD", external=True)
102 session.run("python", "scripts/ci/install")
103
104 session.cd(root)
105 session.install(".", silent=False)
106 session.cd(f"{tmp_dir}/botocore")
107
108 session.run("python", "-c", "import urllib3; print(urllib3.__version__)")
109 session.run("python", "scripts/ci/run-tests")
110
111
112 @nox.session()
113 def downstream_requests(session: nox.Session) -> None:
114 root = os.getcwd()
115 tmp_dir = session.create_tmp()
116
117 session.cd(tmp_dir)
118 git_clone(session, "https://github.com/psf/requests")
119 session.chdir("requests")
120 session.run("git", "apply", f"{root}/ci/requests.patch", external=True)
121 session.run("git", "rev-parse", "HEAD", external=True)
122 session.install(".[socks]", silent=False)
123 session.install("-r", "requirements-dev.txt", silent=False)
124
125 session.cd(root)
126 session.install(".", silent=False)
127 session.cd(f"{tmp_dir}/requests")
128
129 session.run("python", "-c", "import urllib3; print(urllib3.__version__)")
130 session.run("pytest", "tests")
131
132
133 @nox.session()
134 def format(session: nox.Session) -> None:
135 """Run code formatters."""
136 session.install("pre-commit")
137 session.run("pre-commit", "--version")
138
139 process = subprocess.run(
140 ["pre-commit", "run", "--all-files"],
141 env=session.env,
142 text=True,
143 stdout=subprocess.PIPE,
144 stderr=subprocess.STDOUT,
145 )
146 # Ensure that pre-commit itself ran successfully
147 assert process.returncode in (0, 1)
148
149 lint(session)
150
151
152 @nox.session
153 def lint(session: nox.Session) -> None:
154 session.install("pre-commit")
155 session.run("pre-commit", "run", "--all-files")
156
157 mypy(session)
158
159
160 @nox.session(python="3.8")
161 def mypy(session: nox.Session) -> None:
162 """Run mypy."""
163 session.install("-r", "mypy-requirements.txt")
164 session.run("mypy", "--version")
165 session.run(
166 "mypy",
167 "dummyserver",
168 "noxfile.py",
169 "src/urllib3",
170 "test",
171 )
172
173
174 @nox.session
175 def docs(session: nox.Session) -> None:
176 session.install("-r", "docs/requirements.txt")
177 session.install(".[socks,secure,brotli,zstd]")
178
179 session.chdir("docs")
180 if os.path.exists("_build"):
181 shutil.rmtree("_build")
182 session.run("sphinx-build", "-b", "html", "-W", ".", "_build/html")
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -133,19 +133,6 @@
@nox.session()
def format(session: nox.Session) -> None:
"""Run code formatters."""
- session.install("pre-commit")
- session.run("pre-commit", "--version")
-
- process = subprocess.run(
- ["pre-commit", "run", "--all-files"],
- env=session.env,
- text=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- )
- # Ensure that pre-commit itself ran successfully
- assert process.returncode in (0, 1)
-
lint(session)
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -133,19 +133,6 @@\n @nox.session()\n def format(session: nox.Session) -> None:\n \"\"\"Run code formatters.\"\"\"\n- session.install(\"pre-commit\")\n- session.run(\"pre-commit\", \"--version\")\n-\n- process = subprocess.run(\n- [\"pre-commit\", \"run\", \"--all-files\"],\n- env=session.env,\n- text=True,\n- stdout=subprocess.PIPE,\n- stderr=subprocess.STDOUT,\n- )\n- # Ensure that pre-commit itself ran successfully\n- assert process.returncode in (0, 1)\n-\n lint(session)\n", "issue": "`format` session fail to run locally on Window platform\n### Subject\r\nWhen I ran `nox -rs format` to format the files, the format process terminates due to `FileNotFoundError` reported at line 139 at file `noxfile.py`:\r\n\r\n\r\n\r\n\r\n### Environment\r\nWindow 10\r\n\r\n### Steps to Reproduce\r\nJust ran `nox -rs format` on Window machine.\r\n\r\n\r\nI suspect the created `process` could not find the files properly, and my workaround is to using `session.run()` instead:\r\n```python\r\[email protected]()\r\ndef format(session: nox.Session) -> None:\r\n \"\"\"Run code formatters.\"\"\"\r\n session.install(\"pre-commit\")\r\n session.run(\"pre-commit\", \"--version\")\r\n\r\n # process = subprocess.run(\r\n # [\"pre-commit\", \"run\", \"--all-files\"],\r\n # env=session.env,\r\n # text=True,\r\n # stdout=subprocess.PIPE,\r\n # stderr=subprocess.STDOUT,\r\n # )\r\n # # Ensure that pre-commit itself ran successfully\r\n # assert process.returncode in (0, 1)\r\n\r\n session.run(\"pre-commit\", \"run\", \"--all-files\") # my workaround\r\n\r\n lint(session)\r\n```\r\n\r\nPerhaps this failure is due to my improper environment setup for the repo. If so, please correct me.\r\n\r\nThanks.\n", "before_files": [{"content": "import os\nimport shutil\nimport subprocess\n\nimport nox\n\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef tests_impl(\n session: nox.Session,\n extras: str = \"socks,secure,brotli,zstd\",\n byte_string_comparisons: bool = True,\n) -> None:\n # Install deps and the package itself.\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(f\".[{extras}]\")\n\n # Show the pip version.\n session.run(\"pip\", \"--version\")\n # Print the Python version and bytesize.\n session.run(\"python\", \"--version\")\n session.run(\"python\", \"-c\", \"import struct; print(struct.calcsize('P') * 8)\")\n # Print OpenSSL information.\n session.run(\"python\", \"-m\", \"OpenSSL.debug\")\n\n # Inspired from https://hynek.me/articles/ditch-codecov-python/\n # We use parallel mode and then combine in a later CI step\n session.run(\n \"python\",\n *((\"-bb\",) if byte_string_comparisons else ()),\n \"-m\",\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n \"-r\",\n \"a\",\n f\"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}\",\n \"--tb=native\",\n \"--no-success-flaky-report\",\n *(session.posargs or (\"test/\",)),\n env={\"PYTHONWARNINGS\": \"always::DeprecationWarning\"},\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\", \"pypy\"])\ndef test(session: nox.Session) -> None:\n tests_impl(session)\n\n\[email protected](python=[\"2.7\"])\ndef unsupported_setup_py(session: nox.Session) -> None:\n # Can't check both returncode and output with session.run\n process = subprocess.run(\n [\"python\", \"setup.py\", \"install\"],\n env={**session.env},\n text=True,\n 
capture_output=True,\n )\n assert process.returncode == 1\n print(process.stderr)\n assert \"Please use `python -m pip install .` instead.\" in process.stderr\n\n\[email protected](python=[\"3\"])\ndef test_brotlipy(session: nox.Session) -> None:\n \"\"\"Check that if 'brotlipy' is installed instead of 'brotli' or\n 'brotlicffi' that we still don't blow up.\n \"\"\"\n session.install(\"brotlipy\")\n tests_impl(session, extras=\"socks,secure\", byte_string_comparisons=False)\n\n\ndef git_clone(session: nox.Session, git_url: str) -> None:\n session.run(\"git\", \"clone\", \"--depth\", \"1\", git_url, external=True)\n\n\[email protected]()\ndef downstream_botocore(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/boto/botocore\")\n session.chdir(\"botocore\")\n session.run(\n \"git\",\n \"apply\",\n f\"{root}/ci/0001-Mark-100-Continue-tests-as-failing.patch\",\n external=True,\n )\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.run(\"python\", \"scripts/ci/install\")\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/botocore\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"python\", \"scripts/ci/run-tests\")\n\n\[email protected]()\ndef downstream_requests(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/psf/requests\")\n session.chdir(\"requests\")\n session.run(\"git\", \"apply\", f\"{root}/ci/requests.patch\", external=True)\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.install(\".[socks]\", silent=False)\n session.install(\"-r\", \"requirements-dev.txt\", silent=False)\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/requests\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"pytest\", \"tests\")\n\n\[email protected]()\ndef format(session: nox.Session) -> None:\n \"\"\"Run code formatters.\"\"\"\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"--version\")\n\n process = subprocess.run(\n [\"pre-commit\", \"run\", \"--all-files\"],\n env=session.env,\n text=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n # Ensure that pre-commit itself ran successfully\n assert process.returncode in (0, 1)\n\n lint(session)\n\n\[email protected]\ndef lint(session: nox.Session) -> None:\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")\n\n mypy(session)\n\n\[email protected](python=\"3.8\")\ndef mypy(session: nox.Session) -> None:\n \"\"\"Run mypy.\"\"\"\n session.install(\"-r\", \"mypy-requirements.txt\")\n session.run(\"mypy\", \"--version\")\n session.run(\n \"mypy\",\n \"dummyserver\",\n \"noxfile.py\",\n \"src/urllib3\",\n \"test\",\n )\n\n\[email protected]\ndef docs(session: nox.Session) -> None:\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\".[socks,secure,brotli,zstd]\")\n\n session.chdir(\"docs\")\n if os.path.exists(\"_build\"):\n shutil.rmtree(\"_build\")\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-W\", \".\", \"_build/html\")\n", "path": "noxfile.py"}], "after_files": [{"content": "import os\nimport shutil\nimport subprocess\n\nimport nox\n\nSOURCE_FILES = [\n \"docs/\",\n \"dummyserver/\",\n \"src/\",\n \"test/\",\n \"noxfile.py\",\n \"setup.py\",\n]\n\n\ndef 
tests_impl(\n session: nox.Session,\n extras: str = \"socks,secure,brotli,zstd\",\n byte_string_comparisons: bool = True,\n) -> None:\n # Install deps and the package itself.\n session.install(\"-r\", \"dev-requirements.txt\")\n session.install(f\".[{extras}]\")\n\n # Show the pip version.\n session.run(\"pip\", \"--version\")\n # Print the Python version and bytesize.\n session.run(\"python\", \"--version\")\n session.run(\"python\", \"-c\", \"import struct; print(struct.calcsize('P') * 8)\")\n # Print OpenSSL information.\n session.run(\"python\", \"-m\", \"OpenSSL.debug\")\n\n # Inspired from https://hynek.me/articles/ditch-codecov-python/\n # We use parallel mode and then combine in a later CI step\n session.run(\n \"python\",\n *((\"-bb\",) if byte_string_comparisons else ()),\n \"-m\",\n \"coverage\",\n \"run\",\n \"--parallel-mode\",\n \"-m\",\n \"pytest\",\n \"-r\",\n \"a\",\n f\"--color={'yes' if 'GITHUB_ACTIONS' in os.environ else 'auto'}\",\n \"--tb=native\",\n \"--no-success-flaky-report\",\n *(session.posargs or (\"test/\",)),\n env={\"PYTHONWARNINGS\": \"always::DeprecationWarning\"},\n )\n\n\[email protected](python=[\"3.7\", \"3.8\", \"3.9\", \"3.10\", \"3.11\", \"pypy\"])\ndef test(session: nox.Session) -> None:\n tests_impl(session)\n\n\[email protected](python=[\"2.7\"])\ndef unsupported_setup_py(session: nox.Session) -> None:\n # Can't check both returncode and output with session.run\n process = subprocess.run(\n [\"python\", \"setup.py\", \"install\"],\n env={**session.env},\n text=True,\n capture_output=True,\n )\n assert process.returncode == 1\n print(process.stderr)\n assert \"Please use `python -m pip install .` instead.\" in process.stderr\n\n\[email protected](python=[\"3\"])\ndef test_brotlipy(session: nox.Session) -> None:\n \"\"\"Check that if 'brotlipy' is installed instead of 'brotli' or\n 'brotlicffi' that we still don't blow up.\n \"\"\"\n session.install(\"brotlipy\")\n tests_impl(session, extras=\"socks,secure\", byte_string_comparisons=False)\n\n\ndef git_clone(session: nox.Session, git_url: str) -> None:\n session.run(\"git\", \"clone\", \"--depth\", \"1\", git_url, external=True)\n\n\[email protected]()\ndef downstream_botocore(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/boto/botocore\")\n session.chdir(\"botocore\")\n session.run(\n \"git\",\n \"apply\",\n f\"{root}/ci/0001-Mark-100-Continue-tests-as-failing.patch\",\n external=True,\n )\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.run(\"python\", \"scripts/ci/install\")\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/botocore\")\n\n session.run(\"python\", \"-c\", \"import urllib3; print(urllib3.__version__)\")\n session.run(\"python\", \"scripts/ci/run-tests\")\n\n\[email protected]()\ndef downstream_requests(session: nox.Session) -> None:\n root = os.getcwd()\n tmp_dir = session.create_tmp()\n\n session.cd(tmp_dir)\n git_clone(session, \"https://github.com/psf/requests\")\n session.chdir(\"requests\")\n session.run(\"git\", \"apply\", f\"{root}/ci/requests.patch\", external=True)\n session.run(\"git\", \"rev-parse\", \"HEAD\", external=True)\n session.install(\".[socks]\", silent=False)\n session.install(\"-r\", \"requirements-dev.txt\", silent=False)\n\n session.cd(root)\n session.install(\".\", silent=False)\n session.cd(f\"{tmp_dir}/requests\")\n\n session.run(\"python\", \"-c\", \"import urllib3; 
print(urllib3.__version__)\")\n session.run(\"pytest\", \"tests\")\n\n\[email protected]()\ndef format(session: nox.Session) -> None:\n \"\"\"Run code formatters.\"\"\"\n lint(session)\n\n\[email protected]\ndef lint(session: nox.Session) -> None:\n session.install(\"pre-commit\")\n session.run(\"pre-commit\", \"run\", \"--all-files\")\n\n mypy(session)\n\n\[email protected](python=\"3.8\")\ndef mypy(session: nox.Session) -> None:\n \"\"\"Run mypy.\"\"\"\n session.install(\"-r\", \"mypy-requirements.txt\")\n session.run(\"mypy\", \"--version\")\n session.run(\n \"mypy\",\n \"dummyserver\",\n \"noxfile.py\",\n \"src/urllib3\",\n \"test\",\n )\n\n\[email protected]\ndef docs(session: nox.Session) -> None:\n session.install(\"-r\", \"docs/requirements.txt\")\n session.install(\".[socks,secure,brotli,zstd]\")\n\n session.chdir(\"docs\")\n if os.path.exists(\"_build\"):\n shutil.rmtree(\"_build\")\n session.run(\"sphinx-build\", \"-b\", \"html\", \"-W\", \".\", \"_build/html\")\n", "path": "noxfile.py"}]} | 2,386 | 162 |
gh_patches_debug_30972 | rasdani/github-patches | git_diff | saleor__saleor-3639 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Expose the list of supported payment gateways in the API
In the API we expose the `GatewaysEnum` enum, which contains all the available payment gateways, but there is no way for Storefront 2.0 to determine which of them are actually enabled (currently Braintree is hardcoded there). We should expose a field that provides this information, e.g.:
```
type Shop {
enabledGateways: [GatewaysEnum]
}
```
A resolver function should take into account the gateways enabled in the `CHECKOUT_PAYMENT_GATEWAYS` setting.
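For illustration, a minimal resolver sketch (assuming `CHECKOUT_PAYMENT_GATEWAYS` is a dict mapping gateway identifiers to display names; the function name below is a placeholder, not the final API):
```python
from django.conf import settings


def resolve_enabled_gateways(self, info):
    # Expose only the gateways enabled in settings.CHECKOUT_PAYMENT_GATEWAYS.
    return list(settings.CHECKOUT_PAYMENT_GATEWAYS.keys())
```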
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/graphql/checkout/types.py`
Content:
```
1 import graphene
2 import graphene_django_optimizer as gql_optimizer
3
4 from ...checkout import models
5 from ...core.utils.taxes import get_taxes_for_address
6 from ..core.connection import CountableDjangoObjectType
7 from ..core.types.money import TaxedMoney
8 from ..order.utils import applicable_shipping_methods
9 from ..shipping.types import ShippingMethod
10
11
12 class CheckoutLine(CountableDjangoObjectType):
13 total_price = graphene.Field(
14 TaxedMoney,
15 description=(
16 'The sum of the checkout line price, taxes and discounts.'))
17 requires_shipping = graphene.Boolean(
18 description='Indicates whether the item need to be delivered.')
19
20 class Meta:
21 exclude_fields = ['cart', 'data']
22 description = 'Represents an item in the checkout.'
23 interfaces = [graphene.relay.Node]
24 model = models.CartLine
25 filter_fields = ['id']
26
27 def resolve_total_price(self, info):
28 taxes = get_taxes_for_address(self.cart.shipping_address)
29 return self.get_total(taxes=taxes)
30
31 def resolve_requires_shipping(self, info):
32 return self.is_shipping_required()
33
34
35 class Checkout(CountableDjangoObjectType):
36 available_shipping_methods = graphene.List(
37 ShippingMethod, required=False,
38 description='Shipping methods that can be used with this order.')
39 is_shipping_required = graphene.Boolean(
40 description='Returns True, if checkout requires shipping.',
41 required=True)
42 lines = gql_optimizer.field(
43 graphene.List(
44 CheckoutLine, description=(
45 'A list of checkout lines, each containing information about '
46 'an item in the checkout.')),
47 model_field='lines')
48 shipping_price = graphene.Field(
49 TaxedMoney,
50 description='The price of the shipping, with all the taxes included.')
51 subtotal_price = graphene.Field(
52 TaxedMoney,
53 description=(
54 'The price of the checkout before shipping, with taxes included.'))
55 total_price = graphene.Field(
56 TaxedMoney,
57 description=(
58 'The sum of the the checkout line prices, with all the taxes,'
59 'shipping costs, and discounts included.'))
60
61 class Meta:
62 exclude_fields = ['payments']
63 description = 'Checkout object'
64 model = models.Cart
65 interfaces = [graphene.relay.Node]
66 filter_fields = ['token']
67
68 def resolve_total_price(self, info):
69 taxes = get_taxes_for_address(self.shipping_address)
70 return self.get_total(taxes=taxes)
71
72 def resolve_subtotal_price(self, info):
73 taxes = get_taxes_for_address(self.shipping_address)
74 return self.get_subtotal(taxes=taxes)
75
76 def resolve_shipping_price(self, info):
77 taxes = get_taxes_for_address(self.shipping_address)
78 return self.get_shipping_price(taxes=taxes)
79
80 def resolve_lines(self, info):
81 return self.lines.prefetch_related('variant')
82
83 def resolve_available_shipping_methods(self, info):
84 taxes = get_taxes_for_address(self.shipping_address)
85 price = self.get_subtotal(
86 taxes=taxes, discounts=info.context.discounts)
87 return applicable_shipping_methods(self, info, price.gross.amount)
88
89 def resolve_is_shipping_required(self, info):
90 return self.is_shipping_required()
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/graphql/checkout/types.py b/saleor/graphql/checkout/types.py
--- a/saleor/graphql/checkout/types.py
+++ b/saleor/graphql/checkout/types.py
@@ -1,11 +1,13 @@
import graphene
import graphene_django_optimizer as gql_optimizer
+from django.conf import settings
from ...checkout import models
from ...core.utils.taxes import get_taxes_for_address
from ..core.connection import CountableDjangoObjectType
from ..core.types.money import TaxedMoney
from ..order.utils import applicable_shipping_methods
+from ..payment.enums import PaymentGatewayEnum
from ..shipping.types import ShippingMethod
@@ -36,6 +38,9 @@
available_shipping_methods = graphene.List(
ShippingMethod, required=False,
description='Shipping methods that can be used with this order.')
+ available_payment_gateways = graphene.List(
+ PaymentGatewayEnum, description='List of available payment gateways.',
+ required=True)
is_shipping_required = graphene.Boolean(
description='Returns True, if checkout requires shipping.',
required=True)
@@ -86,5 +91,8 @@
taxes=taxes, discounts=info.context.discounts)
return applicable_shipping_methods(self, info, price.gross.amount)
+ def resolve_available_payment_gateways(self, info):
+ return settings.CHECKOUT_PAYMENT_GATEWAYS.keys()
+
def resolve_is_shipping_required(self, info):
return self.is_shipping_required()
| {"golden_diff": "diff --git a/saleor/graphql/checkout/types.py b/saleor/graphql/checkout/types.py\n--- a/saleor/graphql/checkout/types.py\n+++ b/saleor/graphql/checkout/types.py\n@@ -1,11 +1,13 @@\n import graphene\n import graphene_django_optimizer as gql_optimizer\n+from django.conf import settings\n \n from ...checkout import models\n from ...core.utils.taxes import get_taxes_for_address\n from ..core.connection import CountableDjangoObjectType\n from ..core.types.money import TaxedMoney\n from ..order.utils import applicable_shipping_methods\n+from ..payment.enums import PaymentGatewayEnum\n from ..shipping.types import ShippingMethod\n \n \n@@ -36,6 +38,9 @@\n available_shipping_methods = graphene.List(\n ShippingMethod, required=False,\n description='Shipping methods that can be used with this order.')\n+ available_payment_gateways = graphene.List(\n+ PaymentGatewayEnum, description='List of available payment gateways.',\n+ required=True)\n is_shipping_required = graphene.Boolean(\n description='Returns True, if checkout requires shipping.',\n required=True)\n@@ -86,5 +91,8 @@\n taxes=taxes, discounts=info.context.discounts)\n return applicable_shipping_methods(self, info, price.gross.amount)\n \n+ def resolve_available_payment_gateways(self, info):\n+ return settings.CHECKOUT_PAYMENT_GATEWAYS.keys()\n+\n def resolve_is_shipping_required(self, info):\n return self.is_shipping_required()\n", "issue": "Expose in API list of supported payment gateways\nIn API we expose the `GatewaysEnum` enum which contains all the available payment gateways, but there is no way for the Storefront 2.0 to determine which of them are actually enabled (currently Braintree is hardcoded over there). We should expose a field that would provide this information, e.g:\r\n```\r\ntype Shop {\r\n enabledGateways: [GatewaysEnum]\r\n}\r\n```\r\n\r\nA resolver function should take into account gateways enabled in `CHECKOUT_PAYMENT_GATEWAYS` setting.\n", "before_files": [{"content": "import graphene\nimport graphene_django_optimizer as gql_optimizer\n\nfrom ...checkout import models\nfrom ...core.utils.taxes import get_taxes_for_address\nfrom ..core.connection import CountableDjangoObjectType\nfrom ..core.types.money import TaxedMoney\nfrom ..order.utils import applicable_shipping_methods\nfrom ..shipping.types import ShippingMethod\n\n\nclass CheckoutLine(CountableDjangoObjectType):\n total_price = graphene.Field(\n TaxedMoney,\n description=(\n 'The sum of the checkout line price, taxes and discounts.'))\n requires_shipping = graphene.Boolean(\n description='Indicates whether the item need to be delivered.')\n\n class Meta:\n exclude_fields = ['cart', 'data']\n description = 'Represents an item in the checkout.'\n interfaces = [graphene.relay.Node]\n model = models.CartLine\n filter_fields = ['id']\n\n def resolve_total_price(self, info):\n taxes = get_taxes_for_address(self.cart.shipping_address)\n return self.get_total(taxes=taxes)\n\n def resolve_requires_shipping(self, info):\n return self.is_shipping_required()\n\n\nclass Checkout(CountableDjangoObjectType):\n available_shipping_methods = graphene.List(\n ShippingMethod, required=False,\n description='Shipping methods that can be used with this order.')\n is_shipping_required = graphene.Boolean(\n description='Returns True, if checkout requires shipping.',\n required=True)\n lines = gql_optimizer.field(\n graphene.List(\n CheckoutLine, description=(\n 'A list of checkout lines, each containing information about '\n 'an item in the checkout.')),\n 
model_field='lines')\n shipping_price = graphene.Field(\n TaxedMoney,\n description='The price of the shipping, with all the taxes included.')\n subtotal_price = graphene.Field(\n TaxedMoney,\n description=(\n 'The price of the checkout before shipping, with taxes included.'))\n total_price = graphene.Field(\n TaxedMoney,\n description=(\n 'The sum of the the checkout line prices, with all the taxes,'\n 'shipping costs, and discounts included.'))\n\n class Meta:\n exclude_fields = ['payments']\n description = 'Checkout object'\n model = models.Cart\n interfaces = [graphene.relay.Node]\n filter_fields = ['token']\n\n def resolve_total_price(self, info):\n taxes = get_taxes_for_address(self.shipping_address)\n return self.get_total(taxes=taxes)\n\n def resolve_subtotal_price(self, info):\n taxes = get_taxes_for_address(self.shipping_address)\n return self.get_subtotal(taxes=taxes)\n\n def resolve_shipping_price(self, info):\n taxes = get_taxes_for_address(self.shipping_address)\n return self.get_shipping_price(taxes=taxes)\n\n def resolve_lines(self, info):\n return self.lines.prefetch_related('variant')\n\n def resolve_available_shipping_methods(self, info):\n taxes = get_taxes_for_address(self.shipping_address)\n price = self.get_subtotal(\n taxes=taxes, discounts=info.context.discounts)\n return applicable_shipping_methods(self, info, price.gross.amount)\n\n def resolve_is_shipping_required(self, info):\n return self.is_shipping_required()\n", "path": "saleor/graphql/checkout/types.py"}], "after_files": [{"content": "import graphene\nimport graphene_django_optimizer as gql_optimizer\nfrom django.conf import settings\n\nfrom ...checkout import models\nfrom ...core.utils.taxes import get_taxes_for_address\nfrom ..core.connection import CountableDjangoObjectType\nfrom ..core.types.money import TaxedMoney\nfrom ..order.utils import applicable_shipping_methods\nfrom ..payment.enums import PaymentGatewayEnum\nfrom ..shipping.types import ShippingMethod\n\n\nclass CheckoutLine(CountableDjangoObjectType):\n total_price = graphene.Field(\n TaxedMoney,\n description=(\n 'The sum of the checkout line price, taxes and discounts.'))\n requires_shipping = graphene.Boolean(\n description='Indicates whether the item need to be delivered.')\n\n class Meta:\n exclude_fields = ['cart', 'data']\n description = 'Represents an item in the checkout.'\n interfaces = [graphene.relay.Node]\n model = models.CartLine\n filter_fields = ['id']\n\n def resolve_total_price(self, info):\n taxes = get_taxes_for_address(self.cart.shipping_address)\n return self.get_total(taxes=taxes)\n\n def resolve_requires_shipping(self, info):\n return self.is_shipping_required()\n\n\nclass Checkout(CountableDjangoObjectType):\n available_shipping_methods = graphene.List(\n ShippingMethod, required=False,\n description='Shipping methods that can be used with this order.')\n available_payment_gateways = graphene.List(\n PaymentGatewayEnum, description='List of available payment gateways.',\n required=True)\n is_shipping_required = graphene.Boolean(\n description='Returns True, if checkout requires shipping.',\n required=True)\n lines = gql_optimizer.field(\n graphene.List(\n CheckoutLine, description=(\n 'A list of checkout lines, each containing information about '\n 'an item in the checkout.')),\n model_field='lines')\n shipping_price = graphene.Field(\n TaxedMoney,\n description='The price of the shipping, with all the taxes included.')\n subtotal_price = graphene.Field(\n TaxedMoney,\n description=(\n 'The price of the checkout 
before shipping, with taxes included.'))\n total_price = graphene.Field(\n TaxedMoney,\n description=(\n 'The sum of the the checkout line prices, with all the taxes,'\n 'shipping costs, and discounts included.'))\n\n class Meta:\n exclude_fields = ['payments']\n description = 'Checkout object'\n model = models.Cart\n interfaces = [graphene.relay.Node]\n filter_fields = ['token']\n\n def resolve_total_price(self, info):\n taxes = get_taxes_for_address(self.shipping_address)\n return self.get_total(taxes=taxes)\n\n def resolve_subtotal_price(self, info):\n taxes = get_taxes_for_address(self.shipping_address)\n return self.get_subtotal(taxes=taxes)\n\n def resolve_shipping_price(self, info):\n taxes = get_taxes_for_address(self.shipping_address)\n return self.get_shipping_price(taxes=taxes)\n\n def resolve_lines(self, info):\n return self.lines.prefetch_related('variant')\n\n def resolve_available_shipping_methods(self, info):\n taxes = get_taxes_for_address(self.shipping_address)\n price = self.get_subtotal(\n taxes=taxes, discounts=info.context.discounts)\n return applicable_shipping_methods(self, info, price.gross.amount)\n\n def resolve_available_payment_gateways(self, info):\n return settings.CHECKOUT_PAYMENT_GATEWAYS.keys()\n\n def resolve_is_shipping_required(self, info):\n return self.is_shipping_required()\n", "path": "saleor/graphql/checkout/types.py"}]} | 1,230 | 318 |
gh_patches_debug_17205 | rasdani/github-patches | git_diff | fidals__shopelectro-532 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
tracking.es6:30-33: Send `purchase` event to YA and GA...
The puzzle `504-30170dc3` from #504 has to be resolved:
https://github.com/fidals/shopelectro/blob/4e607fb856842b43192511b1b4eaff19d2052b9a/front/js/shared/tracking.es6#L30-L33
The puzzle was created by Artemiy on 17-Aug-18.
Estimate: 60 minutes,
If you have any technical questions, don't ask me, submit new tickets instead. The task will be "done" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/views/ecommerce.py`
Content:
```
1 from django.conf import settings
2 from django.http import HttpResponse, JsonResponse
3 from django.shortcuts import get_object_or_404, render
4 from django.views.decorators.http import require_POST
5
6 from ecommerce import mailer, views as ec_views
7 from pages.models import CustomPage
8
9 from shopelectro.cart import SECart
10 from shopelectro.forms import OrderForm
11 from shopelectro.models import Product, Order
12
13
14 # ECOMMERCE VIEWS
15 class OrderPage(ec_views.OrderPage):
16 order_form = OrderForm
17 cart = SECart
18
19 def get_context_data(self, request, **kwargs):
20 return {
21 **super(OrderPage, self).get_context_data(request, **kwargs),
22 'page': CustomPage.objects.get(slug='order'),
23 }
24
25
26 class AddToCart(ec_views.AddToCart):
27 cart = SECart
28 product_model = Product
29 order_form = OrderForm
30
31
32 class RemoveFromCart(ec_views.RemoveFromCart):
33 cart = SECart
34 product_model = Product
35 order_form = OrderForm
36
37
38 class ChangeCount(ec_views.ChangeCount):
39 cart = SECart
40 product_model = Product
41 order_form = OrderForm
42
43
44 class FlushCart(ec_views.FlushCart):
45 product_model = Product
46 order_form = OrderForm
47
48
49 class OrderSuccess(ec_views.OrderSuccess):
50 order = Order
51
52
53 @require_POST
54 def one_click_buy(request):
55 """
56 Handle one-click-buy.
57
58 Accept XHR, save Order to DB, send mail about it
59 and return 200 OK.
60 """
61 SECart(request.session).clear()
62
63 cart = SECart(request.session)
64 product = get_object_or_404(Product, id=request.POST['product'])
65 cart.add(product, int(request.POST['quantity']))
66 order = Order(phone=request.POST['phone'])
67 order.set_positions(cart)
68 ec_views.save_order_to_session(request.session, order)
69 mailer.send_order(
70 subject=settings.EMAIL_SUBJECTS['one_click'],
71 order=order,
72 to_customer=False,
73 )
74 return HttpResponse('ok')
75
76
77 @require_POST
78 def order_call(request):
79 """Send email about ordered call."""
80 phone, time, url = ec_views.get_keys_from_post(
81 request, 'phone', 'time', 'url')
82
83 mailer.send_backcall(
84 subject=settings.EMAIL_SUBJECTS['call'],
85 phone=phone,
86 time=time,
87 url=url,
88 )
89
90 return HttpResponse('ok')
91
92
93 class YandexOrder(OrderPage):
94
95 def post(self, request):
96 cart = self.cart(request.session)
97 form = self.order_form(request.POST.dict())
98
99 if not form.is_valid():
100 return render(request, self.template, {'cart': cart, 'form': form})
101
102 order = form.save()
103 order.set_positions(cart)
104 ec_views.save_order_to_session(request.session, order)
105
106 # Took form fields from Yandex docs https://goo.gl/afKfsz
107 response_data = {
108 'yandex_kassa_link': settings.YANDEX_KASSA_LINK, # Required
109 'shopId': settings.SHOP['id'], # Required
110 'scid': settings.SHOP['scid'], # Required
111 'shopSuccessURL': settings.SHOP['success_url'],
112 'shopFailURL': settings.SHOP['fail_url'],
113 'customerNumber': order.id, # Required
114 'sum': order.total_price, # Required
115 'orderNumber': order.fake_order_number,
116 'cps_phone': order.phone,
117 'cps_email': order.email,
118 'paymentType': request.POST.get('payment_type'),
119 }
120
121 return JsonResponse(response_data)
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/views/ecommerce.py b/shopelectro/views/ecommerce.py
--- a/shopelectro/views/ecommerce.py
+++ b/shopelectro/views/ecommerce.py
@@ -1,4 +1,5 @@
from django.conf import settings
+from django.core import serializers
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404, render
from django.views.decorators.http import require_POST
@@ -47,7 +48,20 @@
class OrderSuccess(ec_views.OrderSuccess):
- order = Order
+ order = Order.objects.all().prefetch_related('positions')
+
+ def get_context_data(self, **kwargs):
+ context = super().get_context_data(**kwargs)
+ positions_json = serializers.serialize(
+ 'json',
+ context['order'].positions.all(),
+ fields=['name', 'quantity', 'price'],
+ )
+
+ return {
+ **context,
+ 'positions_json': positions_json,
+ }
@require_POST
| {"golden_diff": "diff --git a/shopelectro/views/ecommerce.py b/shopelectro/views/ecommerce.py\n--- a/shopelectro/views/ecommerce.py\n+++ b/shopelectro/views/ecommerce.py\n@@ -1,4 +1,5 @@\n from django.conf import settings\n+from django.core import serializers\n from django.http import HttpResponse, JsonResponse\n from django.shortcuts import get_object_or_404, render\n from django.views.decorators.http import require_POST\n@@ -47,7 +48,20 @@\n \n \n class OrderSuccess(ec_views.OrderSuccess):\n- order = Order\n+ order = Order.objects.all().prefetch_related('positions')\n+\n+ def get_context_data(self, **kwargs):\n+ context = super().get_context_data(**kwargs)\n+ positions_json = serializers.serialize(\n+ 'json',\n+ context['order'].positions.all(),\n+ fields=['name', 'quantity', 'price'],\n+ )\n+\n+ return {\n+ **context,\n+ 'positions_json': positions_json,\n+ }\n \n \n @require_POST\n", "issue": "tracking.es6:30-33: Send `purchase` event to YA and GA...\nThe puzzle `504-30170dc3` from #504 has to be resolved:\n\nhttps://github.com/fidals/shopelectro/blob/4e607fb856842b43192511b1b4eaff19d2052b9a/front/js/shared/tracking.es6#L30-L33\n\nThe puzzle was created by Artemiy on 17-Aug-18. \n\nEstimate: 60 minutes, \n\nIf you have any technical questions, don't ask me, submit new tickets instead. The task will be \"done\" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html).\n", "before_files": [{"content": "from django.conf import settings\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.views.decorators.http import require_POST\n\nfrom ecommerce import mailer, views as ec_views\nfrom pages.models import CustomPage\n\nfrom shopelectro.cart import SECart\nfrom shopelectro.forms import OrderForm\nfrom shopelectro.models import Product, Order\n\n\n# ECOMMERCE VIEWS\nclass OrderPage(ec_views.OrderPage):\n order_form = OrderForm\n cart = SECart\n\n def get_context_data(self, request, **kwargs):\n return {\n **super(OrderPage, self).get_context_data(request, **kwargs),\n 'page': CustomPage.objects.get(slug='order'),\n }\n\n\nclass AddToCart(ec_views.AddToCart):\n cart = SECart\n product_model = Product\n order_form = OrderForm\n\n\nclass RemoveFromCart(ec_views.RemoveFromCart):\n cart = SECart\n product_model = Product\n order_form = OrderForm\n\n\nclass ChangeCount(ec_views.ChangeCount):\n cart = SECart\n product_model = Product\n order_form = OrderForm\n\n\nclass FlushCart(ec_views.FlushCart):\n product_model = Product\n order_form = OrderForm\n\n\nclass OrderSuccess(ec_views.OrderSuccess):\n order = Order\n\n\n@require_POST\ndef one_click_buy(request):\n \"\"\"\n Handle one-click-buy.\n\n Accept XHR, save Order to DB, send mail about it\n and return 200 OK.\n \"\"\"\n SECart(request.session).clear()\n\n cart = SECart(request.session)\n product = get_object_or_404(Product, id=request.POST['product'])\n cart.add(product, int(request.POST['quantity']))\n order = Order(phone=request.POST['phone'])\n order.set_positions(cart)\n ec_views.save_order_to_session(request.session, order)\n mailer.send_order(\n subject=settings.EMAIL_SUBJECTS['one_click'],\n order=order,\n to_customer=False,\n )\n return HttpResponse('ok')\n\n\n@require_POST\ndef order_call(request):\n \"\"\"Send email about ordered call.\"\"\"\n phone, time, url = ec_views.get_keys_from_post(\n 
request, 'phone', 'time', 'url')\n\n mailer.send_backcall(\n subject=settings.EMAIL_SUBJECTS['call'],\n phone=phone,\n time=time,\n url=url,\n )\n\n return HttpResponse('ok')\n\n\nclass YandexOrder(OrderPage):\n\n def post(self, request):\n cart = self.cart(request.session)\n form = self.order_form(request.POST.dict())\n\n if not form.is_valid():\n return render(request, self.template, {'cart': cart, 'form': form})\n\n order = form.save()\n order.set_positions(cart)\n ec_views.save_order_to_session(request.session, order)\n\n # Took form fields from Yandex docs https://goo.gl/afKfsz\n response_data = {\n 'yandex_kassa_link': settings.YANDEX_KASSA_LINK, # Required\n 'shopId': settings.SHOP['id'], # Required\n 'scid': settings.SHOP['scid'], # Required\n 'shopSuccessURL': settings.SHOP['success_url'],\n 'shopFailURL': settings.SHOP['fail_url'],\n 'customerNumber': order.id, # Required\n 'sum': order.total_price, # Required\n 'orderNumber': order.fake_order_number,\n 'cps_phone': order.phone,\n 'cps_email': order.email,\n 'paymentType': request.POST.get('payment_type'),\n }\n\n return JsonResponse(response_data)\n", "path": "shopelectro/views/ecommerce.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.core import serializers\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.views.decorators.http import require_POST\n\nfrom ecommerce import mailer, views as ec_views\nfrom pages.models import CustomPage\n\nfrom shopelectro.cart import SECart\nfrom shopelectro.forms import OrderForm\nfrom shopelectro.models import Product, Order\n\n\n# ECOMMERCE VIEWS\nclass OrderPage(ec_views.OrderPage):\n order_form = OrderForm\n cart = SECart\n\n def get_context_data(self, request, **kwargs):\n return {\n **super(OrderPage, self).get_context_data(request, **kwargs),\n 'page': CustomPage.objects.get(slug='order'),\n }\n\n\nclass AddToCart(ec_views.AddToCart):\n cart = SECart\n product_model = Product\n order_form = OrderForm\n\n\nclass RemoveFromCart(ec_views.RemoveFromCart):\n cart = SECart\n product_model = Product\n order_form = OrderForm\n\n\nclass ChangeCount(ec_views.ChangeCount):\n cart = SECart\n product_model = Product\n order_form = OrderForm\n\n\nclass FlushCart(ec_views.FlushCart):\n product_model = Product\n order_form = OrderForm\n\n\nclass OrderSuccess(ec_views.OrderSuccess):\n order = Order.objects.all().prefetch_related('positions')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n positions_json = serializers.serialize(\n 'json',\n context['order'].positions.all(),\n fields=['name', 'quantity', 'price'],\n )\n\n return {\n **context,\n 'positions_json': positions_json,\n }\n\n\n@require_POST\ndef one_click_buy(request):\n \"\"\"\n Handle one-click-buy.\n\n Accept XHR, save Order to DB, send mail about it\n and return 200 OK.\n \"\"\"\n SECart(request.session).clear()\n\n cart = SECart(request.session)\n product = get_object_or_404(Product, id=request.POST['product'])\n cart.add(product, int(request.POST['quantity']))\n order = Order(phone=request.POST['phone'])\n order.set_positions(cart)\n ec_views.save_order_to_session(request.session, order)\n mailer.send_order(\n subject=settings.EMAIL_SUBJECTS['one_click'],\n order=order,\n to_customer=False,\n )\n return HttpResponse('ok')\n\n\n@require_POST\ndef order_call(request):\n \"\"\"Send email about ordered call.\"\"\"\n phone, time, url = ec_views.get_keys_from_post(\n request, 'phone', 'time', 
'url')\n\n mailer.send_backcall(\n subject=settings.EMAIL_SUBJECTS['call'],\n phone=phone,\n time=time,\n url=url,\n )\n\n return HttpResponse('ok')\n\n\nclass YandexOrder(OrderPage):\n\n def post(self, request):\n cart = self.cart(request.session)\n form = self.order_form(request.POST.dict())\n\n if not form.is_valid():\n return render(request, self.template, {'cart': cart, 'form': form})\n\n order = form.save()\n order.set_positions(cart)\n ec_views.save_order_to_session(request.session, order)\n\n # Took form fields from Yandex docs https://goo.gl/afKfsz\n response_data = {\n 'yandex_kassa_link': settings.YANDEX_KASSA_LINK, # Required\n 'shopId': settings.SHOP['id'], # Required\n 'scid': settings.SHOP['scid'], # Required\n 'shopSuccessURL': settings.SHOP['success_url'],\n 'shopFailURL': settings.SHOP['fail_url'],\n 'customerNumber': order.id, # Required\n 'sum': order.total_price, # Required\n 'orderNumber': order.fake_order_number,\n 'cps_phone': order.phone,\n 'cps_email': order.email,\n 'paymentType': request.POST.get('payment_type'),\n }\n\n return JsonResponse(response_data)\n", "path": "shopelectro/views/ecommerce.py"}]} | 1,559 | 232 |
gh_patches_debug_32456 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-217 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Create physics exceptions and warnings
Several of the exceptions and warnings that come up in PlasmaPy result from when we try to violate the laws of physics. Most of the time these violations cause a `UserWarning` or raise a `ValueError`, but these are pretty generic and don't give much insight into the cause of the problem. To help with exception handling, I propose that we create new exceptions and warnings, such as `PhysicsError` and `PhysicsWarning`. We could additionally be more specific and also have `RelativityError` and/or `RelativityWarning`. One possibility would be to put these in a new file called `plasmapy/utils/exceptions.py`.
This would be a good first contribution for someone who is somewhat familiar with Python. Here's documentation on [user-defined exceptions](https://docs.python.org/3/tutorial/errors.html#user-defined-exceptions. Thank you!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plasmapy/utils/exceptions.py`
Content:
```
1 """
2 Custom Error and Warning names to improve readability
3 """
4
5
6 # ----------
7 # Exceptions:
8 # ----------
9
10 class PlasmaPyError(Exception):
11 """
12 Base class of PlasmaPy custom errors.
13
14 All custom exceptions raised by PlasmaPy should inherit from this class
15 and be defined in this module.
16
17 Custom exceptions can inherit from other exception types too. Thus, if code
18 already knows how to handle a ValueError, it won't need any specific
19 modification.
20 """
21
22
23 class PhysicsError(PlasmaPyError, ValueError):
24 """Error for use of a physics value outside PlasmaPy theoretical bounds"""
25
26
27 class RelativityError(PhysicsError):
28 """Error for use of a speed greater than or equal to the speed of light"""
29
30
31 # ----------
32 # Warnings:
33 # ----------
34
35 class PlasmaPyWarning(Warning):
36 """Base class of PlasmaPy custom warnings.
37
38 All PlasmaPy custom warnings should inherit from this class and be defined
39 in this module.
40
41 Warnings should be issued using warnings.warn, which will not break
42 execution if unhandled.
43 """
44
45
46 class PhysicsWarning(PlasmaPyWarning):
47 """Warning for using a mildly worrisome physics value"""
48
49
50 class RelativityWarning(PhysicsWarning):
51 """Warning for use of a speed quantity approaching the speed of light"""
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plasmapy/utils/exceptions.py b/plasmapy/utils/exceptions.py
--- a/plasmapy/utils/exceptions.py
+++ b/plasmapy/utils/exceptions.py
@@ -18,14 +18,51 @@
already knows how to handle a ValueError, it won't need any specific
modification.
"""
+ pass
class PhysicsError(PlasmaPyError, ValueError):
"""Error for use of a physics value outside PlasmaPy theoretical bounds"""
+ pass
class RelativityError(PhysicsError):
"""Error for use of a speed greater than or equal to the speed of light"""
+ pass
+
+
+class AtomicError(PlasmaPyError):
+ """Error for use by an atomic subpackage"""
+ pass
+
+
+class MissingAtomicDataError(AtomicError):
+ """Error for use when atomic data is missing."""
+ pass
+
+
+class NoChargeInfoError(AtomicError):
+ """Error for use when charge information is needed but missing."""
+
+
+class IonError(NoChargeInfoError):
+ """Error for use when an ion is invalid."""
+ pass
+
+
+class IsotopeError(AtomicError):
+ """Error for use when an isotope is invalid."""
+ pass
+
+
+class ElementError(IsotopeError, IonError):
+ """Error for use when an element is invalid."""
+ pass
+
+
+class ParticleError(ElementError):
+ """Error for use when a particle is invalid."""
+ pass
# ----------
@@ -41,11 +78,24 @@
Warnings should be issued using warnings.warn, which will not break
execution if unhandled.
"""
+ pass
class PhysicsWarning(PlasmaPyWarning):
"""Warning for using a mildly worrisome physics value"""
+ pass
class RelativityWarning(PhysicsWarning):
"""Warning for use of a speed quantity approaching the speed of light"""
+ pass
+
+
+class AtomicWarning(PlasmaPyWarning):
+ """Warnings for use in the atomic subpackage."""
+ pass
+
+
+class MissingAtomicDataWarning(AtomicWarning):
+ """Warning for use when atomic data is missing."""
+ pass
| {"golden_diff": "diff --git a/plasmapy/utils/exceptions.py b/plasmapy/utils/exceptions.py\n--- a/plasmapy/utils/exceptions.py\n+++ b/plasmapy/utils/exceptions.py\n@@ -18,14 +18,51 @@\n already knows how to handle a ValueError, it won't need any specific\n modification.\n \"\"\"\n+ pass\n \n \n class PhysicsError(PlasmaPyError, ValueError):\n \"\"\"Error for use of a physics value outside PlasmaPy theoretical bounds\"\"\"\n+ pass\n \n \n class RelativityError(PhysicsError):\n \"\"\"Error for use of a speed greater than or equal to the speed of light\"\"\"\n+ pass\n+\n+\n+class AtomicError(PlasmaPyError):\n+ \"\"\"Error for use by an atomic subpackage\"\"\"\n+ pass\n+\n+\n+class MissingAtomicDataError(AtomicError):\n+ \"\"\"Error for use when atomic data is missing.\"\"\"\n+ pass\n+\n+\n+class NoChargeInfoError(AtomicError):\n+ \"\"\"Error for use when charge information is needed but missing.\"\"\"\n+\n+\n+class IonError(NoChargeInfoError):\n+ \"\"\"Error for use when an ion is invalid.\"\"\"\n+ pass\n+\n+\n+class IsotopeError(AtomicError):\n+ \"\"\"Error for use when an isotope is invalid.\"\"\"\n+ pass\n+\n+\n+class ElementError(IsotopeError, IonError):\n+ \"\"\"Error for use when an element is invalid.\"\"\"\n+ pass\n+\n+\n+class ParticleError(ElementError):\n+ \"\"\"Error for use when a particle is invalid.\"\"\"\n+ pass\n \n \n # ----------\n@@ -41,11 +78,24 @@\n Warnings should be issued using warnings.warn, which will not break\n execution if unhandled.\n \"\"\"\n+ pass\n \n \n class PhysicsWarning(PlasmaPyWarning):\n \"\"\"Warning for using a mildly worrisome physics value\"\"\"\n+ pass\n \n \n class RelativityWarning(PhysicsWarning):\n \"\"\"Warning for use of a speed quantity approaching the speed of light\"\"\"\n+ pass\n+\n+\n+class AtomicWarning(PlasmaPyWarning):\n+ \"\"\"Warnings for use in the atomic subpackage.\"\"\"\n+ pass\n+\n+\n+class MissingAtomicDataWarning(AtomicWarning):\n+ \"\"\"Warning for use when atomic data is missing.\"\"\"\n+ pass\n", "issue": "Create physics exceptions and warnings\nSeveral of the exceptions and warnings that come up in PlasmaPy result from when we try to violate the laws of physics. Most of the time these violations cause a `UserWarning` or raise a `ValueError`, but these are pretty generic and don't give much insight into the cause of the problem. To help with exception handling, I propose that we create new exceptions and warnings, such as `PhysicsError` and `PhysicsWarning`. We could additionally be more specific and also have `RelativityError` and/or `RelativityWarning`. One possibility would be to put these in a new file called `plasmapy/utils/exceptions.py`.\r\n\r\nThis would be a good first contribution for someone who is somewhat familiar with Python. Here's documentation on [user-defined exceptions](https://docs.python.org/3/tutorial/errors.html#user-defined-exceptions. Thank you!\r\n\n", "before_files": [{"content": "\"\"\"\nCustom Error and Warning names to improve readability\n\"\"\"\n\n\n# ----------\n# Exceptions:\n# ----------\n\nclass PlasmaPyError(Exception):\n \"\"\"\n Base class of PlasmaPy custom errors.\n\n All custom exceptions raised by PlasmaPy should inherit from this class\n and be defined in this module.\n\n Custom exceptions can inherit from other exception types too. 
Thus, if code\n already knows how to handle a ValueError, it won't need any specific\n modification.\n \"\"\"\n\n\nclass PhysicsError(PlasmaPyError, ValueError):\n \"\"\"Error for use of a physics value outside PlasmaPy theoretical bounds\"\"\"\n\n\nclass RelativityError(PhysicsError):\n \"\"\"Error for use of a speed greater than or equal to the speed of light\"\"\"\n\n\n# ----------\n# Warnings:\n# ----------\n\nclass PlasmaPyWarning(Warning):\n \"\"\"Base class of PlasmaPy custom warnings.\n\n All PlasmaPy custom warnings should inherit from this class and be defined\n in this module.\n\n Warnings should be issued using warnings.warn, which will not break\n execution if unhandled.\n \"\"\"\n\n\nclass PhysicsWarning(PlasmaPyWarning):\n \"\"\"Warning for using a mildly worrisome physics value\"\"\"\n\n\nclass RelativityWarning(PhysicsWarning):\n \"\"\"Warning for use of a speed quantity approaching the speed of light\"\"\"\n", "path": "plasmapy/utils/exceptions.py"}], "after_files": [{"content": "\"\"\"\nCustom Error and Warning names to improve readability\n\"\"\"\n\n\n# ----------\n# Exceptions:\n# ----------\n\nclass PlasmaPyError(Exception):\n \"\"\"\n Base class of PlasmaPy custom errors.\n\n All custom exceptions raised by PlasmaPy should inherit from this class\n and be defined in this module.\n\n Custom exceptions can inherit from other exception types too. Thus, if code\n already knows how to handle a ValueError, it won't need any specific\n modification.\n \"\"\"\n pass\n\n\nclass PhysicsError(PlasmaPyError, ValueError):\n \"\"\"Error for use of a physics value outside PlasmaPy theoretical bounds\"\"\"\n pass\n\n\nclass RelativityError(PhysicsError):\n \"\"\"Error for use of a speed greater than or equal to the speed of light\"\"\"\n pass\n\n\nclass AtomicError(PlasmaPyError):\n \"\"\"Error for use by an atomic subpackage\"\"\"\n pass\n\n\nclass MissingAtomicDataError(AtomicError):\n \"\"\"Error for use when atomic data is missing.\"\"\"\n pass\n\n\nclass NoChargeInfoError(AtomicError):\n \"\"\"Error for use when charge information is needed but missing.\"\"\"\n\n\nclass IonError(NoChargeInfoError):\n \"\"\"Error for use when an ion is invalid.\"\"\"\n pass\n\n\nclass IsotopeError(AtomicError):\n \"\"\"Error for use when an isotope is invalid.\"\"\"\n pass\n\n\nclass ElementError(IsotopeError, IonError):\n \"\"\"Error for use when an element is invalid.\"\"\"\n pass\n\n\nclass ParticleError(ElementError):\n \"\"\"Error for use when a particle is invalid.\"\"\"\n pass\n\n\n# ----------\n# Warnings:\n# ----------\n\nclass PlasmaPyWarning(Warning):\n \"\"\"Base class of PlasmaPy custom warnings.\n\n All PlasmaPy custom warnings should inherit from this class and be defined\n in this module.\n\n Warnings should be issued using warnings.warn, which will not break\n execution if unhandled.\n \"\"\"\n pass\n\n\nclass PhysicsWarning(PlasmaPyWarning):\n \"\"\"Warning for using a mildly worrisome physics value\"\"\"\n pass\n\n\nclass RelativityWarning(PhysicsWarning):\n \"\"\"Warning for use of a speed quantity approaching the speed of light\"\"\"\n pass\n\n\nclass AtomicWarning(PlasmaPyWarning):\n \"\"\"Warnings for use in the atomic subpackage.\"\"\"\n pass\n\n\nclass MissingAtomicDataWarning(AtomicWarning):\n \"\"\"Warning for use when atomic data is missing.\"\"\"\n pass\n", "path": "plasmapy/utils/exceptions.py"}]} | 824 | 492 |
gh_patches_debug_916 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-581 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can we keep a mturk task from outside parlai/mturk/tasks?
Hi @JackUrb, I have a few questions regarding the mturk evaluation:
1. This link (http://parl.ai/static/docs/mturk.html#running-a-task) says that
> to run an MTurk task, first ensure that the task directory is in `parlai/mturk/tasks/`.
Is it by design? I tried to keep my task in some other directory (outside root parlai directory) and tried to import parlai as a package but that doesn't seem to work. Basically I am trying to use ParlAI as one of the dependency for one of my project for Human In the loop evaluation.
2. How easy/hard it is to provide support for keeping the mturk task anywhere?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright (c) 2017-present, Facebook, Inc.
2 # All rights reserved.
3 # This source code is licensed under the BSD-style license found in the
4 # LICENSE file in the root directory of this source tree. An additional grant
5 # of patent rights can be found in the PATENTS file in the same directory.
6
7
8 from setuptools import setup, find_packages
9 import sys
10
11 if sys.version_info < (3,):
12 sys.exit('Sorry, Python3 is required for ParlAI.')
13
14 with open('README.md', encoding="utf8") as f:
15 readme = f.read()
16
17 with open('LICENSE') as f:
18 license = f.read()
19
20 with open('requirements.txt') as f:
21 reqs = f.read()
22
23 setup(
24 name='parlai',
25 version='0.1.0',
26 description='Unified API for accessing dialog datasets.',
27 long_description=readme,
28 url='http://parl.ai/',
29 license=license,
30 packages=find_packages(exclude=(
31 'data', 'docs', 'downloads', 'examples', 'logs', 'tests')),
32 install_requires=reqs.strip().split('\n'),
33 )
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -30,4 +30,5 @@
packages=find_packages(exclude=(
'data', 'docs', 'downloads', 'examples', 'logs', 'tests')),
install_requires=reqs.strip().split('\n'),
+ include_package_data=True,
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,4 +30,5 @@\n packages=find_packages(exclude=(\n 'data', 'docs', 'downloads', 'examples', 'logs', 'tests')),\n install_requires=reqs.strip().split('\\n'),\n+ include_package_data=True,\n )\n", "issue": "Can we keep a mturk task from outside parlai/mturk/tasks?\nHi @JackUrb, I have a few questions regarding the mturk evaluation:\r\n\r\n1. This link (http://parl.ai/static/docs/mturk.html#running-a-task) says that \r\n\r\n> to run an MTurk task, first ensure that the task directory is in `parlai/mturk/tasks/`. \r\n\r\n Is it by design? I tried to keep my task in some other directory (outside root parlai directory) and tried to import parlai as a package but that doesn't seem to work. Basically I am trying to use ParlAI as one of the dependency for one of my project for Human In the loop evaluation.\r\n\r\n2. How easy/hard it is to provide support for keeping the mturk task anywhere?\r\n\n", "before_files": [{"content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\n\nfrom setuptools import setup, find_packages\nimport sys\n\nif sys.version_info < (3,):\n sys.exit('Sorry, Python3 is required for ParlAI.')\n\nwith open('README.md', encoding=\"utf8\") as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nwith open('requirements.txt') as f:\n reqs = f.read()\n\nsetup(\n name='parlai',\n version='0.1.0',\n description='Unified API for accessing dialog datasets.',\n long_description=readme,\n url='http://parl.ai/',\n license=license,\n packages=find_packages(exclude=(\n 'data', 'docs', 'downloads', 'examples', 'logs', 'tests')),\n install_requires=reqs.strip().split('\\n'),\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\n\nfrom setuptools import setup, find_packages\nimport sys\n\nif sys.version_info < (3,):\n sys.exit('Sorry, Python3 is required for ParlAI.')\n\nwith open('README.md', encoding=\"utf8\") as f:\n readme = f.read()\n\nwith open('LICENSE') as f:\n license = f.read()\n\nwith open('requirements.txt') as f:\n reqs = f.read()\n\nsetup(\n name='parlai',\n version='0.1.0',\n description='Unified API for accessing dialog datasets.',\n long_description=readme,\n url='http://parl.ai/',\n license=license,\n packages=find_packages(exclude=(\n 'data', 'docs', 'downloads', 'examples', 'logs', 'tests')),\n install_requires=reqs.strip().split('\\n'),\n include_package_data=True,\n)\n", "path": "setup.py"}]} | 737 | 77 |
gh_patches_debug_26439 | rasdani/github-patches | git_diff | python-discord__bot-772 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Verify command shouldn't log tracebacks for DM failure.
Sentry Issue: [BOT-11](https://sentry.io/organizations/python-discord/issues/1526042507/?referrer=github_integration)
It currently uses `log.exception()` for all exceptions. `Forbidden` should be a special case and use a lower log level. A better approach may be to only handle `Forbidden` and re-raise all other exceptions (to defer it to the default command error handler). A `finally` block can be used to still delete the message afterwards.
```py
try:
await ctx.author.send(WELCOME_MESSAGE)
except Exception:
# Catch the exception, in case they have DMs off or something
log.exception(f"Unable to send welcome message to user {ctx.author}.")
```
```
Forbidden: 403 FORBIDDEN (error code: 50007): Cannot send messages to this user
File "bot/cogs/verification.py", line 129, in accept_command
await ctx.author.send(WELCOME_MESSAGE)
File "discord/abc.py", line 856, in send
data = await state.http.send_message(channel.id, content, tts=tts, embed=embed, nonce=nonce)
File "discord/http.py", line 216, in request
raise Forbidden(r, data)
Unable to send welcome message to user <REDACTED>.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/cogs/verification.py`
Content:
```
1 import logging
2 from datetime import datetime
3
4 from discord import Colour, Message, NotFound, Object
5 from discord.ext import tasks
6 from discord.ext.commands import Cog, Context, command
7
8 from bot.bot import Bot
9 from bot.cogs.moderation import ModLog
10 from bot.constants import (
11 Bot as BotConfig,
12 Channels, Colours, Event,
13 Filter, Icons, MODERATION_ROLES, Roles
14 )
15 from bot.decorators import InChannelCheckFailure, in_channel, without_role
16 from bot.utils.checks import without_role_check
17
18 log = logging.getLogger(__name__)
19
20 WELCOME_MESSAGE = f"""
21 Hello! Welcome to the server, and thanks for verifying yourself!
22
23 For your records, these are the documents you accepted:
24
25 `1)` Our rules, here: <https://pythondiscord.com/pages/rules>
26 `2)` Our privacy policy, here: <https://pythondiscord.com/pages/privacy> - you can find information on how to have \
27 your information removed here as well.
28
29 Feel free to review them at any point!
30
31 Additionally, if you'd like to receive notifications for the announcements we post in <#{Channels.announcements}> \
32 from time to time, you can send `!subscribe` to <#{Channels.bot}> at any time to assign yourself the \
33 **Announcements** role. We'll mention this role every time we make an announcement.
34
35 If you'd like to unsubscribe from the announcement notifications, simply send `!unsubscribe` to <#{Channels.bot}>.
36 """
37
38 PERIODIC_PING = (
39 f"@everyone To verify that you have read our rules, please type `{BotConfig.prefix}accept`."
40 f" If you encounter any problems during the verification process, ping the <@&{Roles.admin}> role in this channel."
41 )
42 BOT_MESSAGE_DELETE_DELAY = 10
43
44
45 class Verification(Cog):
46 """User verification and role self-management."""
47
48 def __init__(self, bot: Bot):
49 self.bot = bot
50 self.periodic_ping.start()
51
52 @property
53 def mod_log(self) -> ModLog:
54 """Get currently loaded ModLog cog instance."""
55 return self.bot.get_cog("ModLog")
56
57 @Cog.listener()
58 async def on_message(self, message: Message) -> None:
59 """Check new message event for messages to the checkpoint channel & process."""
60 if message.channel.id != Channels.verification:
61 return # Only listen for #checkpoint messages
62
63 if message.author.bot:
64 # They're a bot, delete their message after the delay.
65 # But not the periodic ping; we like that one.
66 if message.content != PERIODIC_PING:
67 await message.delete(delay=BOT_MESSAGE_DELETE_DELAY)
68 return
69
70 # if a user mentions a role or guild member
71 # alert the mods in mod-alerts channel
72 if message.mentions or message.role_mentions:
73 log.debug(
74 f"{message.author} mentioned one or more users "
75 f"and/or roles in {message.channel.name}"
76 )
77
78 embed_text = (
79 f"{message.author.mention} sent a message in "
80 f"{message.channel.mention} that contained user and/or role mentions."
81 f"\n\n**Original message:**\n>>> {message.content}"
82 )
83
84 # Send pretty mod log embed to mod-alerts
85 await self.mod_log.send_log_message(
86 icon_url=Icons.filtering,
87 colour=Colour(Colours.soft_red),
88 title=f"User/Role mentioned in {message.channel.name}",
89 text=embed_text,
90 thumbnail=message.author.avatar_url_as(static_format="png"),
91 channel_id=Channels.mod_alerts,
92 ping_everyone=Filter.ping_everyone,
93 )
94
95 ctx = await self.bot.get_context(message) # type: Context
96
97 if ctx.command is not None and ctx.command.name == "accept":
98 return # They used the accept command
99
100 for role in ctx.author.roles:
101 if role.id == Roles.verified:
102 log.warning(f"{ctx.author} posted '{ctx.message.content}' "
103 "in the verification channel, but is already verified.")
104 return # They're already verified
105
106 log.debug(f"{ctx.author} posted '{ctx.message.content}' in the verification "
107 "channel. We are providing instructions how to verify.")
108 await ctx.send(
109 f"{ctx.author.mention} Please type `!accept` to verify that you accept our rules, "
110 f"and gain access to the rest of the server.",
111 delete_after=20
112 )
113
114 log.trace(f"Deleting the message posted by {ctx.author}")
115
116 try:
117 await ctx.message.delete()
118 except NotFound:
119 log.trace("No message found, it must have been deleted by another bot.")
120
121 @command(name='accept', aliases=('verify', 'verified', 'accepted'), hidden=True)
122 @without_role(Roles.verified)
123 @in_channel(Channels.verification)
124 async def accept_command(self, ctx: Context, *_) -> None: # We don't actually care about the args
125 """Accept our rules and gain access to the rest of the server."""
126 log.debug(f"{ctx.author} called !accept. Assigning the 'Developer' role.")
127 await ctx.author.add_roles(Object(Roles.verified), reason="Accepted the rules")
128 try:
129 await ctx.author.send(WELCOME_MESSAGE)
130 except Exception:
131 # Catch the exception, in case they have DMs off or something
132 log.exception(f"Unable to send welcome message to user {ctx.author}.")
133
134 log.trace(f"Deleting the message posted by {ctx.author}.")
135
136 try:
137 self.mod_log.ignore(Event.message_delete, ctx.message.id)
138 await ctx.message.delete()
139 except NotFound:
140 log.trace("No message found, it must have been deleted by another bot.")
141
142 @command(name='subscribe')
143 @in_channel(Channels.bot)
144 async def subscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args
145 """Subscribe to announcement notifications by assigning yourself the role."""
146 has_role = False
147
148 for role in ctx.author.roles:
149 if role.id == Roles.announcements:
150 has_role = True
151 break
152
153 if has_role:
154 await ctx.send(f"{ctx.author.mention} You're already subscribed!")
155 return
156
157 log.debug(f"{ctx.author} called !subscribe. Assigning the 'Announcements' role.")
158 await ctx.author.add_roles(Object(Roles.announcements), reason="Subscribed to announcements")
159
160 log.trace(f"Deleting the message posted by {ctx.author}.")
161
162 await ctx.send(
163 f"{ctx.author.mention} Subscribed to <#{Channels.announcements}> notifications.",
164 )
165
166 @command(name='unsubscribe')
167 @in_channel(Channels.bot)
168 async def unsubscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args
169 """Unsubscribe from announcement notifications by removing the role from yourself."""
170 has_role = False
171
172 for role in ctx.author.roles:
173 if role.id == Roles.announcements:
174 has_role = True
175 break
176
177 if not has_role:
178 await ctx.send(f"{ctx.author.mention} You're already unsubscribed!")
179 return
180
181 log.debug(f"{ctx.author} called !unsubscribe. Removing the 'Announcements' role.")
182 await ctx.author.remove_roles(Object(Roles.announcements), reason="Unsubscribed from announcements")
183
184 log.trace(f"Deleting the message posted by {ctx.author}.")
185
186 await ctx.send(
187 f"{ctx.author.mention} Unsubscribed from <#{Channels.announcements}> notifications."
188 )
189
190 # This cannot be static (must have a __func__ attribute).
191 async def cog_command_error(self, ctx: Context, error: Exception) -> None:
192 """Check for & ignore any InChannelCheckFailure."""
193 if isinstance(error, InChannelCheckFailure):
194 error.handled = True
195
196 @staticmethod
197 def bot_check(ctx: Context) -> bool:
198 """Block any command within the verification channel that is not !accept."""
199 if ctx.channel.id == Channels.verification and without_role_check(ctx, *MODERATION_ROLES):
200 return ctx.command.name == "accept"
201 else:
202 return True
203
204 @tasks.loop(hours=12)
205 async def periodic_ping(self) -> None:
206 """Every week, mention @everyone to remind them to verify."""
207 messages = self.bot.get_channel(Channels.verification).history(limit=10)
208 need_to_post = True # True if a new message needs to be sent.
209
210 async for message in messages:
211 if message.author == self.bot.user and message.content == PERIODIC_PING:
212 delta = datetime.utcnow() - message.created_at # Time since last message.
213 if delta.days >= 7: # Message is older than a week.
214 await message.delete()
215 else:
216 need_to_post = False
217
218 break
219
220 if need_to_post:
221 await self.bot.get_channel(Channels.verification).send(PERIODIC_PING)
222
223 @periodic_ping.before_loop
224 async def before_ping(self) -> None:
225 """Only start the loop when the bot is ready."""
226 await self.bot.wait_until_ready()
227
228 def cog_unload(self) -> None:
229 """Cancel the periodic ping task when the cog is unloaded."""
230 self.periodic_ping.cancel()
231
232
233 def setup(bot: Bot) -> None:
234 """Load the Verification cog."""
235 bot.add_cog(Verification(bot))
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/cogs/verification.py b/bot/cogs/verification.py
--- a/bot/cogs/verification.py
+++ b/bot/cogs/verification.py
@@ -1,7 +1,8 @@
import logging
+from contextlib import suppress
from datetime import datetime
-from discord import Colour, Message, NotFound, Object
+from discord import Colour, Forbidden, Message, NotFound, Object
from discord.ext import tasks
from discord.ext.commands import Cog, Context, command
@@ -127,17 +128,13 @@
await ctx.author.add_roles(Object(Roles.verified), reason="Accepted the rules")
try:
await ctx.author.send(WELCOME_MESSAGE)
- except Exception:
- # Catch the exception, in case they have DMs off or something
- log.exception(f"Unable to send welcome message to user {ctx.author}.")
-
- log.trace(f"Deleting the message posted by {ctx.author}.")
-
- try:
- self.mod_log.ignore(Event.message_delete, ctx.message.id)
- await ctx.message.delete()
- except NotFound:
- log.trace("No message found, it must have been deleted by another bot.")
+ except Forbidden:
+ log.info(f"Sending welcome message failed for {ctx.author}.")
+ finally:
+ log.trace(f"Deleting accept message by {ctx.author}.")
+ with suppress(NotFound):
+ self.mod_log.ignore(Event.message_delete, ctx.message.id)
+ await ctx.message.delete()
@command(name='subscribe')
@in_channel(Channels.bot)
| {"golden_diff": "diff --git a/bot/cogs/verification.py b/bot/cogs/verification.py\n--- a/bot/cogs/verification.py\n+++ b/bot/cogs/verification.py\n@@ -1,7 +1,8 @@\n import logging\n+from contextlib import suppress\n from datetime import datetime\n \n-from discord import Colour, Message, NotFound, Object\n+from discord import Colour, Forbidden, Message, NotFound, Object\n from discord.ext import tasks\n from discord.ext.commands import Cog, Context, command\n \n@@ -127,17 +128,13 @@\n await ctx.author.add_roles(Object(Roles.verified), reason=\"Accepted the rules\")\n try:\n await ctx.author.send(WELCOME_MESSAGE)\n- except Exception:\n- # Catch the exception, in case they have DMs off or something\n- log.exception(f\"Unable to send welcome message to user {ctx.author}.\")\n-\n- log.trace(f\"Deleting the message posted by {ctx.author}.\")\n-\n- try:\n- self.mod_log.ignore(Event.message_delete, ctx.message.id)\n- await ctx.message.delete()\n- except NotFound:\n- log.trace(\"No message found, it must have been deleted by another bot.\")\n+ except Forbidden:\n+ log.info(f\"Sending welcome message failed for {ctx.author}.\")\n+ finally:\n+ log.trace(f\"Deleting accept message by {ctx.author}.\")\n+ with suppress(NotFound):\n+ self.mod_log.ignore(Event.message_delete, ctx.message.id)\n+ await ctx.message.delete()\n \n @command(name='subscribe')\n @in_channel(Channels.bot)\n", "issue": "Verify command shouldn't log tracebacks for DM failure.\nSentry Issue: [BOT-11](https://sentry.io/organizations/python-discord/issues/1526042507/?referrer=github_integration)\r\n\r\nIt currently uses `log.exception()` for all exceptions. `Forbidden` should be a special case and use a lower log level. A better approach may be to only handle `Forbidden` and re-raise all other exceptions (to defer it to the default command error handler). A `finally` block can be used to still delete the message afterwards.\r\n\r\n```py\r\n try:\r\n await ctx.author.send(WELCOME_MESSAGE)\r\n except Exception:\r\n # Catch the exception, in case they have DMs off or something\r\n log.exception(f\"Unable to send welcome message to user {ctx.author}.\")\r\n```\r\n\r\n```\r\nForbidden: 403 FORBIDDEN (error code: 50007): Cannot send messages to this user\r\n File \"bot/cogs/verification.py\", line 129, in accept_command\r\n await ctx.author.send(WELCOME_MESSAGE)\r\n File \"discord/abc.py\", line 856, in send\r\n data = await state.http.send_message(channel.id, content, tts=tts, embed=embed, nonce=nonce)\r\n File \"discord/http.py\", line 216, in request\r\n raise Forbidden(r, data)\r\n\r\nUnable to send welcome message to user <REDACTED>.\r\n```\n", "before_files": [{"content": "import logging\nfrom datetime import datetime\n\nfrom discord import Colour, Message, NotFound, Object\nfrom discord.ext import tasks\nfrom discord.ext.commands import Cog, Context, command\n\nfrom bot.bot import Bot\nfrom bot.cogs.moderation import ModLog\nfrom bot.constants import (\n Bot as BotConfig,\n Channels, Colours, Event,\n Filter, Icons, MODERATION_ROLES, Roles\n)\nfrom bot.decorators import InChannelCheckFailure, in_channel, without_role\nfrom bot.utils.checks import without_role_check\n\nlog = logging.getLogger(__name__)\n\nWELCOME_MESSAGE = f\"\"\"\nHello! 
Welcome to the server, and thanks for verifying yourself!\n\nFor your records, these are the documents you accepted:\n\n`1)` Our rules, here: <https://pythondiscord.com/pages/rules>\n`2)` Our privacy policy, here: <https://pythondiscord.com/pages/privacy> - you can find information on how to have \\\nyour information removed here as well.\n\nFeel free to review them at any point!\n\nAdditionally, if you'd like to receive notifications for the announcements we post in <#{Channels.announcements}> \\\nfrom time to time, you can send `!subscribe` to <#{Channels.bot}> at any time to assign yourself the \\\n**Announcements** role. We'll mention this role every time we make an announcement.\n\nIf you'd like to unsubscribe from the announcement notifications, simply send `!unsubscribe` to <#{Channels.bot}>.\n\"\"\"\n\nPERIODIC_PING = (\n f\"@everyone To verify that you have read our rules, please type `{BotConfig.prefix}accept`.\"\n f\" If you encounter any problems during the verification process, ping the <@&{Roles.admin}> role in this channel.\"\n)\nBOT_MESSAGE_DELETE_DELAY = 10\n\n\nclass Verification(Cog):\n \"\"\"User verification and role self-management.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.periodic_ping.start()\n\n @property\n def mod_log(self) -> ModLog:\n \"\"\"Get currently loaded ModLog cog instance.\"\"\"\n return self.bot.get_cog(\"ModLog\")\n\n @Cog.listener()\n async def on_message(self, message: Message) -> None:\n \"\"\"Check new message event for messages to the checkpoint channel & process.\"\"\"\n if message.channel.id != Channels.verification:\n return # Only listen for #checkpoint messages\n\n if message.author.bot:\n # They're a bot, delete their message after the delay.\n # But not the periodic ping; we like that one.\n if message.content != PERIODIC_PING:\n await message.delete(delay=BOT_MESSAGE_DELETE_DELAY)\n return\n\n # if a user mentions a role or guild member\n # alert the mods in mod-alerts channel\n if message.mentions or message.role_mentions:\n log.debug(\n f\"{message.author} mentioned one or more users \"\n f\"and/or roles in {message.channel.name}\"\n )\n\n embed_text = (\n f\"{message.author.mention} sent a message in \"\n f\"{message.channel.mention} that contained user and/or role mentions.\"\n f\"\\n\\n**Original message:**\\n>>> {message.content}\"\n )\n\n # Send pretty mod log embed to mod-alerts\n await self.mod_log.send_log_message(\n icon_url=Icons.filtering,\n colour=Colour(Colours.soft_red),\n title=f\"User/Role mentioned in {message.channel.name}\",\n text=embed_text,\n thumbnail=message.author.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.mod_alerts,\n ping_everyone=Filter.ping_everyone,\n )\n\n ctx = await self.bot.get_context(message) # type: Context\n\n if ctx.command is not None and ctx.command.name == \"accept\":\n return # They used the accept command\n\n for role in ctx.author.roles:\n if role.id == Roles.verified:\n log.warning(f\"{ctx.author} posted '{ctx.message.content}' \"\n \"in the verification channel, but is already verified.\")\n return # They're already verified\n\n log.debug(f\"{ctx.author} posted '{ctx.message.content}' in the verification \"\n \"channel. 
We are providing instructions how to verify.\")\n await ctx.send(\n f\"{ctx.author.mention} Please type `!accept` to verify that you accept our rules, \"\n f\"and gain access to the rest of the server.\",\n delete_after=20\n )\n\n log.trace(f\"Deleting the message posted by {ctx.author}\")\n\n try:\n await ctx.message.delete()\n except NotFound:\n log.trace(\"No message found, it must have been deleted by another bot.\")\n\n @command(name='accept', aliases=('verify', 'verified', 'accepted'), hidden=True)\n @without_role(Roles.verified)\n @in_channel(Channels.verification)\n async def accept_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Accept our rules and gain access to the rest of the server.\"\"\"\n log.debug(f\"{ctx.author} called !accept. Assigning the 'Developer' role.\")\n await ctx.author.add_roles(Object(Roles.verified), reason=\"Accepted the rules\")\n try:\n await ctx.author.send(WELCOME_MESSAGE)\n except Exception:\n # Catch the exception, in case they have DMs off or something\n log.exception(f\"Unable to send welcome message to user {ctx.author}.\")\n\n log.trace(f\"Deleting the message posted by {ctx.author}.\")\n\n try:\n self.mod_log.ignore(Event.message_delete, ctx.message.id)\n await ctx.message.delete()\n except NotFound:\n log.trace(\"No message found, it must have been deleted by another bot.\")\n\n @command(name='subscribe')\n @in_channel(Channels.bot)\n async def subscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Subscribe to announcement notifications by assigning yourself the role.\"\"\"\n has_role = False\n\n for role in ctx.author.roles:\n if role.id == Roles.announcements:\n has_role = True\n break\n\n if has_role:\n await ctx.send(f\"{ctx.author.mention} You're already subscribed!\")\n return\n\n log.debug(f\"{ctx.author} called !subscribe. Assigning the 'Announcements' role.\")\n await ctx.author.add_roles(Object(Roles.announcements), reason=\"Subscribed to announcements\")\n\n log.trace(f\"Deleting the message posted by {ctx.author}.\")\n\n await ctx.send(\n f\"{ctx.author.mention} Subscribed to <#{Channels.announcements}> notifications.\",\n )\n\n @command(name='unsubscribe')\n @in_channel(Channels.bot)\n async def unsubscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Unsubscribe from announcement notifications by removing the role from yourself.\"\"\"\n has_role = False\n\n for role in ctx.author.roles:\n if role.id == Roles.announcements:\n has_role = True\n break\n\n if not has_role:\n await ctx.send(f\"{ctx.author.mention} You're already unsubscribed!\")\n return\n\n log.debug(f\"{ctx.author} called !unsubscribe. 
Removing the 'Announcements' role.\")\n await ctx.author.remove_roles(Object(Roles.announcements), reason=\"Unsubscribed from announcements\")\n\n log.trace(f\"Deleting the message posted by {ctx.author}.\")\n\n await ctx.send(\n f\"{ctx.author.mention} Unsubscribed from <#{Channels.announcements}> notifications.\"\n )\n\n # This cannot be static (must have a __func__ attribute).\n async def cog_command_error(self, ctx: Context, error: Exception) -> None:\n \"\"\"Check for & ignore any InChannelCheckFailure.\"\"\"\n if isinstance(error, InChannelCheckFailure):\n error.handled = True\n\n @staticmethod\n def bot_check(ctx: Context) -> bool:\n \"\"\"Block any command within the verification channel that is not !accept.\"\"\"\n if ctx.channel.id == Channels.verification and without_role_check(ctx, *MODERATION_ROLES):\n return ctx.command.name == \"accept\"\n else:\n return True\n\n @tasks.loop(hours=12)\n async def periodic_ping(self) -> None:\n \"\"\"Every week, mention @everyone to remind them to verify.\"\"\"\n messages = self.bot.get_channel(Channels.verification).history(limit=10)\n need_to_post = True # True if a new message needs to be sent.\n\n async for message in messages:\n if message.author == self.bot.user and message.content == PERIODIC_PING:\n delta = datetime.utcnow() - message.created_at # Time since last message.\n if delta.days >= 7: # Message is older than a week.\n await message.delete()\n else:\n need_to_post = False\n\n break\n\n if need_to_post:\n await self.bot.get_channel(Channels.verification).send(PERIODIC_PING)\n\n @periodic_ping.before_loop\n async def before_ping(self) -> None:\n \"\"\"Only start the loop when the bot is ready.\"\"\"\n await self.bot.wait_until_ready()\n\n def cog_unload(self) -> None:\n \"\"\"Cancel the periodic ping task when the cog is unloaded.\"\"\"\n self.periodic_ping.cancel()\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Verification cog.\"\"\"\n bot.add_cog(Verification(bot))\n", "path": "bot/cogs/verification.py"}], "after_files": [{"content": "import logging\nfrom contextlib import suppress\nfrom datetime import datetime\n\nfrom discord import Colour, Forbidden, Message, NotFound, Object\nfrom discord.ext import tasks\nfrom discord.ext.commands import Cog, Context, command\n\nfrom bot.bot import Bot\nfrom bot.cogs.moderation import ModLog\nfrom bot.constants import (\n Bot as BotConfig,\n Channels, Colours, Event,\n Filter, Icons, MODERATION_ROLES, Roles\n)\nfrom bot.decorators import InChannelCheckFailure, in_channel, without_role\nfrom bot.utils.checks import without_role_check\n\nlog = logging.getLogger(__name__)\n\nWELCOME_MESSAGE = f\"\"\"\nHello! Welcome to the server, and thanks for verifying yourself!\n\nFor your records, these are the documents you accepted:\n\n`1)` Our rules, here: <https://pythondiscord.com/pages/rules>\n`2)` Our privacy policy, here: <https://pythondiscord.com/pages/privacy> - you can find information on how to have \\\nyour information removed here as well.\n\nFeel free to review them at any point!\n\nAdditionally, if you'd like to receive notifications for the announcements we post in <#{Channels.announcements}> \\\nfrom time to time, you can send `!subscribe` to <#{Channels.bot}> at any time to assign yourself the \\\n**Announcements** role. 
We'll mention this role every time we make an announcement.\n\nIf you'd like to unsubscribe from the announcement notifications, simply send `!unsubscribe` to <#{Channels.bot}>.\n\"\"\"\n\nPERIODIC_PING = (\n f\"@everyone To verify that you have read our rules, please type `{BotConfig.prefix}accept`.\"\n f\" If you encounter any problems during the verification process, ping the <@&{Roles.admin}> role in this channel.\"\n)\nBOT_MESSAGE_DELETE_DELAY = 10\n\n\nclass Verification(Cog):\n \"\"\"User verification and role self-management.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.periodic_ping.start()\n\n @property\n def mod_log(self) -> ModLog:\n \"\"\"Get currently loaded ModLog cog instance.\"\"\"\n return self.bot.get_cog(\"ModLog\")\n\n @Cog.listener()\n async def on_message(self, message: Message) -> None:\n \"\"\"Check new message event for messages to the checkpoint channel & process.\"\"\"\n if message.channel.id != Channels.verification:\n return # Only listen for #checkpoint messages\n\n if message.author.bot:\n # They're a bot, delete their message after the delay.\n # But not the periodic ping; we like that one.\n if message.content != PERIODIC_PING:\n await message.delete(delay=BOT_MESSAGE_DELETE_DELAY)\n return\n\n # if a user mentions a role or guild member\n # alert the mods in mod-alerts channel\n if message.mentions or message.role_mentions:\n log.debug(\n f\"{message.author} mentioned one or more users \"\n f\"and/or roles in {message.channel.name}\"\n )\n\n embed_text = (\n f\"{message.author.mention} sent a message in \"\n f\"{message.channel.mention} that contained user and/or role mentions.\"\n f\"\\n\\n**Original message:**\\n>>> {message.content}\"\n )\n\n # Send pretty mod log embed to mod-alerts\n await self.mod_log.send_log_message(\n icon_url=Icons.filtering,\n colour=Colour(Colours.soft_red),\n title=f\"User/Role mentioned in {message.channel.name}\",\n text=embed_text,\n thumbnail=message.author.avatar_url_as(static_format=\"png\"),\n channel_id=Channels.mod_alerts,\n ping_everyone=Filter.ping_everyone,\n )\n\n ctx = await self.bot.get_context(message) # type: Context\n\n if ctx.command is not None and ctx.command.name == \"accept\":\n return # They used the accept command\n\n for role in ctx.author.roles:\n if role.id == Roles.verified:\n log.warning(f\"{ctx.author} posted '{ctx.message.content}' \"\n \"in the verification channel, but is already verified.\")\n return # They're already verified\n\n log.debug(f\"{ctx.author} posted '{ctx.message.content}' in the verification \"\n \"channel. We are providing instructions how to verify.\")\n await ctx.send(\n f\"{ctx.author.mention} Please type `!accept` to verify that you accept our rules, \"\n f\"and gain access to the rest of the server.\",\n delete_after=20\n )\n\n log.trace(f\"Deleting the message posted by {ctx.author}\")\n\n try:\n await ctx.message.delete()\n except NotFound:\n log.trace(\"No message found, it must have been deleted by another bot.\")\n\n @command(name='accept', aliases=('verify', 'verified', 'accepted'), hidden=True)\n @without_role(Roles.verified)\n @in_channel(Channels.verification)\n async def accept_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Accept our rules and gain access to the rest of the server.\"\"\"\n log.debug(f\"{ctx.author} called !accept. 
Assigning the 'Developer' role.\")\n await ctx.author.add_roles(Object(Roles.verified), reason=\"Accepted the rules\")\n try:\n await ctx.author.send(WELCOME_MESSAGE)\n except Forbidden:\n log.info(f\"Sending welcome message failed for {ctx.author}.\")\n finally:\n log.trace(f\"Deleting accept message by {ctx.author}.\")\n with suppress(NotFound):\n self.mod_log.ignore(Event.message_delete, ctx.message.id)\n await ctx.message.delete()\n\n @command(name='subscribe')\n @in_channel(Channels.bot)\n async def subscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Subscribe to announcement notifications by assigning yourself the role.\"\"\"\n has_role = False\n\n for role in ctx.author.roles:\n if role.id == Roles.announcements:\n has_role = True\n break\n\n if has_role:\n await ctx.send(f\"{ctx.author.mention} You're already subscribed!\")\n return\n\n log.debug(f\"{ctx.author} called !subscribe. Assigning the 'Announcements' role.\")\n await ctx.author.add_roles(Object(Roles.announcements), reason=\"Subscribed to announcements\")\n\n log.trace(f\"Deleting the message posted by {ctx.author}.\")\n\n await ctx.send(\n f\"{ctx.author.mention} Subscribed to <#{Channels.announcements}> notifications.\",\n )\n\n @command(name='unsubscribe')\n @in_channel(Channels.bot)\n async def unsubscribe_command(self, ctx: Context, *_) -> None: # We don't actually care about the args\n \"\"\"Unsubscribe from announcement notifications by removing the role from yourself.\"\"\"\n has_role = False\n\n for role in ctx.author.roles:\n if role.id == Roles.announcements:\n has_role = True\n break\n\n if not has_role:\n await ctx.send(f\"{ctx.author.mention} You're already unsubscribed!\")\n return\n\n log.debug(f\"{ctx.author} called !unsubscribe. 
Removing the 'Announcements' role.\")\n await ctx.author.remove_roles(Object(Roles.announcements), reason=\"Unsubscribed from announcements\")\n\n log.trace(f\"Deleting the message posted by {ctx.author}.\")\n\n await ctx.send(\n f\"{ctx.author.mention} Unsubscribed from <#{Channels.announcements}> notifications.\"\n )\n\n # This cannot be static (must have a __func__ attribute).\n async def cog_command_error(self, ctx: Context, error: Exception) -> None:\n \"\"\"Check for & ignore any InChannelCheckFailure.\"\"\"\n if isinstance(error, InChannelCheckFailure):\n error.handled = True\n\n @staticmethod\n def bot_check(ctx: Context) -> bool:\n \"\"\"Block any command within the verification channel that is not !accept.\"\"\"\n if ctx.channel.id == Channels.verification and without_role_check(ctx, *MODERATION_ROLES):\n return ctx.command.name == \"accept\"\n else:\n return True\n\n @tasks.loop(hours=12)\n async def periodic_ping(self) -> None:\n \"\"\"Every week, mention @everyone to remind them to verify.\"\"\"\n messages = self.bot.get_channel(Channels.verification).history(limit=10)\n need_to_post = True # True if a new message needs to be sent.\n\n async for message in messages:\n if message.author == self.bot.user and message.content == PERIODIC_PING:\n delta = datetime.utcnow() - message.created_at # Time since last message.\n if delta.days >= 7: # Message is older than a week.\n await message.delete()\n else:\n need_to_post = False\n\n break\n\n if need_to_post:\n await self.bot.get_channel(Channels.verification).send(PERIODIC_PING)\n\n @periodic_ping.before_loop\n async def before_ping(self) -> None:\n \"\"\"Only start the loop when the bot is ready.\"\"\"\n await self.bot.wait_until_ready()\n\n def cog_unload(self) -> None:\n \"\"\"Cancel the periodic ping task when the cog is unloaded.\"\"\"\n self.periodic_ping.cancel()\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Verification cog.\"\"\"\n bot.add_cog(Verification(bot))\n", "path": "bot/cogs/verification.py"}]} | 3,218 | 343 |
gh_patches_debug_16255 | rasdani/github-patches | git_diff | redis__redis-py-3042 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`disable_decoding` not used in async version of `read_response`.
**Version**: redis-py version - 5.0.0 (I still see it on main)
**Description**:
Is there a reason why `disable_decoding` is not being used in the async version of `read_response`? 
It seems like there's no async way to get raw data (`NEVER_DECODE`) from Redis.
Are there any plans to add `NEVER_DECODE` support for the async `read_response`?
We want to save pickled data (not compatible with `UTF-8`) in Redis, and for now we will use the workaround (as in https://github.com/redis/redis-py/issues/1006#issuecomment-1100506453 ), but it's not a very clean solution.
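For context, one interim pattern is to keep a dedicated client with `decode_responses=False` for binary payloads. The sketch below is illustrative only — it is not necessarily the exact workaround from the linked comment, and the key name and pickle round-trip are assumptions about the use case:

```python
import pickle

import redis.asyncio as redis

async def roundtrip(obj):
    # Dedicated client that never decodes, so pickled bytes come back untouched
    raw = redis.Redis(decode_responses=False)
    await raw.set("my:pickled:key", pickle.dumps(obj))
    data = await raw.get("my:pickled:key")  # bytes, no UTF-8 decoding attempted
    return pickle.loads(data)
```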
redis/_parsers/hiredis.py:204
https://github.com/redis/redis-py/blob/d3a3ada03e080f39144807c9fbe44876c40e0548/redis/_parsers/hiredis.py#L192-L217
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redis/_parsers/hiredis.py`
Content:
```
1 import asyncio
2 import socket
3 import sys
4 from typing import Callable, List, Optional, Union
5
6 if sys.version_info.major >= 3 and sys.version_info.minor >= 11:
7 from asyncio import timeout as async_timeout
8 else:
9 from async_timeout import timeout as async_timeout
10
11 from redis.compat import TypedDict
12
13 from ..exceptions import ConnectionError, InvalidResponse, RedisError
14 from ..typing import EncodableT
15 from ..utils import HIREDIS_AVAILABLE
16 from .base import AsyncBaseParser, BaseParser
17 from .socket import (
18 NONBLOCKING_EXCEPTION_ERROR_NUMBERS,
19 NONBLOCKING_EXCEPTIONS,
20 SENTINEL,
21 SERVER_CLOSED_CONNECTION_ERROR,
22 )
23
24
25 class _HiredisReaderArgs(TypedDict, total=False):
26 protocolError: Callable[[str], Exception]
27 replyError: Callable[[str], Exception]
28 encoding: Optional[str]
29 errors: Optional[str]
30
31
32 class _HiredisParser(BaseParser):
33 "Parser class for connections using Hiredis"
34
35 def __init__(self, socket_read_size):
36 if not HIREDIS_AVAILABLE:
37 raise RedisError("Hiredis is not installed")
38 self.socket_read_size = socket_read_size
39 self._buffer = bytearray(socket_read_size)
40
41 def __del__(self):
42 try:
43 self.on_disconnect()
44 except Exception:
45 pass
46
47 def on_connect(self, connection, **kwargs):
48 import hiredis
49
50 self._sock = connection._sock
51 self._socket_timeout = connection.socket_timeout
52 kwargs = {
53 "protocolError": InvalidResponse,
54 "replyError": self.parse_error,
55 "errors": connection.encoder.encoding_errors,
56 }
57
58 if connection.encoder.decode_responses:
59 kwargs["encoding"] = connection.encoder.encoding
60 self._reader = hiredis.Reader(**kwargs)
61 self._next_response = False
62
63 def on_disconnect(self):
64 self._sock = None
65 self._reader = None
66 self._next_response = False
67
68 def can_read(self, timeout):
69 if not self._reader:
70 raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
71
72 if self._next_response is False:
73 self._next_response = self._reader.gets()
74 if self._next_response is False:
75 return self.read_from_socket(timeout=timeout, raise_on_timeout=False)
76 return True
77
78 def read_from_socket(self, timeout=SENTINEL, raise_on_timeout=True):
79 sock = self._sock
80 custom_timeout = timeout is not SENTINEL
81 try:
82 if custom_timeout:
83 sock.settimeout(timeout)
84 bufflen = self._sock.recv_into(self._buffer)
85 if bufflen == 0:
86 raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
87 self._reader.feed(self._buffer, 0, bufflen)
88 # data was read from the socket and added to the buffer.
89 # return True to indicate that data was read.
90 return True
91 except socket.timeout:
92 if raise_on_timeout:
93 raise TimeoutError("Timeout reading from socket")
94 return False
95 except NONBLOCKING_EXCEPTIONS as ex:
96 # if we're in nonblocking mode and the recv raises a
97 # blocking error, simply return False indicating that
98 # there's no data to be read. otherwise raise the
99 # original exception.
100 allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)
101 if not raise_on_timeout and ex.errno == allowed:
102 return False
103 raise ConnectionError(f"Error while reading from socket: {ex.args}")
104 finally:
105 if custom_timeout:
106 sock.settimeout(self._socket_timeout)
107
108 def read_response(self, disable_decoding=False):
109 if not self._reader:
110 raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
111
112 # _next_response might be cached from a can_read() call
113 if self._next_response is not False:
114 response = self._next_response
115 self._next_response = False
116 return response
117
118 if disable_decoding:
119 response = self._reader.gets(False)
120 else:
121 response = self._reader.gets()
122
123 while response is False:
124 self.read_from_socket()
125 if disable_decoding:
126 response = self._reader.gets(False)
127 else:
128 response = self._reader.gets()
129 # if the response is a ConnectionError or the response is a list and
130 # the first item is a ConnectionError, raise it as something bad
131 # happened
132 if isinstance(response, ConnectionError):
133 raise response
134 elif (
135 isinstance(response, list)
136 and response
137 and isinstance(response[0], ConnectionError)
138 ):
139 raise response[0]
140 return response
141
142
143 class _AsyncHiredisParser(AsyncBaseParser):
144 """Async implementation of parser class for connections using Hiredis"""
145
146 __slots__ = ("_reader",)
147
148 def __init__(self, socket_read_size: int):
149 if not HIREDIS_AVAILABLE:
150 raise RedisError("Hiredis is not available.")
151 super().__init__(socket_read_size=socket_read_size)
152 self._reader = None
153
154 def on_connect(self, connection):
155 import hiredis
156
157 self._stream = connection._reader
158 kwargs: _HiredisReaderArgs = {
159 "protocolError": InvalidResponse,
160 "replyError": self.parse_error,
161 }
162 if connection.encoder.decode_responses:
163 kwargs["encoding"] = connection.encoder.encoding
164 kwargs["errors"] = connection.encoder.encoding_errors
165
166 self._reader = hiredis.Reader(**kwargs)
167 self._connected = True
168
169 def on_disconnect(self):
170 self._connected = False
171
172 async def can_read_destructive(self):
173 if not self._connected:
174 raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
175 if self._reader.gets():
176 return True
177 try:
178 async with async_timeout(0):
179 return await self.read_from_socket()
180 except asyncio.TimeoutError:
181 return False
182
183 async def read_from_socket(self):
184 buffer = await self._stream.read(self._read_size)
185 if not buffer or not isinstance(buffer, bytes):
186 raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None
187 self._reader.feed(buffer)
188 # data was read from the socket and added to the buffer.
189 # return True to indicate that data was read.
190 return True
191
192 async def read_response(
193 self, disable_decoding: bool = False
194 ) -> Union[EncodableT, List[EncodableT]]:
195 # If `on_disconnect()` has been called, prohibit any more reads
196 # even if they could happen because data might be present.
197 # We still allow reads in progress to finish
198 if not self._connected:
199 raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None
200
201 response = self._reader.gets()
202 while response is False:
203 await self.read_from_socket()
204 response = self._reader.gets()
205
206 # if the response is a ConnectionError or the response is a list and
207 # the first item is a ConnectionError, raise it as something bad
208 # happened
209 if isinstance(response, ConnectionError):
210 raise response
211 elif (
212 isinstance(response, list)
213 and response
214 and isinstance(response[0], ConnectionError)
215 ):
216 raise response[0]
217 return response
218
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redis/_parsers/hiredis.py b/redis/_parsers/hiredis.py
--- a/redis/_parsers/hiredis.py
+++ b/redis/_parsers/hiredis.py
@@ -198,10 +198,16 @@
if not self._connected:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None
- response = self._reader.gets()
+ if disable_decoding:
+ response = self._reader.gets(False)
+ else:
+ response = self._reader.gets()
while response is False:
await self.read_from_socket()
- response = self._reader.gets()
+ if disable_decoding:
+ response = self._reader.gets(False)
+ else:
+ response = self._reader.gets()
# if the response is a ConnectionError or the response is a list and
# the first item is a ConnectionError, raise it as something bad
| {"golden_diff": "diff --git a/redis/_parsers/hiredis.py b/redis/_parsers/hiredis.py\n--- a/redis/_parsers/hiredis.py\n+++ b/redis/_parsers/hiredis.py\n@@ -198,10 +198,16 @@\n if not self._connected:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None\n \n- response = self._reader.gets()\n+ if disable_decoding:\n+ response = self._reader.gets(False)\n+ else:\n+ response = self._reader.gets()\n while response is False:\n await self.read_from_socket()\n- response = self._reader.gets()\n+ if disable_decoding:\n+ response = self._reader.gets(False)\n+ else:\n+ response = self._reader.gets()\n \n # if the response is a ConnectionError or the response is a list and\n # the first item is a ConnectionError, raise it as something bad\n", "issue": "`disable_decoding` not used in async version of `read_response`.\n**Version**: redis-py version - 5.0.0 (I still see it on main)\r\n\r\n**Description**:\r\nIs there a reason why `disable_decoding` is not being used in async version of `read_response`? \r\nIt seems like there's no async way to get raw data (`NEVER_DECODE`) from Redis.\r\nAre there any plans of adding the `NEVER_DECODE` support for async `read_response`?\r\nWe want to save pickled data (not compatible with `UTF-8`) in Redis, and for now we will use the workaround (as in https://github.com/redis/redis-py/issues/1006#issuecomment-1100506453 ), but it's not a very clean solution.\r\n\r\nredis/_parsers/hiredis.py:204\r\nhttps://github.com/redis/redis-py/blob/d3a3ada03e080f39144807c9fbe44876c40e0548/redis/_parsers/hiredis.py#L192-L217\r\n\r\n\n", "before_files": [{"content": "import asyncio\nimport socket\nimport sys\nfrom typing import Callable, List, Optional, Union\n\nif sys.version_info.major >= 3 and sys.version_info.minor >= 11:\n from asyncio import timeout as async_timeout\nelse:\n from async_timeout import timeout as async_timeout\n\nfrom redis.compat import TypedDict\n\nfrom ..exceptions import ConnectionError, InvalidResponse, RedisError\nfrom ..typing import EncodableT\nfrom ..utils import HIREDIS_AVAILABLE\nfrom .base import AsyncBaseParser, BaseParser\nfrom .socket import (\n NONBLOCKING_EXCEPTION_ERROR_NUMBERS,\n NONBLOCKING_EXCEPTIONS,\n SENTINEL,\n SERVER_CLOSED_CONNECTION_ERROR,\n)\n\n\nclass _HiredisReaderArgs(TypedDict, total=False):\n protocolError: Callable[[str], Exception]\n replyError: Callable[[str], Exception]\n encoding: Optional[str]\n errors: Optional[str]\n\n\nclass _HiredisParser(BaseParser):\n \"Parser class for connections using Hiredis\"\n\n def __init__(self, socket_read_size):\n if not HIREDIS_AVAILABLE:\n raise RedisError(\"Hiredis is not installed\")\n self.socket_read_size = socket_read_size\n self._buffer = bytearray(socket_read_size)\n\n def __del__(self):\n try:\n self.on_disconnect()\n except Exception:\n pass\n\n def on_connect(self, connection, **kwargs):\n import hiredis\n\n self._sock = connection._sock\n self._socket_timeout = connection.socket_timeout\n kwargs = {\n \"protocolError\": InvalidResponse,\n \"replyError\": self.parse_error,\n \"errors\": connection.encoder.encoding_errors,\n }\n\n if connection.encoder.decode_responses:\n kwargs[\"encoding\"] = connection.encoder.encoding\n self._reader = hiredis.Reader(**kwargs)\n self._next_response = False\n\n def on_disconnect(self):\n self._sock = None\n self._reader = None\n self._next_response = False\n\n def can_read(self, timeout):\n if not self._reader:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n\n if self._next_response is False:\n self._next_response = 
self._reader.gets()\n if self._next_response is False:\n return self.read_from_socket(timeout=timeout, raise_on_timeout=False)\n return True\n\n def read_from_socket(self, timeout=SENTINEL, raise_on_timeout=True):\n sock = self._sock\n custom_timeout = timeout is not SENTINEL\n try:\n if custom_timeout:\n sock.settimeout(timeout)\n bufflen = self._sock.recv_into(self._buffer)\n if bufflen == 0:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n self._reader.feed(self._buffer, 0, bufflen)\n # data was read from the socket and added to the buffer.\n # return True to indicate that data was read.\n return True\n except socket.timeout:\n if raise_on_timeout:\n raise TimeoutError(\"Timeout reading from socket\")\n return False\n except NONBLOCKING_EXCEPTIONS as ex:\n # if we're in nonblocking mode and the recv raises a\n # blocking error, simply return False indicating that\n # there's no data to be read. otherwise raise the\n # original exception.\n allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)\n if not raise_on_timeout and ex.errno == allowed:\n return False\n raise ConnectionError(f\"Error while reading from socket: {ex.args}\")\n finally:\n if custom_timeout:\n sock.settimeout(self._socket_timeout)\n\n def read_response(self, disable_decoding=False):\n if not self._reader:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n\n # _next_response might be cached from a can_read() call\n if self._next_response is not False:\n response = self._next_response\n self._next_response = False\n return response\n\n if disable_decoding:\n response = self._reader.gets(False)\n else:\n response = self._reader.gets()\n\n while response is False:\n self.read_from_socket()\n if disable_decoding:\n response = self._reader.gets(False)\n else:\n response = self._reader.gets()\n # if the response is a ConnectionError or the response is a list and\n # the first item is a ConnectionError, raise it as something bad\n # happened\n if isinstance(response, ConnectionError):\n raise response\n elif (\n isinstance(response, list)\n and response\n and isinstance(response[0], ConnectionError)\n ):\n raise response[0]\n return response\n\n\nclass _AsyncHiredisParser(AsyncBaseParser):\n \"\"\"Async implementation of parser class for connections using Hiredis\"\"\"\n\n __slots__ = (\"_reader\",)\n\n def __init__(self, socket_read_size: int):\n if not HIREDIS_AVAILABLE:\n raise RedisError(\"Hiredis is not available.\")\n super().__init__(socket_read_size=socket_read_size)\n self._reader = None\n\n def on_connect(self, connection):\n import hiredis\n\n self._stream = connection._reader\n kwargs: _HiredisReaderArgs = {\n \"protocolError\": InvalidResponse,\n \"replyError\": self.parse_error,\n }\n if connection.encoder.decode_responses:\n kwargs[\"encoding\"] = connection.encoder.encoding\n kwargs[\"errors\"] = connection.encoder.encoding_errors\n\n self._reader = hiredis.Reader(**kwargs)\n self._connected = True\n\n def on_disconnect(self):\n self._connected = False\n\n async def can_read_destructive(self):\n if not self._connected:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n if self._reader.gets():\n return True\n try:\n async with async_timeout(0):\n return await self.read_from_socket()\n except asyncio.TimeoutError:\n return False\n\n async def read_from_socket(self):\n buffer = await self._stream.read(self._read_size)\n if not buffer or not isinstance(buffer, bytes):\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None\n self._reader.feed(buffer)\n # data was read 
from the socket and added to the buffer.\n # return True to indicate that data was read.\n return True\n\n async def read_response(\n self, disable_decoding: bool = False\n ) -> Union[EncodableT, List[EncodableT]]:\n # If `on_disconnect()` has been called, prohibit any more reads\n # even if they could happen because data might be present.\n # We still allow reads in progress to finish\n if not self._connected:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None\n\n response = self._reader.gets()\n while response is False:\n await self.read_from_socket()\n response = self._reader.gets()\n\n # if the response is a ConnectionError or the response is a list and\n # the first item is a ConnectionError, raise it as something bad\n # happened\n if isinstance(response, ConnectionError):\n raise response\n elif (\n isinstance(response, list)\n and response\n and isinstance(response[0], ConnectionError)\n ):\n raise response[0]\n return response\n", "path": "redis/_parsers/hiredis.py"}], "after_files": [{"content": "import asyncio\nimport socket\nimport sys\nfrom typing import Callable, List, Optional, Union\n\nif sys.version_info.major >= 3 and sys.version_info.minor >= 11:\n from asyncio import timeout as async_timeout\nelse:\n from async_timeout import timeout as async_timeout\n\nfrom redis.compat import TypedDict\n\nfrom ..exceptions import ConnectionError, InvalidResponse, RedisError\nfrom ..typing import EncodableT\nfrom ..utils import HIREDIS_AVAILABLE\nfrom .base import AsyncBaseParser, BaseParser\nfrom .socket import (\n NONBLOCKING_EXCEPTION_ERROR_NUMBERS,\n NONBLOCKING_EXCEPTIONS,\n SENTINEL,\n SERVER_CLOSED_CONNECTION_ERROR,\n)\n\n\nclass _HiredisReaderArgs(TypedDict, total=False):\n protocolError: Callable[[str], Exception]\n replyError: Callable[[str], Exception]\n encoding: Optional[str]\n errors: Optional[str]\n\n\nclass _HiredisParser(BaseParser):\n \"Parser class for connections using Hiredis\"\n\n def __init__(self, socket_read_size):\n if not HIREDIS_AVAILABLE:\n raise RedisError(\"Hiredis is not installed\")\n self.socket_read_size = socket_read_size\n self._buffer = bytearray(socket_read_size)\n\n def __del__(self):\n try:\n self.on_disconnect()\n except Exception:\n pass\n\n def on_connect(self, connection, **kwargs):\n import hiredis\n\n self._sock = connection._sock\n self._socket_timeout = connection.socket_timeout\n kwargs = {\n \"protocolError\": InvalidResponse,\n \"replyError\": self.parse_error,\n \"errors\": connection.encoder.encoding_errors,\n }\n\n if connection.encoder.decode_responses:\n kwargs[\"encoding\"] = connection.encoder.encoding\n self._reader = hiredis.Reader(**kwargs)\n self._next_response = False\n\n def on_disconnect(self):\n self._sock = None\n self._reader = None\n self._next_response = False\n\n def can_read(self, timeout):\n if not self._reader:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n\n if self._next_response is False:\n self._next_response = self._reader.gets()\n if self._next_response is False:\n return self.read_from_socket(timeout=timeout, raise_on_timeout=False)\n return True\n\n def read_from_socket(self, timeout=SENTINEL, raise_on_timeout=True):\n sock = self._sock\n custom_timeout = timeout is not SENTINEL\n try:\n if custom_timeout:\n sock.settimeout(timeout)\n bufflen = self._sock.recv_into(self._buffer)\n if bufflen == 0:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n self._reader.feed(self._buffer, 0, bufflen)\n # data was read from the socket and added to the buffer.\n # return True to 
indicate that data was read.\n return True\n except socket.timeout:\n if raise_on_timeout:\n raise TimeoutError(\"Timeout reading from socket\")\n return False\n except NONBLOCKING_EXCEPTIONS as ex:\n # if we're in nonblocking mode and the recv raises a\n # blocking error, simply return False indicating that\n # there's no data to be read. otherwise raise the\n # original exception.\n allowed = NONBLOCKING_EXCEPTION_ERROR_NUMBERS.get(ex.__class__, -1)\n if not raise_on_timeout and ex.errno == allowed:\n return False\n raise ConnectionError(f\"Error while reading from socket: {ex.args}\")\n finally:\n if custom_timeout:\n sock.settimeout(self._socket_timeout)\n\n def read_response(self, disable_decoding=False):\n if not self._reader:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n\n # _next_response might be cached from a can_read() call\n if self._next_response is not False:\n response = self._next_response\n self._next_response = False\n return response\n\n if disable_decoding:\n response = self._reader.gets(False)\n else:\n response = self._reader.gets()\n\n while response is False:\n self.read_from_socket()\n if disable_decoding:\n response = self._reader.gets(False)\n else:\n response = self._reader.gets()\n # if the response is a ConnectionError or the response is a list and\n # the first item is a ConnectionError, raise it as something bad\n # happened\n if isinstance(response, ConnectionError):\n raise response\n elif (\n isinstance(response, list)\n and response\n and isinstance(response[0], ConnectionError)\n ):\n raise response[0]\n return response\n\n\nclass _AsyncHiredisParser(AsyncBaseParser):\n \"\"\"Async implementation of parser class for connections using Hiredis\"\"\"\n\n __slots__ = (\"_reader\",)\n\n def __init__(self, socket_read_size: int):\n if not HIREDIS_AVAILABLE:\n raise RedisError(\"Hiredis is not available.\")\n super().__init__(socket_read_size=socket_read_size)\n self._reader = None\n\n def on_connect(self, connection):\n import hiredis\n\n self._stream = connection._reader\n kwargs: _HiredisReaderArgs = {\n \"protocolError\": InvalidResponse,\n \"replyError\": self.parse_error,\n }\n if connection.encoder.decode_responses:\n kwargs[\"encoding\"] = connection.encoder.encoding\n kwargs[\"errors\"] = connection.encoder.encoding_errors\n\n self._reader = hiredis.Reader(**kwargs)\n self._connected = True\n\n def on_disconnect(self):\n self._connected = False\n\n async def can_read_destructive(self):\n if not self._connected:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)\n if self._reader.gets():\n return True\n try:\n async with async_timeout(0):\n return await self.read_from_socket()\n except asyncio.TimeoutError:\n return False\n\n async def read_from_socket(self):\n buffer = await self._stream.read(self._read_size)\n if not buffer or not isinstance(buffer, bytes):\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None\n self._reader.feed(buffer)\n # data was read from the socket and added to the buffer.\n # return True to indicate that data was read.\n return True\n\n async def read_response(\n self, disable_decoding: bool = False\n ) -> Union[EncodableT, List[EncodableT]]:\n # If `on_disconnect()` has been called, prohibit any more reads\n # even if they could happen because data might be present.\n # We still allow reads in progress to finish\n if not self._connected:\n raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR) from None\n\n if disable_decoding:\n response = self._reader.gets(False)\n else:\n response = 
self._reader.gets()\n while response is False:\n await self.read_from_socket()\n if disable_decoding:\n response = self._reader.gets(False)\n else:\n response = self._reader.gets()\n\n # if the response is a ConnectionError or the response is a list and\n # the first item is a ConnectionError, raise it as something bad\n # happened\n if isinstance(response, ConnectionError):\n raise response\n elif (\n isinstance(response, list)\n and response\n and isinstance(response[0], ConnectionError)\n ):\n raise response[0]\n return response\n", "path": "redis/_parsers/hiredis.py"}]} | 2,632 | 211 |
gh_patches_debug_2453 | rasdani/github-patches | git_diff | cupy__cupy-3468 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove mock from test requirements?
I'm learning how to write mock tests, and I noticed things like `import mock` are workarounds to support PY27 and older PY3. Since CuPy now supports PY35+ only and `mock` is part of the standard Python library, I suppose this line is no longer needed:
https://github.com/cupy/cupy/blob/74dcb4172578a0771e06f4e44b10b5f73f68fb59/setup.py#L39
and all `import mock` can be replaced by `from unittest import mock`?
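As a minimal sketch of the replacement suggested above (the patched target is purely illustrative):

```python
import os
from unittest import mock  # standard library since Python 3.3; replaces `import mock`

with mock.patch("os.cpu_count", return_value=8):
    assert os.cpu_count() == 8  # returns the patched value; target chosen only for illustration
```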
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4 from setuptools import setup, find_packages
5 import sys
6
7 import cupy_setup_build
8
9
10 if sys.version_info[:3] == (3, 5, 0):
11 if not int(os.getenv('CUPY_PYTHON_350_FORCE', '0')):
12 msg = """
13 CuPy does not work with Python 3.5.0.
14
15 We strongly recommend to use another version of Python.
16 If you want to use CuPy with Python 3.5.0 at your own risk,
17 set 1 to CUPY_PYTHON_350_FORCE environment variable."""
18 print(msg)
19 sys.exit(1)
20
21
22 requirements = {
23 'setup': [
24 'fastrlock>=0.3',
25 ],
26 'install': [
27 'numpy>=1.15',
28 'fastrlock>=0.3',
29 ],
30 'stylecheck': [
31 'autopep8==1.3.5',
32 'flake8==3.5.0',
33 'pbr==4.0.4',
34 'pycodestyle==2.3.1',
35 ],
36 'test': [
37 'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI.
38 'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0
39 'mock',
40 ],
41 'doctest': [
42 'matplotlib',
43 'optuna',
44 ],
45 'docs': [
46 'sphinx==3.0.4',
47 'sphinx_rtd_theme',
48 ],
49 'travis': [
50 '-r stylecheck',
51 '-r docs',
52 ],
53 'appveyor': [
54 '-r test',
55 ],
56 'jenkins': [
57 '-r test',
58 'pytest-timeout',
59 'pytest-cov',
60 'coveralls',
61 'codecov',
62 ],
63 }
64
65
66 def reduce_requirements(key):
67 # Resolve recursive requirements notation (-r)
68 reqs = requirements[key]
69 resolved_reqs = []
70 for req in reqs:
71 if req.startswith('-r'):
72 depend_key = req[2:].lstrip()
73 reduce_requirements(depend_key)
74 resolved_reqs += requirements[depend_key]
75 else:
76 resolved_reqs.append(req)
77 requirements[key] = resolved_reqs
78
79
80 for k in requirements.keys():
81 reduce_requirements(k)
82
83
84 extras_require = {k: v for k, v in requirements.items() if k != 'install'}
85
86
87 setup_requires = requirements['setup']
88 install_requires = requirements['install']
89 tests_require = requirements['test']
90
91
92 package_data = {
93 'cupy': [
94 'core/include/cupy/complex/arithmetic.h',
95 'core/include/cupy/complex/catrig.h',
96 'core/include/cupy/complex/catrigf.h',
97 'core/include/cupy/complex/ccosh.h',
98 'core/include/cupy/complex/ccoshf.h',
99 'core/include/cupy/complex/cexp.h',
100 'core/include/cupy/complex/cexpf.h',
101 'core/include/cupy/complex/clog.h',
102 'core/include/cupy/complex/clogf.h',
103 'core/include/cupy/complex/complex.h',
104 'core/include/cupy/complex/complex_inl.h',
105 'core/include/cupy/complex/cpow.h',
106 'core/include/cupy/complex/cproj.h',
107 'core/include/cupy/complex/csinh.h',
108 'core/include/cupy/complex/csinhf.h',
109 'core/include/cupy/complex/csqrt.h',
110 'core/include/cupy/complex/csqrtf.h',
111 'core/include/cupy/complex/ctanh.h',
112 'core/include/cupy/complex/ctanhf.h',
113 'core/include/cupy/complex/math_private.h',
114 'core/include/cupy/carray.cuh',
115 'core/include/cupy/complex.cuh',
116 'core/include/cupy/atomics.cuh',
117 'core/include/cupy/cuComplex_bridge.h',
118 'core/include/cupy/_cuda/cuda-*/*.h',
119 'core/include/cupy/_cuda/cuda-*/*.hpp',
120 'cuda/cupy_thrust.cu',
121 ],
122 }
123
124 package_data['cupy'] += cupy_setup_build.prepare_wheel_libs()
125
126 package_name = cupy_setup_build.get_package_name()
127 long_description = cupy_setup_build.get_long_description()
128 ext_modules = cupy_setup_build.get_ext_modules()
129 build_ext = cupy_setup_build.custom_build_ext
130 sdist = cupy_setup_build.sdist_with_cython
131
132 here = os.path.abspath(os.path.dirname(__file__))
133 # Get __version__ variable
134 exec(open(os.path.join(here, 'cupy', '_version.py')).read())
135
136 CLASSIFIERS = """\
137 Development Status :: 5 - Production/Stable
138 Intended Audience :: Science/Research
139 Intended Audience :: Developers
140 License :: OSI Approved :: MIT License
141 Programming Language :: Python
142 Programming Language :: Python :: 3
143 Programming Language :: Python :: 3.5
144 Programming Language :: Python :: 3.6
145 Programming Language :: Python :: 3.7
146 Programming Language :: Python :: 3 :: Only
147 Programming Language :: Cython
148 Topic :: Software Development
149 Topic :: Scientific/Engineering
150 Operating System :: Microsoft :: Windows
151 Operating System :: POSIX
152 Operating System :: MacOS
153 """
154
155
156 setup(
157 name=package_name,
158 version=__version__, # NOQA
159 description='CuPy: NumPy-like API accelerated with CUDA',
160 long_description=long_description,
161 author='Seiya Tokui',
162 author_email='[email protected]',
163 url='https://cupy.chainer.org/',
164 license='MIT License',
165 project_urls={
166 "Bug Tracker": "https://github.com/cupy/cupy/issues",
167 "Documentation": "https://docs-cupy.chainer.org/",
168 "Source Code": "https://github.com/cupy/cupy",
169 },
170 classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
171 packages=find_packages(exclude=['install', 'tests']),
172 package_data=package_data,
173 zip_safe=False,
174 python_requires='>=3.5.0',
175 setup_requires=setup_requires,
176 install_requires=install_requires,
177 tests_require=tests_require,
178 extras_require=extras_require,
179 ext_modules=ext_modules,
180 cmdclass={'build_ext': build_ext,
181 'sdist': sdist},
182 )
183
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,6 @@
'test': [
'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI.
'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0
- 'mock',
],
'doctest': [
'matplotlib',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -36,7 +36,6 @@\n 'test': [\n 'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI.\n 'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0\n- 'mock',\n ],\n 'doctest': [\n 'matplotlib',\n", "issue": "Remove mock from test requirements?\nI'm learning how to write mock tests, and I noticed things like `import mock` are workarounds to support PY27 and older PY3. Since CuPy now support PY35+ only and `mock` becomes part of the standard Python library, I suppose this line is no longer needed:\r\nhttps://github.com/cupy/cupy/blob/74dcb4172578a0771e06f4e44b10b5f73f68fb59/setup.py#L39\r\nand all `import mock` can be replaced by `from unittest import mock`?\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nimport cupy_setup_build\n\n\nif sys.version_info[:3] == (3, 5, 0):\n if not int(os.getenv('CUPY_PYTHON_350_FORCE', '0')):\n msg = \"\"\"\nCuPy does not work with Python 3.5.0.\n\nWe strongly recommend to use another version of Python.\nIf you want to use CuPy with Python 3.5.0 at your own risk,\nset 1 to CUPY_PYTHON_350_FORCE environment variable.\"\"\"\n print(msg)\n sys.exit(1)\n\n\nrequirements = {\n 'setup': [\n 'fastrlock>=0.3',\n ],\n 'install': [\n 'numpy>=1.15',\n 'fastrlock>=0.3',\n ],\n 'stylecheck': [\n 'autopep8==1.3.5',\n 'flake8==3.5.0',\n 'pbr==4.0.4',\n 'pycodestyle==2.3.1',\n ],\n 'test': [\n 'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI.\n 'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0\n 'mock',\n ],\n 'doctest': [\n 'matplotlib',\n 'optuna',\n ],\n 'docs': [\n 'sphinx==3.0.4',\n 'sphinx_rtd_theme',\n ],\n 'travis': [\n '-r stylecheck',\n '-r docs',\n ],\n 'appveyor': [\n '-r test',\n ],\n 'jenkins': [\n '-r test',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\n\n\nsetup_requires = requirements['setup']\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n\npackage_data = {\n 'cupy': [\n 'core/include/cupy/complex/arithmetic.h',\n 'core/include/cupy/complex/catrig.h',\n 'core/include/cupy/complex/catrigf.h',\n 'core/include/cupy/complex/ccosh.h',\n 'core/include/cupy/complex/ccoshf.h',\n 'core/include/cupy/complex/cexp.h',\n 'core/include/cupy/complex/cexpf.h',\n 'core/include/cupy/complex/clog.h',\n 'core/include/cupy/complex/clogf.h',\n 'core/include/cupy/complex/complex.h',\n 'core/include/cupy/complex/complex_inl.h',\n 'core/include/cupy/complex/cpow.h',\n 'core/include/cupy/complex/cproj.h',\n 'core/include/cupy/complex/csinh.h',\n 'core/include/cupy/complex/csinhf.h',\n 'core/include/cupy/complex/csqrt.h',\n 'core/include/cupy/complex/csqrtf.h',\n 'core/include/cupy/complex/ctanh.h',\n 'core/include/cupy/complex/ctanhf.h',\n 'core/include/cupy/complex/math_private.h',\n 'core/include/cupy/carray.cuh',\n 'core/include/cupy/complex.cuh',\n 'core/include/cupy/atomics.cuh',\n 
'core/include/cupy/cuComplex_bridge.h',\n 'core/include/cupy/_cuda/cuda-*/*.h',\n 'core/include/cupy/_cuda/cuda-*/*.hpp',\n 'cuda/cupy_thrust.cu',\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs()\n\npackage_name = cupy_setup_build.get_package_name()\nlong_description = cupy_setup_build.get_long_description()\next_modules = cupy_setup_build.get_ext_modules()\nbuild_ext = cupy_setup_build.custom_build_ext\nsdist = cupy_setup_build.sdist_with_cython\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nexec(open(os.path.join(here, 'cupy', '_version.py')).read())\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.5\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: Microsoft :: Windows\nOperating System :: POSIX\nOperating System :: MacOS\n\"\"\"\n\n\nsetup(\n name=package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy-like API accelerated with CUDA',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://cupy.chainer.org/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs-cupy.chainer.org/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.5.0',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext,\n 'sdist': sdist},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nimport cupy_setup_build\n\n\nif sys.version_info[:3] == (3, 5, 0):\n if not int(os.getenv('CUPY_PYTHON_350_FORCE', '0')):\n msg = \"\"\"\nCuPy does not work with Python 3.5.0.\n\nWe strongly recommend to use another version of Python.\nIf you want to use CuPy with Python 3.5.0 at your own risk,\nset 1 to CUPY_PYTHON_350_FORCE environment variable.\"\"\"\n print(msg)\n sys.exit(1)\n\n\nrequirements = {\n 'setup': [\n 'fastrlock>=0.3',\n ],\n 'install': [\n 'numpy>=1.15',\n 'fastrlock>=0.3',\n ],\n 'stylecheck': [\n 'autopep8==1.3.5',\n 'flake8==3.5.0',\n 'pbr==4.0.4',\n 'pycodestyle==2.3.1',\n ],\n 'test': [\n 'pytest<4.2.0', # 4.2.0 is slow collecting tests and times out on CI.\n 'attrs<19.2.0', # pytest 4.1.1 does not run with attrs==19.2.0\n ],\n 'doctest': [\n 'matplotlib',\n 'optuna',\n ],\n 'docs': [\n 'sphinx==3.0.4',\n 'sphinx_rtd_theme',\n ],\n 'travis': [\n '-r stylecheck',\n '-r docs',\n ],\n 'appveyor': [\n '-r test',\n ],\n 'jenkins': [\n '-r test',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n 
reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\n\n\nsetup_requires = requirements['setup']\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n\npackage_data = {\n 'cupy': [\n 'core/include/cupy/complex/arithmetic.h',\n 'core/include/cupy/complex/catrig.h',\n 'core/include/cupy/complex/catrigf.h',\n 'core/include/cupy/complex/ccosh.h',\n 'core/include/cupy/complex/ccoshf.h',\n 'core/include/cupy/complex/cexp.h',\n 'core/include/cupy/complex/cexpf.h',\n 'core/include/cupy/complex/clog.h',\n 'core/include/cupy/complex/clogf.h',\n 'core/include/cupy/complex/complex.h',\n 'core/include/cupy/complex/complex_inl.h',\n 'core/include/cupy/complex/cpow.h',\n 'core/include/cupy/complex/cproj.h',\n 'core/include/cupy/complex/csinh.h',\n 'core/include/cupy/complex/csinhf.h',\n 'core/include/cupy/complex/csqrt.h',\n 'core/include/cupy/complex/csqrtf.h',\n 'core/include/cupy/complex/ctanh.h',\n 'core/include/cupy/complex/ctanhf.h',\n 'core/include/cupy/complex/math_private.h',\n 'core/include/cupy/carray.cuh',\n 'core/include/cupy/complex.cuh',\n 'core/include/cupy/atomics.cuh',\n 'core/include/cupy/cuComplex_bridge.h',\n 'core/include/cupy/_cuda/cuda-*/*.h',\n 'core/include/cupy/_cuda/cuda-*/*.hpp',\n 'cuda/cupy_thrust.cu',\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs()\n\npackage_name = cupy_setup_build.get_package_name()\nlong_description = cupy_setup_build.get_long_description()\next_modules = cupy_setup_build.get_ext_modules()\nbuild_ext = cupy_setup_build.custom_build_ext\nsdist = cupy_setup_build.sdist_with_cython\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nexec(open(os.path.join(here, 'cupy', '_version.py')).read())\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.5\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: Microsoft :: Windows\nOperating System :: POSIX\nOperating System :: MacOS\n\"\"\"\n\n\nsetup(\n name=package_name,\n version=__version__, # NOQA\n description='CuPy: NumPy-like API accelerated with CUDA',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://cupy.chainer.org/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs-cupy.chainer.org/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.5.0',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext,\n 'sdist': sdist},\n)\n", "path": "setup.py"}]} | 2,243 | 115 |
gh_patches_debug_17298 | rasdani/github-patches | git_diff | pytorch__TensorRT-2080 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
🐛 [Bug] Encountered TRT Error in Dynamo compile path
## Bug Description
When compiling GPT-2 with Dynamo compile, the following error is encountered:
```python
[07/01/2023-00:07:12] [TRT] [E] 3: [executionContext.cpp::enqueueInternal::795] Error Code 3: API Usage Error (Parameter check failed at: runtime/api/executionContext.cpp::enqueueInternal::795, condition: bindings[x] || nullBindingOK
)
```
Additionally, this does not seem to cause failures in the Dynamo runtime (not caught by `pass_through_build_failures`), and it appears both with and without the experimental runtime.
## To Reproduce
```python
model = GPT2Model.from_pretrained("gpt2").eval().cuda()
input_ids = torch.randint(0, 2, (1, 14), dtype=torch.int32).cuda()
attention_mask = torch.randint(0, 2, (1, 14), dtype=torch.int32).cuda()
traced = transformers_trace(model, input_names=["input_ids", "attention_mask"]).eval().cuda()
fx_trt_model = torch_tensorrt.compile(traced, ir="dynamo_compile", inputs=[input_ids, attention_mask], debug=True, pass_through_build_failures=True, min_block_size=10)
```
## Expected behavior
The model should not encounter TRT errors while compiling.
## Environment
- Torch-TensorRT Version (e.g. 1.0.0): 2844630
- PyTorch Version (e.g. 1.0): `2.1.0.dev20230620+cu118`
- TensorRT Version: `8.6.1`
## Additional context
This error no longer appears when we apply the `@fake_tensor_unsupported` flag to the backends, as was removed in #1955. It is unclear whether this is the direct _cause_ of the bug, however.
Additionally, this bug seems to appear only sometimes, and not consistently.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py/torch_tensorrt/dynamo/backend/backends.py`
Content:
```
1 import logging
2 from typing import Sequence
3 import torch
4 from functools import partial
5 import torch._dynamo as td
6
7 from torch_tensorrt.dynamo.backend._settings import CompilationSettings
8 from torch_tensorrt.dynamo.backend.lowering._decompositions import (
9 get_decompositions,
10 )
11 from torch_tensorrt.dynamo.backend.lowering._pre_aot_lowering import (
12 pre_aot_substitutions,
13 )
14 from torch_tensorrt.dynamo.backend.lowering._partition import (
15 partition,
16 get_submod_inputs,
17 )
18 from torch_tensorrt.dynamo.backend.utils import parse_dynamo_kwargs
19 from torch_tensorrt.dynamo.backend.conversion import convert_module
20
21 from torch._functorch.aot_autograd import aot_module_simplified, make_boxed_compiler
22
23
24 logger = logging.getLogger(__name__)
25
26
27 @td.register_backend(name="torch_tensorrt")
28 def torch_tensorrt_backend(
29 gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs
30 ):
31 DEFAULT_BACKEND = aot_torch_tensorrt_aten_backend
32
33 return DEFAULT_BACKEND(gm, sample_inputs, **kwargs)
34
35
36 @td.register_backend(name="aot_torch_tensorrt_aten")
37 def aot_torch_tensorrt_aten_backend(
38 gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs
39 ):
40 settings = parse_dynamo_kwargs(kwargs)
41
42 custom_backend = partial(
43 _pretraced_backend,
44 settings=settings,
45 )
46
47 # Perform Pre-AOT Lowering for Module-Level Replacement
48 gm = pre_aot_substitutions(gm)
49
50 # Invoke AOTAutograd to translate operators to aten
51 return aot_module_simplified(
52 gm,
53 sample_inputs,
54 fw_compiler=make_boxed_compiler(custom_backend),
55 decompositions=get_decompositions(),
56 )
57
58
59 def _pretraced_backend(
60 gm: torch.fx.GraphModule,
61 sample_inputs: Sequence[torch.Tensor],
62 settings: CompilationSettings = CompilationSettings(),
63 ):
64 """Helper function to manage translation of traced FX module to TRT engines
65
66 Args:
67 module: FX GraphModule to convert
68 inputs: Inputs to the module
69 settings: Compilation settings
70 Returns:
71 Compiled FX GraphModule
72 """
73 try:
74 logger.debug("Post-AOT Autograd graph:\n" + str(gm.graph))
75
76 trt_compiled = _compile_module(
77 gm,
78 sample_inputs,
79 settings=settings,
80 )
81 return trt_compiled
82 except:
83 if not settings.pass_through_build_failures:
84 logger.warning(
85 "TRT conversion failed on the subgraph. See trace above. "
86 + "Returning GraphModule forward instead.",
87 exc_info=True,
88 )
89 return gm.forward
90 else:
91 raise AssertionError(
92 "Halting compilation on build failure since "
93 + "pass_through_build_failures was specified as True. "
94 + "To return the default Torch implementation and avoid "
95 + "halting compilation on engine build failures, "
96 + "specify pass_through_build_failures=False."
97 )
98
99
100 def _compile_module(
101 gm: torch.fx.GraphModule,
102 sample_inputs: Sequence[torch.Tensor],
103 settings: CompilationSettings = CompilationSettings(),
104 ) -> torch.fx.GraphModule:
105 """Compile a traced FX module
106
107 Includes: Partitioning + Conversion Phases
108
109 Args:
110 module: FX GraphModule to convert
111 inputs: Inputs to the module
112 settings: Compilation settings
113 Returns:
114 Compiled FX GraphModule
115 """
116 # Partition module into components that can be TRT-accelerated
117 partitioned_module = partition(
118 gm,
119 verbose=settings.debug,
120 min_block_size=settings.min_block_size,
121 torch_executed_ops=settings.torch_executed_ops,
122 )
123
124 # Iterate over all components that can be accelerated
125 # Generate the corresponding TRT Module for those
126 for name, _ in partitioned_module.named_children():
127 submodule = getattr(partitioned_module, name)
128
129 # Get submodule inputs
130 submodule_inputs = get_submod_inputs(
131 partitioned_module, submodule, sample_inputs
132 )
133
134 # Create TRT Module from submodule
135 trt_mod = convert_module(
136 submodule,
137 submodule_inputs,
138 settings=settings,
139 )
140
141 # Replace FX Module with TRT Module
142 setattr(partitioned_module, name, trt_mod)
143
144 return partitioned_module
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/py/torch_tensorrt/dynamo/backend/backends.py b/py/torch_tensorrt/dynamo/backend/backends.py
--- a/py/torch_tensorrt/dynamo/backend/backends.py
+++ b/py/torch_tensorrt/dynamo/backend/backends.py
@@ -121,6 +121,9 @@
torch_executed_ops=settings.torch_executed_ops,
)
+ # Store TRT replicas of Torch subgraphs
+ trt_modules = {}
+
# Iterate over all components that can be accelerated
# Generate the corresponding TRT Module for those
for name, _ in partitioned_module.named_children():
@@ -138,7 +141,10 @@
settings=settings,
)
- # Replace FX Module with TRT Module
+ trt_modules[name] = trt_mod
+
+ # Replace all FX Modules with TRT Modules
+ for name, trt_mod in trt_modules.items():
setattr(partitioned_module, name, trt_mod)
return partitioned_module
| {"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/backend/backends.py b/py/torch_tensorrt/dynamo/backend/backends.py\n--- a/py/torch_tensorrt/dynamo/backend/backends.py\n+++ b/py/torch_tensorrt/dynamo/backend/backends.py\n@@ -121,6 +121,9 @@\n torch_executed_ops=settings.torch_executed_ops,\n )\n \n+ # Store TRT replicas of Torch subgraphs\n+ trt_modules = {}\n+\n # Iterate over all components that can be accelerated\n # Generate the corresponding TRT Module for those\n for name, _ in partitioned_module.named_children():\n@@ -138,7 +141,10 @@\n settings=settings,\n )\n \n- # Replace FX Module with TRT Module\n+ trt_modules[name] = trt_mod\n+\n+ # Replace all FX Modules with TRT Modules\n+ for name, trt_mod in trt_modules.items():\n setattr(partitioned_module, name, trt_mod)\n \n return partitioned_module\n", "issue": "\ud83d\udc1b [Bug] Encountered TRT Error in Dynamo compile path\n## Bug Description\r\nWhen compiling GPT-2 with Dynamo compile, the following error is encountered:\r\n```python\r\n[07/01/2023-00:07:12] [TRT] [E] 3: [executionContext.cpp::enqueueInternal::795] Error Code 3: API Usage Error (Parameter check failed at: runtime/api/executionContext.cpp::enqueueInternal::795, condition: bindings[x] || nullBindingOK\r\n)\r\n```\r\nAdditionally, this does not seem to cause failures in the Dynamo runtime (not caught by `pass_through_build_failures`), and it appears both with and without the experimental runtime.\r\n\r\n## To Reproduce\r\n```python\r\nmodel = GPT2Model.from_pretrained(\"gpt2\").eval().cuda()\r\ninput_ids = torch.randint(0, 2, (1, 14), dtype=torch.int32).cuda()\r\nattention_mask = torch.randint(0, 2, (1, 14), dtype=torch.int32).cuda()\r\ntraced = transformers_trace(model, input_names=[\"input_ids\", \"attention_mask\"]).eval().cuda()\r\nfx_trt_model = torch_tensorrt.compile(traced, ir=\"dynamo_compile\", inputs=[input_ids, attention_mask], debug=True, pass_through_build_failures=True, min_block_size=10)\r\n```\r\n\r\n## Expected behavior\r\n\r\nThe model should not encounter TRT errors while compiling.\r\n\r\n## Environment\r\n\r\n - Torch-TensorRT Version (e.g. 1.0.0): 2844630\r\n - PyTorch Version (e.g. 1.0): `2.1.0.dev20230620+cu118`\r\n - TensorRT Version: `8.6.1`\r\n\r\n## Additional context\r\n\r\nThis error no longer appears when we apply the `@fake_tensor_unsupported` flag to the backends, as was removed in #1955. 
It is unclear whether this is the direct _cause_ of the bug, however.\r\n\r\nAdditionally, this bug seems to appear only sometimes, and not consistently.\n", "before_files": [{"content": "import logging\nfrom typing import Sequence\nimport torch\nfrom functools import partial\nimport torch._dynamo as td\n\nfrom torch_tensorrt.dynamo.backend._settings import CompilationSettings\nfrom torch_tensorrt.dynamo.backend.lowering._decompositions import (\n get_decompositions,\n)\nfrom torch_tensorrt.dynamo.backend.lowering._pre_aot_lowering import (\n pre_aot_substitutions,\n)\nfrom torch_tensorrt.dynamo.backend.lowering._partition import (\n partition,\n get_submod_inputs,\n)\nfrom torch_tensorrt.dynamo.backend.utils import parse_dynamo_kwargs\nfrom torch_tensorrt.dynamo.backend.conversion import convert_module\n\nfrom torch._functorch.aot_autograd import aot_module_simplified, make_boxed_compiler\n\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]_backend(name=\"torch_tensorrt\")\ndef torch_tensorrt_backend(\n gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs\n):\n DEFAULT_BACKEND = aot_torch_tensorrt_aten_backend\n\n return DEFAULT_BACKEND(gm, sample_inputs, **kwargs)\n\n\[email protected]_backend(name=\"aot_torch_tensorrt_aten\")\ndef aot_torch_tensorrt_aten_backend(\n gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs\n):\n settings = parse_dynamo_kwargs(kwargs)\n\n custom_backend = partial(\n _pretraced_backend,\n settings=settings,\n )\n\n # Perform Pre-AOT Lowering for Module-Level Replacement\n gm = pre_aot_substitutions(gm)\n\n # Invoke AOTAutograd to translate operators to aten\n return aot_module_simplified(\n gm,\n sample_inputs,\n fw_compiler=make_boxed_compiler(custom_backend),\n decompositions=get_decompositions(),\n )\n\n\ndef _pretraced_backend(\n gm: torch.fx.GraphModule,\n sample_inputs: Sequence[torch.Tensor],\n settings: CompilationSettings = CompilationSettings(),\n):\n \"\"\"Helper function to manage translation of traced FX module to TRT engines\n\n Args:\n module: FX GraphModule to convert\n inputs: Inputs to the module\n settings: Compilation settings\n Returns:\n Compiled FX GraphModule\n \"\"\"\n try:\n logger.debug(\"Post-AOT Autograd graph:\\n\" + str(gm.graph))\n\n trt_compiled = _compile_module(\n gm,\n sample_inputs,\n settings=settings,\n )\n return trt_compiled\n except:\n if not settings.pass_through_build_failures:\n logger.warning(\n \"TRT conversion failed on the subgraph. See trace above. \"\n + \"Returning GraphModule forward instead.\",\n exc_info=True,\n )\n return gm.forward\n else:\n raise AssertionError(\n \"Halting compilation on build failure since \"\n + \"pass_through_build_failures was specified as True. 
\"\n + \"To return the default Torch implementation and avoid \"\n + \"halting compilation on engine build failures, \"\n + \"specify pass_through_build_failures=False.\"\n )\n\n\ndef _compile_module(\n gm: torch.fx.GraphModule,\n sample_inputs: Sequence[torch.Tensor],\n settings: CompilationSettings = CompilationSettings(),\n) -> torch.fx.GraphModule:\n \"\"\"Compile a traced FX module\n\n Includes: Partitioning + Conversion Phases\n\n Args:\n module: FX GraphModule to convert\n inputs: Inputs to the module\n settings: Compilation settings\n Returns:\n Compiled FX GraphModule\n \"\"\"\n # Partition module into components that can be TRT-accelerated\n partitioned_module = partition(\n gm,\n verbose=settings.debug,\n min_block_size=settings.min_block_size,\n torch_executed_ops=settings.torch_executed_ops,\n )\n\n # Iterate over all components that can be accelerated\n # Generate the corresponding TRT Module for those\n for name, _ in partitioned_module.named_children():\n submodule = getattr(partitioned_module, name)\n\n # Get submodule inputs\n submodule_inputs = get_submod_inputs(\n partitioned_module, submodule, sample_inputs\n )\n\n # Create TRT Module from submodule\n trt_mod = convert_module(\n submodule,\n submodule_inputs,\n settings=settings,\n )\n\n # Replace FX Module with TRT Module\n setattr(partitioned_module, name, trt_mod)\n\n return partitioned_module\n", "path": "py/torch_tensorrt/dynamo/backend/backends.py"}], "after_files": [{"content": "import logging\nfrom typing import Sequence\nimport torch\nfrom functools import partial\nimport torch._dynamo as td\n\nfrom torch_tensorrt.dynamo.backend._settings import CompilationSettings\nfrom torch_tensorrt.dynamo.backend.lowering._decompositions import (\n get_decompositions,\n)\nfrom torch_tensorrt.dynamo.backend.lowering._pre_aot_lowering import (\n pre_aot_substitutions,\n)\nfrom torch_tensorrt.dynamo.backend.lowering._partition import (\n partition,\n get_submod_inputs,\n)\nfrom torch_tensorrt.dynamo.backend.utils import parse_dynamo_kwargs\nfrom torch_tensorrt.dynamo.backend.conversion import convert_module\n\nfrom torch._functorch.aot_autograd import aot_module_simplified, make_boxed_compiler\n\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]_backend(name=\"torch_tensorrt\")\ndef torch_tensorrt_backend(\n gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs\n):\n DEFAULT_BACKEND = aot_torch_tensorrt_aten_backend\n\n return DEFAULT_BACKEND(gm, sample_inputs, **kwargs)\n\n\[email protected]_backend(name=\"aot_torch_tensorrt_aten\")\ndef aot_torch_tensorrt_aten_backend(\n gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor], **kwargs\n):\n settings = parse_dynamo_kwargs(kwargs)\n\n custom_backend = partial(\n _pretraced_backend,\n settings=settings,\n )\n\n # Perform Pre-AOT Lowering for Module-Level Replacement\n gm = pre_aot_substitutions(gm)\n\n # Invoke AOTAutograd to translate operators to aten\n return aot_module_simplified(\n gm,\n sample_inputs,\n fw_compiler=make_boxed_compiler(custom_backend),\n decompositions=get_decompositions(),\n )\n\n\ndef _pretraced_backend(\n gm: torch.fx.GraphModule,\n sample_inputs: Sequence[torch.Tensor],\n settings: CompilationSettings = CompilationSettings(),\n):\n \"\"\"Helper function to manage translation of traced FX module to TRT engines\n\n Args:\n module: FX GraphModule to convert\n inputs: Inputs to the module\n settings: Compilation settings\n Returns:\n Compiled FX GraphModule\n \"\"\"\n try:\n logger.debug(\"Post-AOT Autograd 
graph:\\n\" + str(gm.graph))\n\n trt_compiled = _compile_module(\n gm,\n sample_inputs,\n settings=settings,\n )\n return trt_compiled\n except:\n if not settings.pass_through_build_failures:\n logger.warning(\n \"TRT conversion failed on the subgraph. See trace above. \"\n + \"Returning GraphModule forward instead.\",\n exc_info=True,\n )\n return gm.forward\n else:\n raise AssertionError(\n \"Halting compilation on build failure since \"\n + \"pass_through_build_failures was specified as True. \"\n + \"To return the default Torch implementation and avoid \"\n + \"halting compilation on engine build failures, \"\n + \"specify pass_through_build_failures=False.\"\n )\n\n\ndef _compile_module(\n gm: torch.fx.GraphModule,\n sample_inputs: Sequence[torch.Tensor],\n settings: CompilationSettings = CompilationSettings(),\n) -> torch.fx.GraphModule:\n \"\"\"Compile a traced FX module\n\n Includes: Partitioning + Conversion Phases\n\n Args:\n module: FX GraphModule to convert\n inputs: Inputs to the module\n settings: Compilation settings\n Returns:\n Compiled FX GraphModule\n \"\"\"\n # Partition module into components that can be TRT-accelerated\n partitioned_module = partition(\n gm,\n verbose=settings.debug,\n min_block_size=settings.min_block_size,\n torch_executed_ops=settings.torch_executed_ops,\n )\n\n # Store TRT replicas of Torch subgraphs\n trt_modules = {}\n\n # Iterate over all components that can be accelerated\n # Generate the corresponding TRT Module for those\n for name, _ in partitioned_module.named_children():\n submodule = getattr(partitioned_module, name)\n\n # Get submodule inputs\n submodule_inputs = get_submod_inputs(\n partitioned_module, submodule, sample_inputs\n )\n\n # Create TRT Module from submodule\n trt_mod = convert_module(\n submodule,\n submodule_inputs,\n settings=settings,\n )\n\n trt_modules[name] = trt_mod\n\n # Replace all FX Modules with TRT Modules\n for name, trt_mod in trt_modules.items():\n setattr(partitioned_module, name, trt_mod)\n\n return partitioned_module\n", "path": "py/torch_tensorrt/dynamo/backend/backends.py"}]} | 1,994 | 231 |
gh_patches_debug_30885 | rasdani/github-patches | git_diff | plotly__dash-2207 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Render arbitrary keys of components inside dictionaries.
It should be possible to render arbitrary keys as component inside an object prop.
Example typescript props:
```ts
type Props = {
dynamic: {[k: string]: JSX.Element}
}
```
prop-types: `PropTypes.objectOf(PropTypes.node)`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dash/development/_collect_nodes.py`
Content:
```
1 def is_node(value):
2 return value in ("node", "element")
3
4
5 def is_shape(value):
6 return value in ("shape", "exact")
7
8
9 def collect_array(a_value, base, nodes):
10 a_type = a_value["name"]
11 if is_node(a_type):
12 nodes.append(base)
13 elif a_type in ("shape", "exact"):
14 nodes = collect_nodes(a_value["value"], base + "[]", nodes)
15 elif a_type == "union":
16 nodes = collect_union(a_value["value"], base + "[]", nodes)
17 return nodes
18
19
20 def collect_union(type_list, base, nodes):
21 for t in type_list:
22 if is_node(t["name"]):
23 nodes.append(base)
24 elif is_shape(t["name"]):
25 nodes = collect_nodes(t["value"], base, nodes)
26 elif t["name"] == "arrayOf":
27 nodes = collect_array(t["value"], base, nodes)
28 return nodes
29
30
31 def collect_nodes(metadata, base="", nodes=None):
32 nodes = nodes or []
33
34 for prop_name, value in metadata.items():
35 # Support for recursive shapes, the type is directly in the field.
36 t_value = value.get("type", value)
37 p_type = t_value.get("name")
38
39 if base:
40 key = f"{base}.{prop_name}"
41 else:
42 key = prop_name
43 if is_node(p_type):
44 nodes.append(key)
45 elif p_type == "arrayOf":
46 a_value = t_value.get("value", t_value)
47 nodes = collect_array(a_value, key, nodes)
48 elif is_shape(p_type):
49 nodes = collect_nodes(t_value["value"], key, nodes)
50 elif p_type == "union":
51 nodes = collect_union(t_value["value"], key, nodes)
52
53 return nodes
54
55
56 def filter_base_nodes(nodes):
57 return [n for n in nodes if not any(e in n for e in ("[]", "."))]
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dash/development/_collect_nodes.py b/dash/development/_collect_nodes.py
--- a/dash/development/_collect_nodes.py
+++ b/dash/development/_collect_nodes.py
@@ -14,6 +14,8 @@
nodes = collect_nodes(a_value["value"], base + "[]", nodes)
elif a_type == "union":
nodes = collect_union(a_value["value"], base + "[]", nodes)
+ elif a_type == "objectOf":
+ nodes = collect_object(a_value["value"], base + "[]", nodes)
return nodes
@@ -25,6 +27,22 @@
nodes = collect_nodes(t["value"], base, nodes)
elif t["name"] == "arrayOf":
nodes = collect_array(t["value"], base, nodes)
+ elif t["name"] == "objectOf":
+ nodes = collect_object(t["value"], base, nodes)
+ return nodes
+
+
+def collect_object(o_value, base, nodes):
+ o_name = o_value.get("name")
+ o_key = base + "{}"
+ if is_node(o_name):
+ nodes.append(o_key)
+ elif is_shape(o_name):
+ nodes = collect_nodes(o_value.get("value", {}), o_key, nodes)
+ elif o_name == "union":
+ nodes = collect_union(o_value.get("value"), o_key, nodes)
+ elif o_name == "arrayOf":
+ nodes = collect_array(o_value, o_key, nodes)
return nodes
@@ -49,9 +67,12 @@
nodes = collect_nodes(t_value["value"], key, nodes)
elif p_type == "union":
nodes = collect_union(t_value["value"], key, nodes)
+ elif p_type == "objectOf":
+ o_value = t_value.get("value", {})
+ nodes = collect_object(o_value, key, nodes)
return nodes
def filter_base_nodes(nodes):
- return [n for n in nodes if not any(e in n for e in ("[]", "."))]
+ return [n for n in nodes if not any(e in n for e in ("[]", ".", "{}"))]
| {"golden_diff": "diff --git a/dash/development/_collect_nodes.py b/dash/development/_collect_nodes.py\n--- a/dash/development/_collect_nodes.py\n+++ b/dash/development/_collect_nodes.py\n@@ -14,6 +14,8 @@\n nodes = collect_nodes(a_value[\"value\"], base + \"[]\", nodes)\n elif a_type == \"union\":\n nodes = collect_union(a_value[\"value\"], base + \"[]\", nodes)\n+ elif a_type == \"objectOf\":\n+ nodes = collect_object(a_value[\"value\"], base + \"[]\", nodes)\n return nodes\n \n \n@@ -25,6 +27,22 @@\n nodes = collect_nodes(t[\"value\"], base, nodes)\n elif t[\"name\"] == \"arrayOf\":\n nodes = collect_array(t[\"value\"], base, nodes)\n+ elif t[\"name\"] == \"objectOf\":\n+ nodes = collect_object(t[\"value\"], base, nodes)\n+ return nodes\n+\n+\n+def collect_object(o_value, base, nodes):\n+ o_name = o_value.get(\"name\")\n+ o_key = base + \"{}\"\n+ if is_node(o_name):\n+ nodes.append(o_key)\n+ elif is_shape(o_name):\n+ nodes = collect_nodes(o_value.get(\"value\", {}), o_key, nodes)\n+ elif o_name == \"union\":\n+ nodes = collect_union(o_value.get(\"value\"), o_key, nodes)\n+ elif o_name == \"arrayOf\":\n+ nodes = collect_array(o_value, o_key, nodes)\n return nodes\n \n \n@@ -49,9 +67,12 @@\n nodes = collect_nodes(t_value[\"value\"], key, nodes)\n elif p_type == \"union\":\n nodes = collect_union(t_value[\"value\"], key, nodes)\n+ elif p_type == \"objectOf\":\n+ o_value = t_value.get(\"value\", {})\n+ nodes = collect_object(o_value, key, nodes)\n \n return nodes\n \n \n def filter_base_nodes(nodes):\n- return [n for n in nodes if not any(e in n for e in (\"[]\", \".\"))]\n+ return [n for n in nodes if not any(e in n for e in (\"[]\", \".\", \"{}\"))]\n", "issue": "Render arbitrary keys of components inside dictionaries.\nIt should be possible to render arbitrary keys as component inside an object prop.\r\n\r\nExample typescript props:\r\n```ts\r\ntype Props = {\r\n dynamic: {[k: string]: JSX.Element}\r\n}\r\n```\r\nprop-types: `PropTypes.objectOf(PropTypes.node)` \n", "before_files": [{"content": "def is_node(value):\n return value in (\"node\", \"element\")\n\n\ndef is_shape(value):\n return value in (\"shape\", \"exact\")\n\n\ndef collect_array(a_value, base, nodes):\n a_type = a_value[\"name\"]\n if is_node(a_type):\n nodes.append(base)\n elif a_type in (\"shape\", \"exact\"):\n nodes = collect_nodes(a_value[\"value\"], base + \"[]\", nodes)\n elif a_type == \"union\":\n nodes = collect_union(a_value[\"value\"], base + \"[]\", nodes)\n return nodes\n\n\ndef collect_union(type_list, base, nodes):\n for t in type_list:\n if is_node(t[\"name\"]):\n nodes.append(base)\n elif is_shape(t[\"name\"]):\n nodes = collect_nodes(t[\"value\"], base, nodes)\n elif t[\"name\"] == \"arrayOf\":\n nodes = collect_array(t[\"value\"], base, nodes)\n return nodes\n\n\ndef collect_nodes(metadata, base=\"\", nodes=None):\n nodes = nodes or []\n\n for prop_name, value in metadata.items():\n # Support for recursive shapes, the type is directly in the field.\n t_value = value.get(\"type\", value)\n p_type = t_value.get(\"name\")\n\n if base:\n key = f\"{base}.{prop_name}\"\n else:\n key = prop_name\n if is_node(p_type):\n nodes.append(key)\n elif p_type == \"arrayOf\":\n a_value = t_value.get(\"value\", t_value)\n nodes = collect_array(a_value, key, nodes)\n elif is_shape(p_type):\n nodes = collect_nodes(t_value[\"value\"], key, nodes)\n elif p_type == \"union\":\n nodes = collect_union(t_value[\"value\"], key, nodes)\n\n return nodes\n\n\ndef filter_base_nodes(nodes):\n return [n for n in nodes if not any(e in 
n for e in (\"[]\", \".\"))]\n", "path": "dash/development/_collect_nodes.py"}], "after_files": [{"content": "def is_node(value):\n return value in (\"node\", \"element\")\n\n\ndef is_shape(value):\n return value in (\"shape\", \"exact\")\n\n\ndef collect_array(a_value, base, nodes):\n a_type = a_value[\"name\"]\n if is_node(a_type):\n nodes.append(base)\n elif a_type in (\"shape\", \"exact\"):\n nodes = collect_nodes(a_value[\"value\"], base + \"[]\", nodes)\n elif a_type == \"union\":\n nodes = collect_union(a_value[\"value\"], base + \"[]\", nodes)\n elif a_type == \"objectOf\":\n nodes = collect_object(a_value[\"value\"], base + \"[]\", nodes)\n return nodes\n\n\ndef collect_union(type_list, base, nodes):\n for t in type_list:\n if is_node(t[\"name\"]):\n nodes.append(base)\n elif is_shape(t[\"name\"]):\n nodes = collect_nodes(t[\"value\"], base, nodes)\n elif t[\"name\"] == \"arrayOf\":\n nodes = collect_array(t[\"value\"], base, nodes)\n elif t[\"name\"] == \"objectOf\":\n nodes = collect_object(t[\"value\"], base, nodes)\n return nodes\n\n\ndef collect_object(o_value, base, nodes):\n o_name = o_value.get(\"name\")\n o_key = base + \"{}\"\n if is_node(o_name):\n nodes.append(o_key)\n elif is_shape(o_name):\n nodes = collect_nodes(o_value.get(\"value\", {}), o_key, nodes)\n elif o_name == \"union\":\n nodes = collect_union(o_value.get(\"value\"), o_key, nodes)\n elif o_name == \"arrayOf\":\n nodes = collect_array(o_value, o_key, nodes)\n return nodes\n\n\ndef collect_nodes(metadata, base=\"\", nodes=None):\n nodes = nodes or []\n\n for prop_name, value in metadata.items():\n # Support for recursive shapes, the type is directly in the field.\n t_value = value.get(\"type\", value)\n p_type = t_value.get(\"name\")\n\n if base:\n key = f\"{base}.{prop_name}\"\n else:\n key = prop_name\n if is_node(p_type):\n nodes.append(key)\n elif p_type == \"arrayOf\":\n a_value = t_value.get(\"value\", t_value)\n nodes = collect_array(a_value, key, nodes)\n elif is_shape(p_type):\n nodes = collect_nodes(t_value[\"value\"], key, nodes)\n elif p_type == \"union\":\n nodes = collect_union(t_value[\"value\"], key, nodes)\n elif p_type == \"objectOf\":\n o_value = t_value.get(\"value\", {})\n nodes = collect_object(o_value, key, nodes)\n\n return nodes\n\n\ndef filter_base_nodes(nodes):\n return [n for n in nodes if not any(e in n for e in (\"[]\", \".\", \"{}\"))]\n", "path": "dash/development/_collect_nodes.py"}]} | 847 | 486 |
gh_patches_debug_1509 | rasdani/github-patches | git_diff | searx__searx-1483 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
wolframalpha engine is broken
The wolframalpha engine appears to be broken in 0.15.0
```
searx-run[9330]: ERROR:searx.search:engine wolframalpha : exception : Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.
searx-run[9330]: Traceback (most recent call last):
searx-run[9330]: File "/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py", line 104, in search_one_request_safe
searx-run[9330]: search_results = search_one_request(engine, query, request_params)
searx-run[9330]: File "/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py", line 87, in search_one_request
searx-run[9330]: return engine.response(response)
searx-run[9330]: File "/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/engines/wolframalpha_api.py", line 68, in response
searx-run[9330]: search_results = etree.XML(resp.text)
searx-run[9330]: File "src/lxml/etree.pyx", line 3192, in lxml.etree.XML
searx-run[9330]: File "src/lxml/parser.pxi", line 1872, in lxml.etree._parseMemoryDocument
searx-run[9330]: ValueError: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.
```
wolframalpha engine is broken
The wolframalpha engine appears to be broken in 0.15.0
```
searx-run[9330]: ERROR:searx.search:engine wolframalpha : exception : Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.
searx-run[9330]: Traceback (most recent call last):
searx-run[9330]: File "/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py", line 104, in search_one_request_safe
searx-run[9330]: search_results = search_one_request(engine, query, request_params)
searx-run[9330]: File "/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py", line 87, in search_one_request
searx-run[9330]: return engine.response(response)
searx-run[9330]: File "/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/engines/wolframalpha_api.py", line 68, in response
searx-run[9330]: search_results = etree.XML(resp.text)
searx-run[9330]: File "src/lxml/etree.pyx", line 3192, in lxml.etree.XML
searx-run[9330]: File "src/lxml/parser.pxi", line 1872, in lxml.etree._parseMemoryDocument
searx-run[9330]: ValueError: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/wolframalpha_api.py`
Content:
```
1 # Wolfram Alpha (Science)
2 #
3 # @website https://www.wolframalpha.com
4 # @provide-api yes (https://api.wolframalpha.com/v2/)
5 #
6 # @using-api yes
7 # @results XML
8 # @stable yes
9 # @parse url, infobox
10
11 from lxml import etree
12 from searx.url_utils import urlencode
13
14 # search-url
15 search_url = 'https://api.wolframalpha.com/v2/query?appid={api_key}&{query}'
16 site_url = 'https://www.wolframalpha.com/input/?{query}'
17 api_key = '' # defined in settings.yml
18
19 # xpath variables
20 failure_xpath = '/queryresult[attribute::success="false"]'
21 input_xpath = '//pod[starts-with(attribute::id, "Input")]/subpod/plaintext'
22 pods_xpath = '//pod'
23 subpods_xpath = './subpod'
24 pod_primary_xpath = './@primary'
25 pod_id_xpath = './@id'
26 pod_title_xpath = './@title'
27 plaintext_xpath = './plaintext'
28 image_xpath = './img'
29 img_src_xpath = './@src'
30 img_alt_xpath = './@alt'
31
32 # pods to display as image in infobox
33 # this pods do return a plaintext, but they look better and are more useful as images
34 image_pods = {'VisualRepresentation',
35 'Illustration'}
36
37
38 # do search-request
39 def request(query, params):
40 params['url'] = search_url.format(query=urlencode({'input': query}), api_key=api_key)
41 params['headers']['Referer'] = site_url.format(query=urlencode({'i': query}))
42
43 return params
44
45
46 # replace private user area characters to make text legible
47 def replace_pua_chars(text):
48 pua_chars = {u'\uf522': u'\u2192', # rigth arrow
49 u'\uf7b1': u'\u2115', # set of natural numbers
50 u'\uf7b4': u'\u211a', # set of rational numbers
51 u'\uf7b5': u'\u211d', # set of real numbers
52 u'\uf7bd': u'\u2124', # set of integer numbers
53 u'\uf74c': 'd', # differential
54 u'\uf74d': u'\u212f', # euler's number
55 u'\uf74e': 'i', # imaginary number
56 u'\uf7d9': '='} # equals sign
57
58 for k, v in pua_chars.items():
59 text = text.replace(k, v)
60
61 return text
62
63
64 # get response from search-request
65 def response(resp):
66 results = []
67
68 search_results = etree.XML(resp.text)
69
70 # return empty array if there are no results
71 if search_results.xpath(failure_xpath):
72 return []
73
74 try:
75 infobox_title = search_results.xpath(input_xpath)[0].text
76 except:
77 infobox_title = ""
78
79 pods = search_results.xpath(pods_xpath)
80 result_chunks = []
81 result_content = ""
82 for pod in pods:
83 pod_id = pod.xpath(pod_id_xpath)[0]
84 pod_title = pod.xpath(pod_title_xpath)[0]
85 pod_is_result = pod.xpath(pod_primary_xpath)
86
87 subpods = pod.xpath(subpods_xpath)
88 if not subpods:
89 continue
90
91 # Appends either a text or an image, depending on which one is more suitable
92 for subpod in subpods:
93 content = subpod.xpath(plaintext_xpath)[0].text
94 image = subpod.xpath(image_xpath)
95
96 if content and pod_id not in image_pods:
97
98 if pod_is_result or not result_content:
99 if pod_id != "Input":
100 result_content = "%s: %s" % (pod_title, content)
101
102 # if no input pod was found, title is first plaintext pod
103 if not infobox_title:
104 infobox_title = content
105
106 content = replace_pua_chars(content)
107 result_chunks.append({'label': pod_title, 'value': content})
108
109 elif image:
110 result_chunks.append({'label': pod_title,
111 'image': {'src': image[0].xpath(img_src_xpath)[0],
112 'alt': image[0].xpath(img_alt_xpath)[0]}})
113
114 if not result_chunks:
115 return []
116
117 title = "Wolfram|Alpha (%s)" % infobox_title
118
119 # append infobox
120 results.append({'infobox': infobox_title,
121 'attributes': result_chunks,
122 'urls': [{'title': 'Wolfram|Alpha', 'url': resp.request.headers['Referer']}]})
123
124 # append link to site
125 results.append({'url': resp.request.headers['Referer'],
126 'title': title,
127 'content': result_content})
128
129 return results
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/engines/wolframalpha_api.py b/searx/engines/wolframalpha_api.py
--- a/searx/engines/wolframalpha_api.py
+++ b/searx/engines/wolframalpha_api.py
@@ -65,7 +65,7 @@
def response(resp):
results = []
- search_results = etree.XML(resp.text)
+ search_results = etree.XML(resp.content)
# return empty array if there are no results
if search_results.xpath(failure_xpath):
| {"golden_diff": "diff --git a/searx/engines/wolframalpha_api.py b/searx/engines/wolframalpha_api.py\n--- a/searx/engines/wolframalpha_api.py\n+++ b/searx/engines/wolframalpha_api.py\n@@ -65,7 +65,7 @@\n def response(resp):\n results = []\n \n- search_results = etree.XML(resp.text)\n+ search_results = etree.XML(resp.content)\n \n # return empty array if there are no results\n if search_results.xpath(failure_xpath):\n", "issue": "wolframalpha engine is broken\nThe wolframalpha engine appears to be broken in 0.15.0\r\n```\r\nsearx-run[9330]: ERROR:searx.search:engine wolframalpha : exception : Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.\r\nsearx-run[9330]: Traceback (most recent call last):\r\nsearx-run[9330]: File \"/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py\", line 104, in search_one_request_safe\r\nsearx-run[9330]: search_results = search_one_request(engine, query, request_params)\r\nsearx-run[9330]: File \"/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py\", line 87, in search_one_request\r\nsearx-run[9330]: return engine.response(response)\r\nsearx-run[9330]: File \"/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/engines/wolframalpha_api.py\", line 68, in response\r\nsearx-run[9330]: search_results = etree.XML(resp.text)\r\nsearx-run[9330]: File \"src/lxml/etree.pyx\", line 3192, in lxml.etree.XML\r\nsearx-run[9330]: File \"src/lxml/parser.pxi\", line 1872, in lxml.etree._parseMemoryDocument\r\nsearx-run[9330]: ValueError: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.\r\n```\nwolframalpha engine is broken\nThe wolframalpha engine appears to be broken in 0.15.0\r\n```\r\nsearx-run[9330]: ERROR:searx.search:engine wolframalpha : exception : Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration.\r\nsearx-run[9330]: Traceback (most recent call last):\r\nsearx-run[9330]: File \"/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py\", line 104, in search_one_request_safe\r\nsearx-run[9330]: search_results = search_one_request(engine, query, request_params)\r\nsearx-run[9330]: File \"/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/search.py\", line 87, in search_one_request\r\nsearx-run[9330]: return engine.response(response)\r\nsearx-run[9330]: File \"/nix/store/rf8v47ispmh7bp0rbl291ml1fivfs424-searx-0.15.0/lib/python3.6/site-packages/searx/engines/wolframalpha_api.py\", line 68, in response\r\nsearx-run[9330]: search_results = etree.XML(resp.text)\r\nsearx-run[9330]: File \"src/lxml/etree.pyx\", line 3192, in lxml.etree.XML\r\nsearx-run[9330]: File \"src/lxml/parser.pxi\", line 1872, in lxml.etree._parseMemoryDocument\r\nsearx-run[9330]: ValueError: Unicode strings with encoding declaration are not supported. 
Please use bytes input or XML fragments without declaration.\r\n```\n", "before_files": [{"content": "# Wolfram Alpha (Science)\n#\n# @website https://www.wolframalpha.com\n# @provide-api yes (https://api.wolframalpha.com/v2/)\n#\n# @using-api yes\n# @results XML\n# @stable yes\n# @parse url, infobox\n\nfrom lxml import etree\nfrom searx.url_utils import urlencode\n\n# search-url\nsearch_url = 'https://api.wolframalpha.com/v2/query?appid={api_key}&{query}'\nsite_url = 'https://www.wolframalpha.com/input/?{query}'\napi_key = '' # defined in settings.yml\n\n# xpath variables\nfailure_xpath = '/queryresult[attribute::success=\"false\"]'\ninput_xpath = '//pod[starts-with(attribute::id, \"Input\")]/subpod/plaintext'\npods_xpath = '//pod'\nsubpods_xpath = './subpod'\npod_primary_xpath = './@primary'\npod_id_xpath = './@id'\npod_title_xpath = './@title'\nplaintext_xpath = './plaintext'\nimage_xpath = './img'\nimg_src_xpath = './@src'\nimg_alt_xpath = './@alt'\n\n# pods to display as image in infobox\n# this pods do return a plaintext, but they look better and are more useful as images\nimage_pods = {'VisualRepresentation',\n 'Illustration'}\n\n\n# do search-request\ndef request(query, params):\n params['url'] = search_url.format(query=urlencode({'input': query}), api_key=api_key)\n params['headers']['Referer'] = site_url.format(query=urlencode({'i': query}))\n\n return params\n\n\n# replace private user area characters to make text legible\ndef replace_pua_chars(text):\n pua_chars = {u'\\uf522': u'\\u2192', # rigth arrow\n u'\\uf7b1': u'\\u2115', # set of natural numbers\n u'\\uf7b4': u'\\u211a', # set of rational numbers\n u'\\uf7b5': u'\\u211d', # set of real numbers\n u'\\uf7bd': u'\\u2124', # set of integer numbers\n u'\\uf74c': 'd', # differential\n u'\\uf74d': u'\\u212f', # euler's number\n u'\\uf74e': 'i', # imaginary number\n u'\\uf7d9': '='} # equals sign\n\n for k, v in pua_chars.items():\n text = text.replace(k, v)\n\n return text\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n search_results = etree.XML(resp.text)\n\n # return empty array if there are no results\n if search_results.xpath(failure_xpath):\n return []\n\n try:\n infobox_title = search_results.xpath(input_xpath)[0].text\n except:\n infobox_title = \"\"\n\n pods = search_results.xpath(pods_xpath)\n result_chunks = []\n result_content = \"\"\n for pod in pods:\n pod_id = pod.xpath(pod_id_xpath)[0]\n pod_title = pod.xpath(pod_title_xpath)[0]\n pod_is_result = pod.xpath(pod_primary_xpath)\n\n subpods = pod.xpath(subpods_xpath)\n if not subpods:\n continue\n\n # Appends either a text or an image, depending on which one is more suitable\n for subpod in subpods:\n content = subpod.xpath(plaintext_xpath)[0].text\n image = subpod.xpath(image_xpath)\n\n if content and pod_id not in image_pods:\n\n if pod_is_result or not result_content:\n if pod_id != \"Input\":\n result_content = \"%s: %s\" % (pod_title, content)\n\n # if no input pod was found, title is first plaintext pod\n if not infobox_title:\n infobox_title = content\n\n content = replace_pua_chars(content)\n result_chunks.append({'label': pod_title, 'value': content})\n\n elif image:\n result_chunks.append({'label': pod_title,\n 'image': {'src': image[0].xpath(img_src_xpath)[0],\n 'alt': image[0].xpath(img_alt_xpath)[0]}})\n\n if not result_chunks:\n return []\n\n title = \"Wolfram|Alpha (%s)\" % infobox_title\n\n # append infobox\n results.append({'infobox': infobox_title,\n 'attributes': result_chunks,\n 'urls': [{'title': 'Wolfram|Alpha', 
'url': resp.request.headers['Referer']}]})\n\n # append link to site\n results.append({'url': resp.request.headers['Referer'],\n 'title': title,\n 'content': result_content})\n\n return results\n", "path": "searx/engines/wolframalpha_api.py"}], "after_files": [{"content": "# Wolfram Alpha (Science)\n#\n# @website https://www.wolframalpha.com\n# @provide-api yes (https://api.wolframalpha.com/v2/)\n#\n# @using-api yes\n# @results XML\n# @stable yes\n# @parse url, infobox\n\nfrom lxml import etree\nfrom searx.url_utils import urlencode\n\n# search-url\nsearch_url = 'https://api.wolframalpha.com/v2/query?appid={api_key}&{query}'\nsite_url = 'https://www.wolframalpha.com/input/?{query}'\napi_key = '' # defined in settings.yml\n\n# xpath variables\nfailure_xpath = '/queryresult[attribute::success=\"false\"]'\ninput_xpath = '//pod[starts-with(attribute::id, \"Input\")]/subpod/plaintext'\npods_xpath = '//pod'\nsubpods_xpath = './subpod'\npod_primary_xpath = './@primary'\npod_id_xpath = './@id'\npod_title_xpath = './@title'\nplaintext_xpath = './plaintext'\nimage_xpath = './img'\nimg_src_xpath = './@src'\nimg_alt_xpath = './@alt'\n\n# pods to display as image in infobox\n# this pods do return a plaintext, but they look better and are more useful as images\nimage_pods = {'VisualRepresentation',\n 'Illustration'}\n\n\n# do search-request\ndef request(query, params):\n params['url'] = search_url.format(query=urlencode({'input': query}), api_key=api_key)\n params['headers']['Referer'] = site_url.format(query=urlencode({'i': query}))\n\n return params\n\n\n# replace private user area characters to make text legible\ndef replace_pua_chars(text):\n pua_chars = {u'\\uf522': u'\\u2192', # rigth arrow\n u'\\uf7b1': u'\\u2115', # set of natural numbers\n u'\\uf7b4': u'\\u211a', # set of rational numbers\n u'\\uf7b5': u'\\u211d', # set of real numbers\n u'\\uf7bd': u'\\u2124', # set of integer numbers\n u'\\uf74c': 'd', # differential\n u'\\uf74d': u'\\u212f', # euler's number\n u'\\uf74e': 'i', # imaginary number\n u'\\uf7d9': '='} # equals sign\n\n for k, v in pua_chars.items():\n text = text.replace(k, v)\n\n return text\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n search_results = etree.XML(resp.content)\n\n # return empty array if there are no results\n if search_results.xpath(failure_xpath):\n return []\n\n try:\n infobox_title = search_results.xpath(input_xpath)[0].text\n except:\n infobox_title = \"\"\n\n pods = search_results.xpath(pods_xpath)\n result_chunks = []\n result_content = \"\"\n for pod in pods:\n pod_id = pod.xpath(pod_id_xpath)[0]\n pod_title = pod.xpath(pod_title_xpath)[0]\n pod_is_result = pod.xpath(pod_primary_xpath)\n\n subpods = pod.xpath(subpods_xpath)\n if not subpods:\n continue\n\n # Appends either a text or an image, depending on which one is more suitable\n for subpod in subpods:\n content = subpod.xpath(plaintext_xpath)[0].text\n image = subpod.xpath(image_xpath)\n\n if content and pod_id not in image_pods:\n\n if pod_is_result or not result_content:\n if pod_id != \"Input\":\n result_content = \"%s: %s\" % (pod_title, content)\n\n # if no input pod was found, title is first plaintext pod\n if not infobox_title:\n infobox_title = content\n\n content = replace_pua_chars(content)\n result_chunks.append({'label': pod_title, 'value': content})\n\n elif image:\n result_chunks.append({'label': pod_title,\n 'image': {'src': image[0].xpath(img_src_xpath)[0],\n 'alt': image[0].xpath(img_alt_xpath)[0]}})\n\n if not result_chunks:\n return []\n\n title 
= \"Wolfram|Alpha (%s)\" % infobox_title\n\n # append infobox\n results.append({'infobox': infobox_title,\n 'attributes': result_chunks,\n 'urls': [{'title': 'Wolfram|Alpha', 'url': resp.request.headers['Referer']}]})\n\n # append link to site\n results.append({'url': resp.request.headers['Referer'],\n 'title': title,\n 'content': result_content})\n\n return results\n", "path": "searx/engines/wolframalpha_api.py"}]} | 2,584 | 122 |
gh_patches_debug_16417 | rasdani/github-patches | git_diff | celery__kombu-101 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Kombu compression "encodes" the body ?
At https://github.com/ask/kombu/blob/master/kombu/compression.py#L61, we see:
```
return encoder(body.encode("utf-8")), content_type
```
Same for decoding. What gives? It crashes with msgpack (which can't be encoded). Is there any reason for that ?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kombu/compression.py`
Content:
```
1 """
2 kombu.compression
3 =================
4
5 Compression utilities.
6
7 :copyright: (c) 2009 - 2012 by Ask Solem.
8 :license: BSD, see LICENSE for more details.
9
10 """
11 from __future__ import absolute_import
12
13 import zlib
14
15 _aliases = {}
16 _encoders = {}
17 _decoders = {}
18
19 __all__ = ["register", "encoders", "get_encoder",
20 "get_decoder", "compress", "decompress"]
21
22
23 def register(encoder, decoder, content_type, aliases=[]):
24 """Register new compression method.
25
26 :param encoder: Function used to compress text.
27 :param decoder: Function used to decompress previously compressed text.
28 :param content_type: The mime type this compression method identifies as.
29 :param aliases: A list of names to associate with this compression method.
30
31 """
32 _encoders[content_type] = encoder
33 _decoders[content_type] = decoder
34 _aliases.update((alias, content_type) for alias in aliases)
35
36
37 def encoders():
38 """Returns a list of available compression methods."""
39 return _encoders.keys()
40
41
42 def get_encoder(t):
43 """Get encoder by alias name."""
44 t = _aliases.get(t, t)
45 return _encoders[t], t
46
47
48 def get_decoder(t):
49 """Get decoder by alias name."""
50 return _decoders[_aliases.get(t, t)]
51
52
53 def compress(body, content_type):
54 """Compress text.
55
56 :param body: The text to compress.
57 :param content_type: mime-type of compression method to use.
58
59 """
60 encoder, content_type = get_encoder(content_type)
61 return encoder(body.encode("utf-8")), content_type
62
63
64 def decompress(body, content_type):
65 """Decompress compressed text.
66
67 :param body: Previously compressed text to uncompress.
68 :param content_type: mime-type of compression method used.
69
70 """
71 return get_decoder(content_type)(body).decode("utf-8")
72
73
74 register(zlib.compress,
75 zlib.decompress,
76 "application/x-gzip", aliases=["gzip", "zlib"])
77 try:
78 import bz2
79 except ImportError:
80 pass # Jython?
81 else:
82 register(bz2.compress,
83 bz2.decompress,
84 "application/x-bz2", aliases=["bzip2", "bzip"])
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kombu/compression.py b/kombu/compression.py
--- a/kombu/compression.py
+++ b/kombu/compression.py
@@ -10,6 +10,8 @@
"""
from __future__ import absolute_import
+from kombu.utils.encoding import ensure_bytes, bytes_to_str
+
import zlib
_aliases = {}
@@ -58,7 +60,7 @@
"""
encoder, content_type = get_encoder(content_type)
- return encoder(body.encode("utf-8")), content_type
+ return encoder(ensure_bytes(body)), content_type
def decompress(body, content_type):
@@ -68,7 +70,7 @@
:param content_type: mime-type of compression method used.
"""
- return get_decoder(content_type)(body).decode("utf-8")
+ return bytes_to_str(get_decoder(content_type)(body))
register(zlib.compress,
| {"golden_diff": "diff --git a/kombu/compression.py b/kombu/compression.py\n--- a/kombu/compression.py\n+++ b/kombu/compression.py\n@@ -10,6 +10,8 @@\n \"\"\"\n from __future__ import absolute_import\n \n+from kombu.utils.encoding import ensure_bytes, bytes_to_str\n+\n import zlib\n \n _aliases = {}\n@@ -58,7 +60,7 @@\n \n \"\"\"\n encoder, content_type = get_encoder(content_type)\n- return encoder(body.encode(\"utf-8\")), content_type\n+ return encoder(ensure_bytes(body)), content_type\n \n \n def decompress(body, content_type):\n@@ -68,7 +70,7 @@\n :param content_type: mime-type of compression method used.\n \n \"\"\"\n- return get_decoder(content_type)(body).decode(\"utf-8\")\n+ return bytes_to_str(get_decoder(content_type)(body))\n \n \n register(zlib.compress,\n", "issue": "Kombu compression \"encodes\" the body ?\nAt https://github.com/ask/kombu/blob/master/kombu/compression.py#L61, we see:\n\n```\nreturn encoder(body.encode(\"utf-8\")), content_type\n```\n\nSame for decoding. What gives? It crashes with msgpack (which can't be encoded). Is there any reason for that ?\n\n", "before_files": [{"content": "\"\"\"\nkombu.compression\n=================\n\nCompression utilities.\n\n:copyright: (c) 2009 - 2012 by Ask Solem.\n:license: BSD, see LICENSE for more details.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport zlib\n\n_aliases = {}\n_encoders = {}\n_decoders = {}\n\n__all__ = [\"register\", \"encoders\", \"get_encoder\",\n \"get_decoder\", \"compress\", \"decompress\"]\n\n\ndef register(encoder, decoder, content_type, aliases=[]):\n \"\"\"Register new compression method.\n\n :param encoder: Function used to compress text.\n :param decoder: Function used to decompress previously compressed text.\n :param content_type: The mime type this compression method identifies as.\n :param aliases: A list of names to associate with this compression method.\n\n \"\"\"\n _encoders[content_type] = encoder\n _decoders[content_type] = decoder\n _aliases.update((alias, content_type) for alias in aliases)\n\n\ndef encoders():\n \"\"\"Returns a list of available compression methods.\"\"\"\n return _encoders.keys()\n\n\ndef get_encoder(t):\n \"\"\"Get encoder by alias name.\"\"\"\n t = _aliases.get(t, t)\n return _encoders[t], t\n\n\ndef get_decoder(t):\n \"\"\"Get decoder by alias name.\"\"\"\n return _decoders[_aliases.get(t, t)]\n\n\ndef compress(body, content_type):\n \"\"\"Compress text.\n\n :param body: The text to compress.\n :param content_type: mime-type of compression method to use.\n\n \"\"\"\n encoder, content_type = get_encoder(content_type)\n return encoder(body.encode(\"utf-8\")), content_type\n\n\ndef decompress(body, content_type):\n \"\"\"Decompress compressed text.\n\n :param body: Previously compressed text to uncompress.\n :param content_type: mime-type of compression method used.\n\n \"\"\"\n return get_decoder(content_type)(body).decode(\"utf-8\")\n\n\nregister(zlib.compress,\n zlib.decompress,\n \"application/x-gzip\", aliases=[\"gzip\", \"zlib\"])\ntry:\n import bz2\nexcept ImportError:\n pass # Jython?\nelse:\n register(bz2.compress,\n bz2.decompress,\n \"application/x-bz2\", aliases=[\"bzip2\", \"bzip\"])\n", "path": "kombu/compression.py"}], "after_files": [{"content": "\"\"\"\nkombu.compression\n=================\n\nCompression utilities.\n\n:copyright: (c) 2009 - 2012 by Ask Solem.\n:license: BSD, see LICENSE for more details.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom kombu.utils.encoding import ensure_bytes, bytes_to_str\n\nimport 
zlib\n\n_aliases = {}\n_encoders = {}\n_decoders = {}\n\n__all__ = [\"register\", \"encoders\", \"get_encoder\",\n \"get_decoder\", \"compress\", \"decompress\"]\n\n\ndef register(encoder, decoder, content_type, aliases=[]):\n \"\"\"Register new compression method.\n\n :param encoder: Function used to compress text.\n :param decoder: Function used to decompress previously compressed text.\n :param content_type: The mime type this compression method identifies as.\n :param aliases: A list of names to associate with this compression method.\n\n \"\"\"\n _encoders[content_type] = encoder\n _decoders[content_type] = decoder\n _aliases.update((alias, content_type) for alias in aliases)\n\n\ndef encoders():\n \"\"\"Returns a list of available compression methods.\"\"\"\n return _encoders.keys()\n\n\ndef get_encoder(t):\n \"\"\"Get encoder by alias name.\"\"\"\n t = _aliases.get(t, t)\n return _encoders[t], t\n\n\ndef get_decoder(t):\n \"\"\"Get decoder by alias name.\"\"\"\n return _decoders[_aliases.get(t, t)]\n\n\ndef compress(body, content_type):\n \"\"\"Compress text.\n\n :param body: The text to compress.\n :param content_type: mime-type of compression method to use.\n\n \"\"\"\n encoder, content_type = get_encoder(content_type)\n return encoder(ensure_bytes(body)), content_type\n\n\ndef decompress(body, content_type):\n \"\"\"Decompress compressed text.\n\n :param body: Previously compressed text to uncompress.\n :param content_type: mime-type of compression method used.\n\n \"\"\"\n return bytes_to_str(get_decoder(content_type)(body))\n\n\nregister(zlib.compress,\n zlib.decompress,\n \"application/x-gzip\", aliases=[\"gzip\", \"zlib\"])\ntry:\n import bz2\nexcept ImportError:\n pass # Jython?\nelse:\n register(bz2.compress,\n bz2.decompress,\n \"application/x-bz2\", aliases=[\"bzip2\", \"bzip\"])\n", "path": "kombu/compression.py"}]} | 1,012 | 203 |