problem_id stringlengths 18 22 | source stringclasses 1 value | task_type stringclasses 1 value | in_source_id stringlengths 13 58 | prompt stringlengths 1.1k 10.2k | golden_diff stringlengths 151 4.94k | verification_info stringlengths 582 21k | num_tokens int64 271 2.05k | num_tokens_diff int64 47 1.02k |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_4921 | rasdani/github-patches | git_diff | ocadotechnology__aimmo-123 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Users should see which avatar is theirs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `players/views.py`
Content:
```
1 import logging
2
3 from django.http import JsonResponse
4 from django.contrib.auth.decorators import login_required
5 from django.http import HttpResponse
6 from django.views.generic import TemplateView
7
8 import os
9
10 from models import Player
11 from . import app_settings
12
13
14 def _post_code_success_response(message):
15 return create_response("SUCCESS", message)
16
17
18 def create_response(status, message):
19 response = {
20 "status": status,
21 "message": message
22 }
23 return JsonResponse(response)
24
25
26 @login_required
27 def code(request):
28 try:
29 player = request.user.player
30 except Player.DoesNotExist:
31 initial_code_file_name = os.path.join(
32 os.path.abspath(os.path.dirname(__file__)),
33 'avatar_examples/dumb_avatar.py',
34 )
35 with open(initial_code_file_name) as initial_code_file:
36 initial_code = initial_code_file.read()
37 player = Player.objects.create(user=request.user, code=initial_code)
38 if request.method == 'POST':
39 player.code = request.POST['code']
40 player.save()
41
42 return _post_code_success_response("Your code was saved!")
43 else:
44 return HttpResponse(player.code)
45
46
47 def games(request):
48 response = {
49 'main': {
50 'parameters': [],
51 'users': [
52 {
53 'id': player.user.pk,
54 'code': player.code,
55 } for player in Player.objects.all()
56 ]
57 }
58 }
59 return JsonResponse(response)
60
61
62 class WatchView(TemplateView):
63 template_name = 'players/watch.html'
64
65 def get_context_data(self, **kwargs):
66 context = super(WatchView, self).get_context_data(**kwargs)
67 context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION('main')
68 return context
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/players/views.py b/players/views.py
--- a/players/views.py
+++ b/players/views.py
@@ -65,4 +65,5 @@
def get_context_data(self, **kwargs):
context = super(WatchView, self).get_context_data(**kwargs)
context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION('main')
+ context['current_user_player_key'] = self.request.user.pk
return context
| {"golden_diff": "diff --git a/players/views.py b/players/views.py\n--- a/players/views.py\n+++ b/players/views.py\n@@ -65,4 +65,5 @@\n def get_context_data(self, **kwargs):\n context = super(WatchView, self).get_context_data(**kwargs)\n context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION('main')\n+ context['current_user_player_key'] = self.request.user.pk\n return context\n", "issue": "Users should see which avatar is theirs\n\n", "before_files": [{"content": "import logging\n\nfrom django.http import JsonResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom django.views.generic import TemplateView\n\nimport os\n\nfrom models import Player\nfrom . import app_settings\n\n\ndef _post_code_success_response(message):\n return create_response(\"SUCCESS\", message)\n\n\ndef create_response(status, message):\n response = {\n \"status\": status,\n \"message\": message\n }\n return JsonResponse(response)\n\n\n@login_required\ndef code(request):\n try:\n player = request.user.player\n except Player.DoesNotExist:\n initial_code_file_name = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n 'avatar_examples/dumb_avatar.py',\n )\n with open(initial_code_file_name) as initial_code_file:\n initial_code = initial_code_file.read()\n player = Player.objects.create(user=request.user, code=initial_code)\n if request.method == 'POST':\n player.code = request.POST['code']\n player.save()\n\n return _post_code_success_response(\"Your code was saved!\")\n else:\n return HttpResponse(player.code)\n\n\ndef games(request):\n response = {\n 'main': {\n 'parameters': [],\n 'users': [\n {\n 'id': player.user.pk,\n 'code': player.code,\n } for player in Player.objects.all()\n ]\n }\n }\n return JsonResponse(response)\n\n\nclass WatchView(TemplateView):\n template_name = 'players/watch.html'\n\n def get_context_data(self, **kwargs):\n context = super(WatchView, self).get_context_data(**kwargs)\n context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION('main')\n return context\n", "path": "players/views.py"}], "after_files": [{"content": "import logging\n\nfrom django.http import JsonResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom django.views.generic import TemplateView\n\nimport os\n\nfrom models import Player\nfrom . 
import app_settings\n\n\ndef _post_code_success_response(message):\n return create_response(\"SUCCESS\", message)\n\n\ndef create_response(status, message):\n response = {\n \"status\": status,\n \"message\": message\n }\n return JsonResponse(response)\n\n\n@login_required\ndef code(request):\n try:\n player = request.user.player\n except Player.DoesNotExist:\n initial_code_file_name = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n 'avatar_examples/dumb_avatar.py',\n )\n with open(initial_code_file_name) as initial_code_file:\n initial_code = initial_code_file.read()\n player = Player.objects.create(user=request.user, code=initial_code)\n if request.method == 'POST':\n player.code = request.POST['code']\n player.save()\n\n return _post_code_success_response(\"Your code was saved!\")\n else:\n return HttpResponse(player.code)\n\n\ndef games(request):\n response = {\n 'main': {\n 'parameters': [],\n 'users': [\n {\n 'id': player.user.pk,\n 'code': player.code,\n } for player in Player.objects.all()\n ]\n }\n }\n return JsonResponse(response)\n\n\nclass WatchView(TemplateView):\n template_name = 'players/watch.html'\n\n def get_context_data(self, **kwargs):\n context = super(WatchView, self).get_context_data(**kwargs)\n context['game_url_base'], context['game_url_path'] = app_settings.GAME_SERVER_LOCATION_FUNCTION('main')\n context['current_user_player_key'] = self.request.user.pk\n return context\n", "path": "players/views.py"}]} | 767 | 109 |
gh_patches_debug_40748 | rasdani/github-patches | git_diff | vacanza__python-holidays-639 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ireland considering UK as base class and hence not being a country itself
Issue also opened here:
home-assistant/core#67542
Looks like Ireland is being considered as being part of the UK which is wrong as not all the holidays in the UK exist, or necessarily exist in Ireland.
Take a reference on this comment: https://github.com/home-assistant/core/issues/67542#issuecomment-1058079650
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `holidays/countries/ireland.py`
Content:
```
1 # python-holidays
2 # ---------------
3 # A fast, efficient Python library for generating country, province and state
4 # specific sets of holidays on the fly. It aims to make determining whether a
5 # specific date is a holiday as fast and flexible as possible.
6 #
7 # Authors: dr-prodigy <[email protected]> (c) 2017-2022
8 # ryanss <[email protected]> (c) 2014-2017
9 # Website: https://github.com/dr-prodigy/python-holidays
10 # License: MIT (see LICENSE file)
11
12 from datetime import date
13
14 from dateutil.easter import easter
15 from dateutil.relativedelta import relativedelta as rd, MO
16
17 from holidays.constants import MAR, MAY, JUN, AUG, OCT, DEC
18 from holidays.constants import MON, TUE, WED, THU, FRI, SAT, SUN, WEEKEND
19 from holidays.holiday_base import HolidayBase
20 from .united_kingdom import UnitedKingdom
21
22
23 class Ireland(UnitedKingdom):
24 country = "IE"
25
26 def __init__(self, **kwargs):
27 HolidayBase.__init__(self, **kwargs)
28
29 def _country_specific(self, year):
30 # Ireland exclusive holidays
31
32 # St. Patrick's Day
33 name = "St. Patrick's Day"
34 self[date(year, MAR, 17)] = name
35 if self.observed and date(year, MAR, 17).weekday() in WEEKEND:
36 self[date(year, MAR, 17) + rd(weekday=MO)] = name + " (Observed)"
37
38 # Easter Monday
39 self[easter(year) + rd(weekday=MO)] = "Easter Monday"
40
41 # May Day bank holiday (first Monday in May)
42 if year >= 1978:
43 name = "May Day"
44 if year == 1995:
45 dt = date(year, MAY, 8)
46 else:
47 dt = date(year, MAY, 1)
48 if dt.weekday() == MON:
49 self[dt] = name
50 elif dt.weekday() == TUE:
51 self[dt + rd(days=+6)] = name
52 elif dt.weekday() == WED:
53 self[dt + rd(days=+5)] = name
54 elif dt.weekday() == THU:
55 self[dt + rd(days=+4)] = name
56 elif dt.weekday() == FRI:
57 self[dt + rd(days=+3)] = name
58 elif dt.weekday() == SAT:
59 self[dt + rd(days=+2)] = name
60 elif dt.weekday() == SUN:
61 self[dt + rd(days=+1)] = name
62
63 # June bank holiday (first Monday in June)
64 self[date(year, JUN, 1) + rd(weekday=MO)] = "June Bank Holiday"
65
66 # Summer bank holiday (first Monday in August)
67 self[date(year, AUG, 1) + rd(weekday=MO)] = "Summer Bank Holiday"
68
69 # October Bank Holiday (last Monday in October)
70 self[date(year, OCT, 31) + rd(weekday=MO(-1))] = "October Bank Holiday"
71
72 # St. Stephen's Day
73 name = "St. Stephen's Day"
74 self[date(year, DEC, 26)] = name
75 if self.observed and date(year, DEC, 26).weekday() == SAT:
76 self[date(year, DEC, 28)] = name + " (Observed)"
77 elif self.observed and date(year, DEC, 26).weekday() == SUN:
78 self[date(year, DEC, 28)] = name + " (Observed)"
79
80
81 class IE(Ireland):
82 pass
83
84
85 class IRL(Ireland):
86 pass
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/holidays/countries/ireland.py b/holidays/countries/ireland.py
--- a/holidays/countries/ireland.py
+++ b/holidays/countries/ireland.py
@@ -16,20 +16,37 @@
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, MO
-from holidays.constants import MAR, MAY, JUN, AUG, OCT, DEC
+from holidays.constants import FEB, MAR, MAY, JUN, AUG, OCT, DEC
from holidays.constants import MON, TUE, WED, THU, FRI, SAT, SUN, WEEKEND
from holidays.holiday_base import HolidayBase
-from .united_kingdom import UnitedKingdom
+class Ireland(HolidayBase):
+ """
+ Official holidays in Ireland, as declared in the Citizen's Information
+ bulletin:
+ https://www.citizensinformation.ie/en/employment/employment_rights_and_conditions/leave_and_holidays/public_holidays_in_ireland.html
+ """
-class Ireland(UnitedKingdom):
country = "IE"
+ subdivisions = []
def __init__(self, **kwargs):
HolidayBase.__init__(self, **kwargs)
- def _country_specific(self, year):
- # Ireland exclusive holidays
+ def _populate(self, year):
+ self[date(year, JAN, 1)] = "New Year's Day"
+
+ # St. Brigid's Day
+ if year >= 2023:
+ dt = date(year, FEB, 1)
+ self[dt] = "St. Brigid's Day"
+
+ if self.observed and dt.weekday() != FRI:
+ self[date(year, FEB, 1) + rd(weekday=MO)] = "St. Brigid's Day (Observed)"
+
+ # One-off day of rememberance and recognition
+ if year == 2022:
+ self[date(year, MAR, 18)] = "Day of Rememberance and Recognition"
# St. Patrick's Day
name = "St. Patrick's Day"
@@ -40,7 +57,7 @@
# Easter Monday
self[easter(year) + rd(weekday=MO)] = "Easter Monday"
- # May Day bank holiday (first Monday in May)
+ # May bank holiday (first Monday in May)
if year >= 1978:
name = "May Day"
if year == 1995:
@@ -66,18 +83,24 @@
self[date(year, JUN, 1) + rd(weekday=MO)] = "June Bank Holiday"
# Summer bank holiday (first Monday in August)
- self[date(year, AUG, 1) + rd(weekday=MO)] = "Summer Bank Holiday"
+ self[date(year, AUG, 1) + rd(weekday=MO)] = "August Bank Holiday"
# October Bank Holiday (last Monday in October)
self[date(year, OCT, 31) + rd(weekday=MO(-1))] = "October Bank Holiday"
+ # Christmas Day
+ name = "Christmas Day"
+ self[date(year, DEC, 25)] = "Christmas Day"
+ if self.observed and date(year, DEC, 25).weekday() in WEEKEND:
+ self[date(year, DEC, 25) + rd(weekday=MON)] = name + " (Observed)"
+
# St. Stephen's Day
name = "St. Stephen's Day"
self[date(year, DEC, 26)] = name
if self.observed and date(year, DEC, 26).weekday() == SAT:
- self[date(year, DEC, 28)] = name + " (Observed)"
+ self[date(year, DEC, 26) + rd(weekday=MON)] = name + " (Observed)"
elif self.observed and date(year, DEC, 26).weekday() == SUN:
- self[date(year, DEC, 28)] = name + " (Observed)"
+ self[date(year, DEC, 26) + rd(weekday=TUE)] = name + " (Observed)"
class IE(Ireland):
| {"golden_diff": "diff --git a/holidays/countries/ireland.py b/holidays/countries/ireland.py\n--- a/holidays/countries/ireland.py\n+++ b/holidays/countries/ireland.py\n@@ -16,20 +16,37 @@\n from dateutil.easter import easter\n from dateutil.relativedelta import relativedelta as rd, MO\n \n-from holidays.constants import MAR, MAY, JUN, AUG, OCT, DEC\n+from holidays.constants import FEB, MAR, MAY, JUN, AUG, OCT, DEC\n from holidays.constants import MON, TUE, WED, THU, FRI, SAT, SUN, WEEKEND\n from holidays.holiday_base import HolidayBase\n-from .united_kingdom import UnitedKingdom\n \n+class Ireland(HolidayBase):\n+ \"\"\"\n+ Official holidays in Ireland, as declared in the Citizen's Information\n+ bulletin:\n+ https://www.citizensinformation.ie/en/employment/employment_rights_and_conditions/leave_and_holidays/public_holidays_in_ireland.html\n+ \"\"\"\n \n-class Ireland(UnitedKingdom):\n country = \"IE\"\n+ subdivisions = []\n \n def __init__(self, **kwargs):\n HolidayBase.__init__(self, **kwargs)\n \n- def _country_specific(self, year):\n- # Ireland exclusive holidays\n+ def _populate(self, year):\n+ self[date(year, JAN, 1)] = \"New Year's Day\"\n+\n+ # St. Brigid's Day\n+ if year >= 2023:\n+ dt = date(year, FEB, 1)\n+ self[dt] = \"St. Brigid's Day\"\n+\n+ if self.observed and dt.weekday() != FRI:\n+ self[date(year, FEB, 1) + rd(weekday=MO)] = \"St. Brigid's Day (Observed)\"\n+\n+ # One-off day of rememberance and recognition\n+ if year == 2022:\n+ self[date(year, MAR, 18)] = \"Day of Rememberance and Recognition\"\n \n # St. Patrick's Day\n name = \"St. Patrick's Day\"\n@@ -40,7 +57,7 @@\n # Easter Monday\n self[easter(year) + rd(weekday=MO)] = \"Easter Monday\"\n \n- # May Day bank holiday (first Monday in May)\n+ # May bank holiday (first Monday in May)\n if year >= 1978:\n name = \"May Day\"\n if year == 1995:\n@@ -66,18 +83,24 @@\n self[date(year, JUN, 1) + rd(weekday=MO)] = \"June Bank Holiday\"\n \n # Summer bank holiday (first Monday in August)\n- self[date(year, AUG, 1) + rd(weekday=MO)] = \"Summer Bank Holiday\"\n+ self[date(year, AUG, 1) + rd(weekday=MO)] = \"August Bank Holiday\"\n \n # October Bank Holiday (last Monday in October)\n self[date(year, OCT, 31) + rd(weekday=MO(-1))] = \"October Bank Holiday\"\n \n+ # Christmas Day\n+ name = \"Christmas Day\"\n+ self[date(year, DEC, 25)] = \"Christmas Day\"\n+ if self.observed and date(year, DEC, 25).weekday() in WEEKEND:\n+ self[date(year, DEC, 25) + rd(weekday=MON)] = name + \" (Observed)\"\n+\n # St. Stephen's Day\n name = \"St. 
Stephen's Day\"\n self[date(year, DEC, 26)] = name\n if self.observed and date(year, DEC, 26).weekday() == SAT:\n- self[date(year, DEC, 28)] = name + \" (Observed)\"\n+ self[date(year, DEC, 26) + rd(weekday=MON)] = name + \" (Observed)\"\n elif self.observed and date(year, DEC, 26).weekday() == SUN:\n- self[date(year, DEC, 28)] = name + \" (Observed)\"\n+ self[date(year, DEC, 26) + rd(weekday=TUE)] = name + \" (Observed)\"\n \n \n class IE(Ireland):\n", "issue": "Ireland considering UK as base class and hence not being a country itself\nIssue also opened here:\r\nhome-assistant/core#67542\r\n\r\nLooks like Ireland is being considered as being part of the UK which is wrong as not all the holidays in the UK exist, or necessarily exist in Ireland.\r\n\r\nTake a reference on this comment: https://github.com/home-assistant/core/issues/67542#issuecomment-1058079650\n", "before_files": [{"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2022\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd, MO\n\nfrom holidays.constants import MAR, MAY, JUN, AUG, OCT, DEC\nfrom holidays.constants import MON, TUE, WED, THU, FRI, SAT, SUN, WEEKEND\nfrom holidays.holiday_base import HolidayBase\nfrom .united_kingdom import UnitedKingdom\n\n\nclass Ireland(UnitedKingdom):\n country = \"IE\"\n\n def __init__(self, **kwargs):\n HolidayBase.__init__(self, **kwargs)\n\n def _country_specific(self, year):\n # Ireland exclusive holidays\n\n # St. Patrick's Day\n name = \"St. Patrick's Day\"\n self[date(year, MAR, 17)] = name\n if self.observed and date(year, MAR, 17).weekday() in WEEKEND:\n self[date(year, MAR, 17) + rd(weekday=MO)] = name + \" (Observed)\"\n\n # Easter Monday\n self[easter(year) + rd(weekday=MO)] = \"Easter Monday\"\n\n # May Day bank holiday (first Monday in May)\n if year >= 1978:\n name = \"May Day\"\n if year == 1995:\n dt = date(year, MAY, 8)\n else:\n dt = date(year, MAY, 1)\n if dt.weekday() == MON:\n self[dt] = name\n elif dt.weekday() == TUE:\n self[dt + rd(days=+6)] = name\n elif dt.weekday() == WED:\n self[dt + rd(days=+5)] = name\n elif dt.weekday() == THU:\n self[dt + rd(days=+4)] = name\n elif dt.weekday() == FRI:\n self[dt + rd(days=+3)] = name\n elif dt.weekday() == SAT:\n self[dt + rd(days=+2)] = name\n elif dt.weekday() == SUN:\n self[dt + rd(days=+1)] = name\n\n # June bank holiday (first Monday in June)\n self[date(year, JUN, 1) + rd(weekday=MO)] = \"June Bank Holiday\"\n\n # Summer bank holiday (first Monday in August)\n self[date(year, AUG, 1) + rd(weekday=MO)] = \"Summer Bank Holiday\"\n\n # October Bank Holiday (last Monday in October)\n self[date(year, OCT, 31) + rd(weekday=MO(-1))] = \"October Bank Holiday\"\n\n # St. Stephen's Day\n name = \"St. 
Stephen's Day\"\n self[date(year, DEC, 26)] = name\n if self.observed and date(year, DEC, 26).weekday() == SAT:\n self[date(year, DEC, 28)] = name + \" (Observed)\"\n elif self.observed and date(year, DEC, 26).weekday() == SUN:\n self[date(year, DEC, 28)] = name + \" (Observed)\"\n\n\nclass IE(Ireland):\n pass\n\n\nclass IRL(Ireland):\n pass\n", "path": "holidays/countries/ireland.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2022\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd, MO\n\nfrom holidays.constants import FEB, MAR, MAY, JUN, AUG, OCT, DEC\nfrom holidays.constants import MON, TUE, WED, THU, FRI, SAT, SUN, WEEKEND\nfrom holidays.holiday_base import HolidayBase\n\nclass Ireland(HolidayBase):\n \"\"\"\n Official holidays in Ireland, as declared in the Citizen's Information\n bulletin:\n https://www.citizensinformation.ie/en/employment/employment_rights_and_conditions/leave_and_holidays/public_holidays_in_ireland.html\n \"\"\"\n\n country = \"IE\"\n subdivisions = []\n\n def __init__(self, **kwargs):\n HolidayBase.__init__(self, **kwargs)\n\n def _populate(self, year):\n self[date(year, JAN, 1)] = \"New Year's Day\"\n\n # St. Brigid's Day\n if year >= 2023:\n dt = date(year, FEB, 1)\n self[dt] = \"St. Brigid's Day\"\n\n if self.observed and dt.weekday() != FRI:\n self[date(year, FEB, 1) + rd(weekday=MO)] = \"St. Brigid's Day (Observed)\"\n\n # One-off day of rememberance and recognition\n if year == 2022:\n self[date(year, MAR, 18)] = \"Day of Rememberance and Recognition\"\n\n # St. Patrick's Day\n name = \"St. Patrick's Day\"\n self[date(year, MAR, 17)] = name\n if self.observed and date(year, MAR, 17).weekday() in WEEKEND:\n self[date(year, MAR, 17) + rd(weekday=MO)] = name + \" (Observed)\"\n\n # Easter Monday\n self[easter(year) + rd(weekday=MO)] = \"Easter Monday\"\n\n # May bank holiday (first Monday in May)\n if year >= 1978:\n name = \"May Day\"\n if year == 1995:\n dt = date(year, MAY, 8)\n else:\n dt = date(year, MAY, 1)\n if dt.weekday() == MON:\n self[dt] = name\n elif dt.weekday() == TUE:\n self[dt + rd(days=+6)] = name\n elif dt.weekday() == WED:\n self[dt + rd(days=+5)] = name\n elif dt.weekday() == THU:\n self[dt + rd(days=+4)] = name\n elif dt.weekday() == FRI:\n self[dt + rd(days=+3)] = name\n elif dt.weekday() == SAT:\n self[dt + rd(days=+2)] = name\n elif dt.weekday() == SUN:\n self[dt + rd(days=+1)] = name\n\n # June bank holiday (first Monday in June)\n self[date(year, JUN, 1) + rd(weekday=MO)] = \"June Bank Holiday\"\n\n # Summer bank holiday (first Monday in August)\n self[date(year, AUG, 1) + rd(weekday=MO)] = \"August Bank Holiday\"\n\n # October Bank Holiday (last Monday in October)\n self[date(year, OCT, 31) + rd(weekday=MO(-1))] = \"October Bank Holiday\"\n\n # Christmas Day\n name = \"Christmas Day\"\n self[date(year, DEC, 25)] = \"Christmas Day\"\n if self.observed and date(year, DEC, 25).weekday() in WEEKEND:\n self[date(year, DEC, 25) + rd(weekday=MON)] = name + \" (Observed)\"\n\n # St. Stephen's Day\n name = \"St. 
Stephen's Day\"\n self[date(year, DEC, 26)] = name\n if self.observed and date(year, DEC, 26).weekday() == SAT:\n self[date(year, DEC, 26) + rd(weekday=MON)] = name + \" (Observed)\"\n elif self.observed and date(year, DEC, 26).weekday() == SUN:\n self[date(year, DEC, 26) + rd(weekday=TUE)] = name + \" (Observed)\"\n\n\nclass IE(Ireland):\n pass\n\n\nclass IRL(Ireland):\n pass\n", "path": "holidays/countries/ireland.py"}]} | 1,391 | 957 |
gh_patches_debug_6921 | rasdani/github-patches | git_diff | plotly__dash-2513 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Exception when property of patched_fig is viewed
I know that it is currently not supported to view properties of `patch_fig=Patch()` but when e.g. iterating over trace names like so:
```
for trace in patched_fig['data']:
print(trace['name'])
```
no exception or error message is thrown but an endless stream of
```
...
<dash._patch.Patch object at 0x7f3b89a89b80>
<dash._patch.Patch object at 0x7f3b8305c0a0>
<dash._patch.Patch object at 0x7f3b89a89b80>
<dash._patch.Patch object at 0x7f3b8305c0a0>
<dash._patch.Patch object at 0x7f3b89a89b80>
<dash._patch.Patch object at 0x7f3b8305c0a0>
...
```
This is not exactly intended right?
I got there by trying to delete a trace of patched_fig by its name which otherwise appears not be possible (or is it?)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dash/_patch.py`
Content:
```
1 def _operation(name, location, **kwargs):
2 return {"operation": name, "location": location, "params": dict(**kwargs)}
3
4
5 _noop = object()
6
7
8 def validate_slice(obj):
9 if isinstance(obj, slice):
10 raise TypeError("a slice is not a valid index for patch")
11
12
13 class Patch:
14 """
15 Patch a callback output value
16
17 Act like a proxy of the output prop value on the frontend.
18
19 Supported prop types: Dictionaries and lists.
20 """
21
22 def __init__(self, location=None, parent=None):
23 if location is not None:
24 self._location = location
25 else:
26 # pylint: disable=consider-using-ternary
27 self._location = (parent and parent._location) or []
28 if parent is not None:
29 self._operations = parent._operations
30 else:
31 self._operations = []
32
33 def __getstate__(self):
34 return vars(self)
35
36 def __setstate__(self, state):
37 vars(self).update(state)
38
39 def __getitem__(self, item):
40 validate_slice(item)
41 return Patch(location=self._location + [item], parent=self)
42
43 def __getattr__(self, item):
44 if item == "tolist":
45 # to_json fix
46 raise AttributeError
47 if item == "_location":
48 return self._location
49 if item == "_operations":
50 return self._operations
51 return self.__getitem__(item)
52
53 def __setattr__(self, key, value):
54 if key in ("_location", "_operations"):
55 self.__dict__[key] = value
56 else:
57 self.__setitem__(key, value)
58
59 def __delattr__(self, item):
60 self.__delitem__(item)
61
62 def __setitem__(self, key, value):
63 validate_slice(key)
64 if value is _noop:
65 # The += set themselves.
66 return
67 self._operations.append(
68 _operation(
69 "Assign",
70 self._location + [key],
71 value=value,
72 )
73 )
74
75 def __delitem__(self, key):
76 validate_slice(key)
77 self._operations.append(_operation("Delete", self._location + [key]))
78
79 def __iadd__(self, other):
80 if isinstance(other, (list, tuple)):
81 self.extend(other)
82 else:
83 self._operations.append(_operation("Add", self._location, value=other))
84 return _noop
85
86 def __isub__(self, other):
87 self._operations.append(_operation("Sub", self._location, value=other))
88 return _noop
89
90 def __imul__(self, other):
91 self._operations.append(_operation("Mul", self._location, value=other))
92 return _noop
93
94 def __itruediv__(self, other):
95 self._operations.append(_operation("Div", self._location, value=other))
96 return _noop
97
98 def __ior__(self, other):
99 self.update(E=other)
100 return _noop
101
102 def append(self, item):
103 """Add the item to the end of a list"""
104 self._operations.append(_operation("Append", self._location, value=item))
105
106 def prepend(self, item):
107 """Add the item to the start of a list"""
108 self._operations.append(_operation("Prepend", self._location, value=item))
109
110 def insert(self, index, item):
111 """Add the item at the index of a list"""
112 self._operations.append(
113 _operation("Insert", self._location, value=item, index=index)
114 )
115
116 def clear(self):
117 """Remove all items in a list"""
118 self._operations.append(_operation("Clear", self._location))
119
120 def reverse(self):
121 """Reversal of the order of items in a list"""
122 self._operations.append(_operation("Reverse", self._location))
123
124 def extend(self, item):
125 """Add all the items to the end of a list"""
126 if not isinstance(item, (list, tuple)):
127 raise TypeError(f"{item} should be a list or tuple")
128 self._operations.append(_operation("Extend", self._location, value=item))
129
130 def remove(self, item):
131 """filter the item out of a list on the frontend"""
132 self._operations.append(_operation("Remove", self._location, value=item))
133
134 def update(self, E=None, **F):
135 """Merge a dict or keyword arguments with another dictionary"""
136 value = E or {}
137 value.update(F)
138 self._operations.append(_operation("Merge", self._location, value=value))
139
140 # pylint: disable=no-self-use
141 def sort(self):
142 raise KeyError(
143 "sort is reserved for future use, use brackets to access this key on your object"
144 )
145
146 def to_plotly_json(self):
147 return {
148 "__dash_patch_update": "__dash_patch_update",
149 "operations": self._operations,
150 }
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dash/_patch.py b/dash/_patch.py
--- a/dash/_patch.py
+++ b/dash/_patch.py
@@ -99,6 +99,12 @@
self.update(E=other)
return _noop
+ def __iter__(self):
+ raise TypeError("Patch objects are write-only, you cannot iterate them.")
+
+ def __repr__(self):
+ return f"<write-only dash.Patch object at {self._location}>"
+
def append(self, item):
"""Add the item to the end of a list"""
self._operations.append(_operation("Append", self._location, value=item))
| {"golden_diff": "diff --git a/dash/_patch.py b/dash/_patch.py\n--- a/dash/_patch.py\n+++ b/dash/_patch.py\n@@ -99,6 +99,12 @@\n self.update(E=other)\n return _noop\n \n+ def __iter__(self):\n+ raise TypeError(\"Patch objects are write-only, you cannot iterate them.\")\n+\n+ def __repr__(self):\n+ return f\"<write-only dash.Patch object at {self._location}>\"\n+\n def append(self, item):\n \"\"\"Add the item to the end of a list\"\"\"\n self._operations.append(_operation(\"Append\", self._location, value=item))\n", "issue": "[BUG] Exception when property of patched_fig is viewed\nI know that it is currently not supported to view properties of `patch_fig=Patch()` but when e.g. iterating over trace names like so:\r\n```\r\n for trace in patched_fig['data']:\r\n print(trace['name'])\r\n```\r\nno exception or error message is thrown but an endless stream of \r\n\r\n```\r\n...\r\n<dash._patch.Patch object at 0x7f3b89a89b80>\r\n<dash._patch.Patch object at 0x7f3b8305c0a0>\r\n<dash._patch.Patch object at 0x7f3b89a89b80>\r\n<dash._patch.Patch object at 0x7f3b8305c0a0>\r\n<dash._patch.Patch object at 0x7f3b89a89b80>\r\n<dash._patch.Patch object at 0x7f3b8305c0a0>\r\n...\r\n```\r\nThis is not exactly intended right?\r\n\r\nI got there by trying to delete a trace of patched_fig by its name which otherwise appears not be possible (or is it?)\r\n\n", "before_files": [{"content": "def _operation(name, location, **kwargs):\n return {\"operation\": name, \"location\": location, \"params\": dict(**kwargs)}\n\n\n_noop = object()\n\n\ndef validate_slice(obj):\n if isinstance(obj, slice):\n raise TypeError(\"a slice is not a valid index for patch\")\n\n\nclass Patch:\n \"\"\"\n Patch a callback output value\n\n Act like a proxy of the output prop value on the frontend.\n\n Supported prop types: Dictionaries and lists.\n \"\"\"\n\n def __init__(self, location=None, parent=None):\n if location is not None:\n self._location = location\n else:\n # pylint: disable=consider-using-ternary\n self._location = (parent and parent._location) or []\n if parent is not None:\n self._operations = parent._operations\n else:\n self._operations = []\n\n def __getstate__(self):\n return vars(self)\n\n def __setstate__(self, state):\n vars(self).update(state)\n\n def __getitem__(self, item):\n validate_slice(item)\n return Patch(location=self._location + [item], parent=self)\n\n def __getattr__(self, item):\n if item == \"tolist\":\n # to_json fix\n raise AttributeError\n if item == \"_location\":\n return self._location\n if item == \"_operations\":\n return self._operations\n return self.__getitem__(item)\n\n def __setattr__(self, key, value):\n if key in (\"_location\", \"_operations\"):\n self.__dict__[key] = value\n else:\n self.__setitem__(key, value)\n\n def __delattr__(self, item):\n self.__delitem__(item)\n\n def __setitem__(self, key, value):\n validate_slice(key)\n if value is _noop:\n # The += set themselves.\n return\n self._operations.append(\n _operation(\n \"Assign\",\n self._location + [key],\n value=value,\n )\n )\n\n def __delitem__(self, key):\n validate_slice(key)\n self._operations.append(_operation(\"Delete\", self._location + [key]))\n\n def __iadd__(self, other):\n if isinstance(other, (list, tuple)):\n self.extend(other)\n else:\n self._operations.append(_operation(\"Add\", self._location, value=other))\n return _noop\n\n def __isub__(self, other):\n self._operations.append(_operation(\"Sub\", self._location, value=other))\n return _noop\n\n def __imul__(self, other):\n 
self._operations.append(_operation(\"Mul\", self._location, value=other))\n return _noop\n\n def __itruediv__(self, other):\n self._operations.append(_operation(\"Div\", self._location, value=other))\n return _noop\n\n def __ior__(self, other):\n self.update(E=other)\n return _noop\n\n def append(self, item):\n \"\"\"Add the item to the end of a list\"\"\"\n self._operations.append(_operation(\"Append\", self._location, value=item))\n\n def prepend(self, item):\n \"\"\"Add the item to the start of a list\"\"\"\n self._operations.append(_operation(\"Prepend\", self._location, value=item))\n\n def insert(self, index, item):\n \"\"\"Add the item at the index of a list\"\"\"\n self._operations.append(\n _operation(\"Insert\", self._location, value=item, index=index)\n )\n\n def clear(self):\n \"\"\"Remove all items in a list\"\"\"\n self._operations.append(_operation(\"Clear\", self._location))\n\n def reverse(self):\n \"\"\"Reversal of the order of items in a list\"\"\"\n self._operations.append(_operation(\"Reverse\", self._location))\n\n def extend(self, item):\n \"\"\"Add all the items to the end of a list\"\"\"\n if not isinstance(item, (list, tuple)):\n raise TypeError(f\"{item} should be a list or tuple\")\n self._operations.append(_operation(\"Extend\", self._location, value=item))\n\n def remove(self, item):\n \"\"\"filter the item out of a list on the frontend\"\"\"\n self._operations.append(_operation(\"Remove\", self._location, value=item))\n\n def update(self, E=None, **F):\n \"\"\"Merge a dict or keyword arguments with another dictionary\"\"\"\n value = E or {}\n value.update(F)\n self._operations.append(_operation(\"Merge\", self._location, value=value))\n\n # pylint: disable=no-self-use\n def sort(self):\n raise KeyError(\n \"sort is reserved for future use, use brackets to access this key on your object\"\n )\n\n def to_plotly_json(self):\n return {\n \"__dash_patch_update\": \"__dash_patch_update\",\n \"operations\": self._operations,\n }\n", "path": "dash/_patch.py"}], "after_files": [{"content": "def _operation(name, location, **kwargs):\n return {\"operation\": name, \"location\": location, \"params\": dict(**kwargs)}\n\n\n_noop = object()\n\n\ndef validate_slice(obj):\n if isinstance(obj, slice):\n raise TypeError(\"a slice is not a valid index for patch\")\n\n\nclass Patch:\n \"\"\"\n Patch a callback output value\n\n Act like a proxy of the output prop value on the frontend.\n\n Supported prop types: Dictionaries and lists.\n \"\"\"\n\n def __init__(self, location=None, parent=None):\n if location is not None:\n self._location = location\n else:\n # pylint: disable=consider-using-ternary\n self._location = (parent and parent._location) or []\n if parent is not None:\n self._operations = parent._operations\n else:\n self._operations = []\n\n def __getstate__(self):\n return vars(self)\n\n def __setstate__(self, state):\n vars(self).update(state)\n\n def __getitem__(self, item):\n validate_slice(item)\n return Patch(location=self._location + [item], parent=self)\n\n def __getattr__(self, item):\n if item == \"tolist\":\n # to_json fix\n raise AttributeError\n if item == \"_location\":\n return self._location\n if item == \"_operations\":\n return self._operations\n return self.__getitem__(item)\n\n def __setattr__(self, key, value):\n if key in (\"_location\", \"_operations\"):\n self.__dict__[key] = value\n else:\n self.__setitem__(key, value)\n\n def __delattr__(self, item):\n self.__delitem__(item)\n\n def __setitem__(self, key, value):\n validate_slice(key)\n if value 
is _noop:\n # The += set themselves.\n return\n self._operations.append(\n _operation(\n \"Assign\",\n self._location + [key],\n value=value,\n )\n )\n\n def __delitem__(self, key):\n validate_slice(key)\n self._operations.append(_operation(\"Delete\", self._location + [key]))\n\n def __iadd__(self, other):\n if isinstance(other, (list, tuple)):\n self.extend(other)\n else:\n self._operations.append(_operation(\"Add\", self._location, value=other))\n return _noop\n\n def __isub__(self, other):\n self._operations.append(_operation(\"Sub\", self._location, value=other))\n return _noop\n\n def __imul__(self, other):\n self._operations.append(_operation(\"Mul\", self._location, value=other))\n return _noop\n\n def __itruediv__(self, other):\n self._operations.append(_operation(\"Div\", self._location, value=other))\n return _noop\n\n def __ior__(self, other):\n self.update(E=other)\n return _noop\n\n def __iter__(self):\n raise TypeError(\"Patch objects are write-only, you cannot iterate them.\")\n\n def __repr__(self):\n return f\"<write-only dash.Patch object at {self._location}>\"\n\n def append(self, item):\n \"\"\"Add the item to the end of a list\"\"\"\n self._operations.append(_operation(\"Append\", self._location, value=item))\n\n def prepend(self, item):\n \"\"\"Add the item to the start of a list\"\"\"\n self._operations.append(_operation(\"Prepend\", self._location, value=item))\n\n def insert(self, index, item):\n \"\"\"Add the item at the index of a list\"\"\"\n self._operations.append(\n _operation(\"Insert\", self._location, value=item, index=index)\n )\n\n def clear(self):\n \"\"\"Remove all items in a list\"\"\"\n self._operations.append(_operation(\"Clear\", self._location))\n\n def reverse(self):\n \"\"\"Reversal of the order of items in a list\"\"\"\n self._operations.append(_operation(\"Reverse\", self._location))\n\n def extend(self, item):\n \"\"\"Add all the items to the end of a list\"\"\"\n if not isinstance(item, (list, tuple)):\n raise TypeError(f\"{item} should be a list or tuple\")\n self._operations.append(_operation(\"Extend\", self._location, value=item))\n\n def remove(self, item):\n \"\"\"filter the item out of a list on the frontend\"\"\"\n self._operations.append(_operation(\"Remove\", self._location, value=item))\n\n def update(self, E=None, **F):\n \"\"\"Merge a dict or keyword arguments with another dictionary\"\"\"\n value = E or {}\n value.update(F)\n self._operations.append(_operation(\"Merge\", self._location, value=value))\n\n # pylint: disable=no-self-use\n def sort(self):\n raise KeyError(\n \"sort is reserved for future use, use brackets to access this key on your object\"\n )\n\n def to_plotly_json(self):\n return {\n \"__dash_patch_update\": \"__dash_patch_update\",\n \"operations\": self._operations,\n }\n", "path": "dash/_patch.py"}]} | 1,924 | 145 |
gh_patches_debug_28460 | rasdani/github-patches | git_diff | mindsdb__mindsdb-2678 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Fix scylladb error when connecting with secure bundle
When connecting with `secure_connect_bundle` users got unknown secure_connect_bundle path error.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/integrations/handlers/scylla_handler/scylla_handler.py`
Content:
```
1 import os
2 from mindsdb.integrations.libs.base_handler import DatabaseHandler
3 from mindsdb_sql import parse_sql
4 from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
5 from cassandra.cluster import Cluster
6 from cassandra.auth import PlainTextAuthProvider
7 from mindsdb.integrations.libs.response import (
8 HandlerStatusResponse as StatusResponse,
9 HandlerResponse as Response,
10 RESPONSE_TYPE
11 )
12 from mindsdb.utilities.log import log
13 import pandas as pd
14 from mindsdb_sql.parser.ast.base import ASTNode
15
16
17 class ScyllaHandler(DatabaseHandler):
18 """
19 This handler handles connection and execution of the Scylla statements.
20 """
21 name = 'scylla'
22
23 def __init__(self, name=None, **kwargs):
24 super().__init__(name)
25 self.parser = parse_sql
26 self.connection_args = kwargs.get('connection_data')
27 self.session = None
28 self.is_connected = False
29
30 def connect(self):
31 """
32 Handles the connection to a Scylla keystore.
33 """
34 if self.is_connected is True:
35 return self.session
36
37 auth_provider = PlainTextAuthProvider(
38 username=self.connection_args['user'], password=self.connection_args['password']
39 )
40
41 connection_props = {
42 'auth_provider': auth_provider
43 }
44
45 if self.connection_args['protocol_version'] is not None:
46 connection_props['protocol_version'] = self.connection_args['protocol_version']
47
48 secure_connect_bundle = self.connection_args.get('secure_connect_bundle')
49
50 if secure_connect_bundle is not None:
51 if os.path.isfile(self.secure_connect_bundle) is False:
52 raise Exception("Secure_connect_bundle' must be path to the file")
53 connection_props['cloud'] = {
54 'secure_connect_bundle': self.secure_connect_bundle
55 }
56 else:
57 connection_props['contact_points'] = [self.connection_args['host']]
58 connection_props['port'] = int(self.connection_args['port'])
59
60 cluster = Cluster(**connection_props)
61 session = cluster.connect(self.connection_args['keyspace'])
62
63 self.is_connected = True
64 self.session = session
65 return self.session
66
67 def check_connection(self) -> StatusResponse:
68 """
69 Check the connection of the Scylla database
70 :return: success status and error message if error occurs
71 """
72 response = StatusResponse(False)
73
74 try:
75 session = self.connect()
76 # TODO: change the healthcheck
77 session.execute('SELECT release_version FROM system.local').one()
78 response.success = True
79 except Exception as e:
80 log.error(f'Error connecting to Scylla {self.connection_args["keyspace"]}, {e}!')
81 response.error_message = e
82
83 if response.success is False and self.is_connected is True:
84 self.is_connected = False
85
86 return response
87
88 def native_query(self, query: str) -> Response:
89 """
90 Receive SQL query and runs it
91 :param query: The SQL query to run in MySQL
92 :return: returns the records from the current recordset
93 """
94 session = self.connect()
95 try:
96 resp = session.execute(query).all()
97 if resp:
98 response = Response(
99 RESPONSE_TYPE.TABLE,
100 pd.DataFrame(
101 resp
102 )
103 )
104 else:
105 response = Response(RESPONSE_TYPE.OK)
106 except Exception as e:
107 log.error(f'Error running query: {query} on {self.connection_args["keyspace"]}!')
108 response = Response(
109 RESPONSE_TYPE.ERROR,
110 error_message=str(e)
111 )
112 return response
113
114 def query(self, query: ASTNode) -> Response:
115 """
116 Retrieve the data from the SQL statement.
117 """
118 renderer = SqlalchemyRender('mysql')
119 query_str = renderer.get_string(query, with_failback=True)
120 return self.native_query(query_str)
121
122 def get_tables(self) -> Response:
123 """
124 Get a list with all of the tabels in MySQL
125 """
126 q = "DESCRIBE TABLES;"
127 result = self.native_query(q)
128 df = result.data_frame
129 result.data_frame = df.rename(columns={df.columns[0]: 'table_name'})
130 return result
131
132 def get_columns(self, table_name) -> Response:
133 """
134 Show details about the table
135 """
136 q = f"DESCRIBE {table_name};"
137 result = self.native_query(q)
138 return result
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py b/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py
--- a/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py
+++ b/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py
@@ -41,24 +41,21 @@
connection_props = {
'auth_provider': auth_provider
}
-
- if self.connection_args['protocol_version'] is not None:
- connection_props['protocol_version'] = self.connection_args['protocol_version']
-
+ connection_props['protocol_version'] = self.connection_args.get('protocol_version', 4)
secure_connect_bundle = self.connection_args.get('secure_connect_bundle')
if secure_connect_bundle is not None:
- if os.path.isfile(self.secure_connect_bundle) is False:
+ if os.path.isfile(secure_connect_bundle) is False:
raise Exception("Secure_connect_bundle' must be path to the file")
connection_props['cloud'] = {
- 'secure_connect_bundle': self.secure_connect_bundle
+ 'secure_connect_bundle': secure_connect_bundle
}
else:
connection_props['contact_points'] = [self.connection_args['host']]
connection_props['port'] = int(self.connection_args['port'])
cluster = Cluster(**connection_props)
- session = cluster.connect(self.connection_args['keyspace'])
+ session = cluster.connect(self.connection_args.get('keyspace'))
self.is_connected = True
self.session = session
| {"golden_diff": "diff --git a/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py b/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py\n--- a/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py\n+++ b/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py\n@@ -41,24 +41,21 @@\n connection_props = {\n 'auth_provider': auth_provider\n }\n-\n- if self.connection_args['protocol_version'] is not None:\n- connection_props['protocol_version'] = self.connection_args['protocol_version']\n- \n+ connection_props['protocol_version'] = self.connection_args.get('protocol_version', 4)\n secure_connect_bundle = self.connection_args.get('secure_connect_bundle')\n \n if secure_connect_bundle is not None:\n- if os.path.isfile(self.secure_connect_bundle) is False:\n+ if os.path.isfile(secure_connect_bundle) is False:\n raise Exception(\"Secure_connect_bundle' must be path to the file\")\n connection_props['cloud'] = {\n- 'secure_connect_bundle': self.secure_connect_bundle\n+ 'secure_connect_bundle': secure_connect_bundle\n }\n else:\n connection_props['contact_points'] = [self.connection_args['host']]\n connection_props['port'] = int(self.connection_args['port'])\n \n cluster = Cluster(**connection_props)\n- session = cluster.connect(self.connection_args['keyspace'])\n+ session = cluster.connect(self.connection_args.get('keyspace'))\n \n self.is_connected = True\n self.session = session\n", "issue": "[BUG] Fix scylladb error when connecting with secure bundle\nWhen connecting with `secure_connect_bundle` users got unknown secure_connect_bundle path error.\n", "before_files": [{"content": "import os\nfrom mindsdb.integrations.libs.base_handler import DatabaseHandler\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\nfrom cassandra.cluster import Cluster\nfrom cassandra.auth import PlainTextAuthProvider\nfrom mindsdb.integrations.libs.response import (\n HandlerStatusResponse as StatusResponse,\n HandlerResponse as Response,\n RESPONSE_TYPE\n)\nfrom mindsdb.utilities.log import log\nimport pandas as pd\nfrom mindsdb_sql.parser.ast.base import ASTNode\n\n\nclass ScyllaHandler(DatabaseHandler):\n \"\"\"\n This handler handles connection and execution of the Scylla statements.\n \"\"\"\n name = 'scylla'\n\n def __init__(self, name=None, **kwargs):\n super().__init__(name)\n self.parser = parse_sql\n self.connection_args = kwargs.get('connection_data')\n self.session = None\n self.is_connected = False\n\n def connect(self):\n \"\"\"\n Handles the connection to a Scylla keystore.\n \"\"\"\n if self.is_connected is True:\n return self.session\n\n auth_provider = PlainTextAuthProvider(\n username=self.connection_args['user'], password=self.connection_args['password']\n )\n\n connection_props = {\n 'auth_provider': auth_provider\n }\n\n if self.connection_args['protocol_version'] is not None:\n connection_props['protocol_version'] = self.connection_args['protocol_version']\n \n secure_connect_bundle = self.connection_args.get('secure_connect_bundle')\n\n if secure_connect_bundle is not None:\n if os.path.isfile(self.secure_connect_bundle) is False:\n raise Exception(\"Secure_connect_bundle' must be path to the file\")\n connection_props['cloud'] = {\n 'secure_connect_bundle': self.secure_connect_bundle\n }\n else:\n connection_props['contact_points'] = [self.connection_args['host']]\n connection_props['port'] = int(self.connection_args['port'])\n\n cluster = Cluster(**connection_props)\n session = 
cluster.connect(self.connection_args['keyspace'])\n\n self.is_connected = True\n self.session = session\n return self.session\n\n def check_connection(self) -> StatusResponse:\n \"\"\"\n Check the connection of the Scylla database\n :return: success status and error message if error occurs\n \"\"\"\n response = StatusResponse(False)\n\n try:\n session = self.connect()\n # TODO: change the healthcheck\n session.execute('SELECT release_version FROM system.local').one()\n response.success = True\n except Exception as e:\n log.error(f'Error connecting to Scylla {self.connection_args[\"keyspace\"]}, {e}!')\n response.error_message = e\n\n if response.success is False and self.is_connected is True:\n self.is_connected = False\n\n return response\n\n def native_query(self, query: str) -> Response:\n \"\"\"\n Receive SQL query and runs it\n :param query: The SQL query to run in MySQL\n :return: returns the records from the current recordset\n \"\"\"\n session = self.connect()\n try:\n resp = session.execute(query).all()\n if resp:\n response = Response(\n RESPONSE_TYPE.TABLE,\n pd.DataFrame(\n resp\n )\n )\n else:\n response = Response(RESPONSE_TYPE.OK)\n except Exception as e:\n log.error(f'Error running query: {query} on {self.connection_args[\"keyspace\"]}!')\n response = Response(\n RESPONSE_TYPE.ERROR,\n error_message=str(e)\n )\n return response\n\n def query(self, query: ASTNode) -> Response:\n \"\"\"\n Retrieve the data from the SQL statement.\n \"\"\"\n renderer = SqlalchemyRender('mysql')\n query_str = renderer.get_string(query, with_failback=True)\n return self.native_query(query_str)\n\n def get_tables(self) -> Response:\n \"\"\"\n Get a list with all of the tabels in MySQL\n \"\"\"\n q = \"DESCRIBE TABLES;\"\n result = self.native_query(q)\n df = result.data_frame\n result.data_frame = df.rename(columns={df.columns[0]: 'table_name'})\n return result\n\n def get_columns(self, table_name) -> Response:\n \"\"\"\n Show details about the table\n \"\"\"\n q = f\"DESCRIBE {table_name};\"\n result = self.native_query(q)\n return result\n", "path": "mindsdb/integrations/handlers/scylla_handler/scylla_handler.py"}], "after_files": [{"content": "import os\nfrom mindsdb.integrations.libs.base_handler import DatabaseHandler\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\nfrom cassandra.cluster import Cluster\nfrom cassandra.auth import PlainTextAuthProvider\nfrom mindsdb.integrations.libs.response import (\n HandlerStatusResponse as StatusResponse,\n HandlerResponse as Response,\n RESPONSE_TYPE\n)\nfrom mindsdb.utilities.log import log\nimport pandas as pd\nfrom mindsdb_sql.parser.ast.base import ASTNode\n\n\nclass ScyllaHandler(DatabaseHandler):\n \"\"\"\n This handler handles connection and execution of the Scylla statements.\n \"\"\"\n name = 'scylla'\n\n def __init__(self, name=None, **kwargs):\n super().__init__(name)\n self.parser = parse_sql\n self.connection_args = kwargs.get('connection_data')\n self.session = None\n self.is_connected = False\n\n def connect(self):\n \"\"\"\n Handles the connection to a Scylla keystore.\n \"\"\"\n if self.is_connected is True:\n return self.session\n\n auth_provider = PlainTextAuthProvider(\n username=self.connection_args['user'], password=self.connection_args['password']\n )\n\n connection_props = {\n 'auth_provider': auth_provider\n }\n connection_props['protocol_version'] = self.connection_args.get('protocol_version', 4)\n secure_connect_bundle = 
self.connection_args.get('secure_connect_bundle')\n\n if secure_connect_bundle is not None:\n if os.path.isfile(secure_connect_bundle) is False:\n raise Exception(\"Secure_connect_bundle' must be path to the file\")\n connection_props['cloud'] = {\n 'secure_connect_bundle': secure_connect_bundle\n }\n else:\n connection_props['contact_points'] = [self.connection_args['host']]\n connection_props['port'] = int(self.connection_args['port'])\n\n cluster = Cluster(**connection_props)\n session = cluster.connect(self.connection_args.get('keyspace'))\n\n self.is_connected = True\n self.session = session\n return self.session\n\n def check_connection(self) -> StatusResponse:\n \"\"\"\n Check the connection of the Scylla database\n :return: success status and error message if error occurs\n \"\"\"\n response = StatusResponse(False)\n\n try:\n session = self.connect()\n # TODO: change the healthcheck\n session.execute('SELECT release_version FROM system.local').one()\n response.success = True\n except Exception as e:\n log.error(f'Error connecting to Scylla {self.connection_args[\"keyspace\"]}, {e}!')\n response.error_message = e\n\n if response.success is False and self.is_connected is True:\n self.is_connected = False\n\n return response\n\n def native_query(self, query: str) -> Response:\n \"\"\"\n Receive SQL query and runs it\n :param query: The SQL query to run in MySQL\n :return: returns the records from the current recordset\n \"\"\"\n session = self.connect()\n try:\n resp = session.execute(query).all()\n if resp:\n response = Response(\n RESPONSE_TYPE.TABLE,\n pd.DataFrame(\n resp\n )\n )\n else:\n response = Response(RESPONSE_TYPE.OK)\n except Exception as e:\n log.error(f'Error running query: {query} on {self.connection_args[\"keyspace\"]}!')\n response = Response(\n RESPONSE_TYPE.ERROR,\n error_message=str(e)\n )\n return response\n\n def query(self, query: ASTNode) -> Response:\n \"\"\"\n Retrieve the data from the SQL statement.\n \"\"\"\n renderer = SqlalchemyRender('mysql')\n query_str = renderer.get_string(query, with_failback=True)\n return self.native_query(query_str)\n\n def get_tables(self) -> Response:\n \"\"\"\n Get a list with all of the tabels in MySQL\n \"\"\"\n q = \"DESCRIBE TABLES;\"\n result = self.native_query(q)\n df = result.data_frame\n result.data_frame = df.rename(columns={df.columns[0]: 'table_name'})\n return result\n\n def get_columns(self, table_name) -> Response:\n \"\"\"\n Show details about the table\n \"\"\"\n q = f\"DESCRIBE {table_name};\"\n result = self.native_query(q)\n return result\n", "path": "mindsdb/integrations/handlers/scylla_handler/scylla_handler.py"}]} | 1,546 | 350 |
gh_patches_debug_11687 | rasdani/github-patches | git_diff | pypa__setuptools-2907 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`distutils` submodules being loaded from the stdlib
It seems the issue is that `distutils.sysconfig` is being loaded from the stdlib, even though [the distutils hack has an explicit check that submodules are loaded from the locally-bundled copy](https://github.com/pypa/setuptools/blob/dd5a2cec373ffe7eefc087c1cd06fb4e491a7e88/_distutils_hack/__init__.py#L55-L57).
_Originally posted by @jaraco in https://github.com/pypa/distutils/issues/16#issuecomment-980043534_
`distutils` submodules being loaded from the stdlib
It seems the issue is that `distutils.sysconfig` is being loaded from the stdlib, even though [the distutils hack has an explicit check that submodules are loaded from the locally-bundled copy](https://github.com/pypa/setuptools/blob/dd5a2cec373ffe7eefc087c1cd06fb4e491a7e88/_distutils_hack/__init__.py#L55-L57).
_Originally posted by @jaraco in https://github.com/pypa/distutils/issues/16#issuecomment-980043534_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `_distutils_hack/__init__.py`
Content:
```
1 import sys
2 import os
3 import re
4 import importlib
5 import warnings
6
7
8 is_pypy = '__pypy__' in sys.builtin_module_names
9
10
11 warnings.filterwarnings('ignore',
12 r'.+ distutils\b.+ deprecated',
13 DeprecationWarning)
14
15
16 def warn_distutils_present():
17 if 'distutils' not in sys.modules:
18 return
19 if is_pypy and sys.version_info < (3, 7):
20 # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
21 # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
22 return
23 warnings.warn(
24 "Distutils was imported before Setuptools, but importing Setuptools "
25 "also replaces the `distutils` module in `sys.modules`. This may lead "
26 "to undesirable behaviors or errors. To avoid these issues, avoid "
27 "using distutils directly, ensure that setuptools is installed in the "
28 "traditional way (e.g. not an editable install), and/or make sure "
29 "that setuptools is always imported before distutils.")
30
31
32 def clear_distutils():
33 if 'distutils' not in sys.modules:
34 return
35 warnings.warn("Setuptools is replacing distutils.")
36 mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
37 for name in mods:
38 del sys.modules[name]
39
40
41 def enabled():
42 """
43 Allow selection of distutils by environment variable.
44 """
45 which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
46 return which == 'local'
47
48
49 def ensure_local_distutils():
50 clear_distutils()
51 distutils = importlib.import_module('setuptools._distutils')
52 distutils.__name__ = 'distutils'
53 sys.modules['distutils'] = distutils
54
55 # sanity check that submodules load as expected
56 core = importlib.import_module('distutils.core')
57 assert '_distutils' in core.__file__, core.__file__
58
59
60 def do_override():
61 """
62 Ensure that the local copy of distutils is preferred over stdlib.
63
64 See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
65 for more motivation.
66 """
67 if enabled():
68 warn_distutils_present()
69 ensure_local_distutils()
70
71
72 class DistutilsMetaFinder:
73 def find_spec(self, fullname, path, target=None):
74 if path is not None:
75 return
76
77 method_name = 'spec_for_{fullname}'.format(**locals())
78 method = getattr(self, method_name, lambda: None)
79 return method()
80
81 def spec_for_distutils(self):
82 import importlib.abc
83 import importlib.util
84
85 class DistutilsLoader(importlib.abc.Loader):
86
87 def create_module(self, spec):
88 return importlib.import_module('setuptools._distutils')
89
90 def exec_module(self, module):
91 pass
92
93 return importlib.util.spec_from_loader('distutils', DistutilsLoader())
94
95 def spec_for_pip(self):
96 """
97 Ensure stdlib distutils when running under pip.
98 See pypa/pip#8761 for rationale.
99 """
100 if self.pip_imported_during_build():
101 return
102 clear_distutils()
103 self.spec_for_distutils = lambda: None
104
105 @staticmethod
106 def pip_imported_during_build():
107 """
108 Detect if pip is being imported in a build script. Ref #2355.
109 """
110 import traceback
111 return any(
112 frame.f_globals['__file__'].endswith('setup.py')
113 for frame, line in traceback.walk_stack(None)
114 )
115
116
117 DISTUTILS_FINDER = DistutilsMetaFinder()
118
119
120 def add_shim():
121 sys.meta_path.insert(0, DISTUTILS_FINDER)
122
123
124 def remove_shim():
125 try:
126 sys.meta_path.remove(DISTUTILS_FINDER)
127 except ValueError:
128 pass
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py
--- a/_distutils_hack/__init__.py
+++ b/_distutils_hack/__init__.py
@@ -48,11 +48,15 @@
def ensure_local_distutils():
clear_distutils()
- distutils = importlib.import_module('setuptools._distutils')
- distutils.__name__ = 'distutils'
- sys.modules['distutils'] = distutils
- # sanity check that submodules load as expected
+ # With the DistutilsMetaFinder in place,
+ # perform an import to cause distutils to be
+ # loaded from setuptools._distutils. Ref #2906.
+ add_shim()
+ importlib.import_module('distutils')
+ remove_shim()
+
+ # check that submodules load as expected
core = importlib.import_module('distutils.core')
assert '_distutils' in core.__file__, core.__file__
| {"golden_diff": "diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py\n--- a/_distutils_hack/__init__.py\n+++ b/_distutils_hack/__init__.py\n@@ -48,11 +48,15 @@\n \n def ensure_local_distutils():\n clear_distutils()\n- distutils = importlib.import_module('setuptools._distutils')\n- distutils.__name__ = 'distutils'\n- sys.modules['distutils'] = distutils\n \n- # sanity check that submodules load as expected\n+ # With the DistutilsMetaFinder in place,\n+ # perform an import to cause distutils to be\n+ # loaded from setuptools._distutils. Ref #2906.\n+ add_shim()\n+ importlib.import_module('distutils')\n+ remove_shim()\n+\n+ # check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n", "issue": "`distutils` submodules being loaded from the stdlib\nIt seems the issue is that `distutils.sysconfig` is being loaded from the stdlib, even though [the distutils hack has an explicit check that submodules are loaded from the locally-bundled copy](https://github.com/pypa/setuptools/blob/dd5a2cec373ffe7eefc087c1cd06fb4e491a7e88/_distutils_hack/__init__.py#L55-L57).\r\n\r\n_Originally posted by @jaraco in https://github.com/pypa/distutils/issues/16#issuecomment-980043534_\n`distutils` submodules being loaded from the stdlib\nIt seems the issue is that `distutils.sysconfig` is being loaded from the stdlib, even though [the distutils hack has an explicit check that submodules are loaded from the locally-bundled copy](https://github.com/pypa/setuptools/blob/dd5a2cec373ffe7eefc087c1cd06fb4e491a7e88/_distutils_hack/__init__.py#L55-L57).\r\n\r\n_Originally posted by @jaraco in https://github.com/pypa/distutils/issues/16#issuecomment-980043534_\n", "before_files": [{"content": "import sys\nimport os\nimport re\nimport importlib\nimport warnings\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\nwarnings.filterwarnings('ignore',\n r'.+ distutils\\b.+ deprecated',\n DeprecationWarning)\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. 
not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n distutils = importlib.import_module('setuptools._distutils')\n distutils.__name__ = 'distutils'\n sys.modules['distutils'] = distutils\n\n # sanity check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib.abc\n import importlib.util\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return importlib.import_module('setuptools._distutils')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n if self.pip_imported_during_build():\n return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n @staticmethod\n def pip_imported_during_build():\n \"\"\"\n Detect if pip is being imported in a build script. Ref #2355.\n \"\"\"\n import traceback\n return any(\n frame.f_globals['__file__'].endswith('setup.py')\n for frame, line in traceback.walk_stack(None)\n )\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}], "after_files": [{"content": "import sys\nimport os\nimport re\nimport importlib\nimport warnings\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\nwarnings.filterwarnings('ignore',\n r'.+ distutils\\b.+ deprecated',\n DeprecationWarning)\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. 
not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n\n # With the DistutilsMetaFinder in place,\n # perform an import to cause distutils to be\n # loaded from setuptools._distutils. Ref #2906.\n add_shim()\n importlib.import_module('distutils')\n remove_shim()\n\n # check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib.abc\n import importlib.util\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return importlib.import_module('setuptools._distutils')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n if self.pip_imported_during_build():\n return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n @staticmethod\n def pip_imported_during_build():\n \"\"\"\n Detect if pip is being imported in a build script. Ref #2355.\n \"\"\"\n import traceback\n return any(\n frame.f_globals['__file__'].endswith('setup.py')\n for frame, line in traceback.walk_stack(None)\n )\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}]} | 1,757 | 229 |
gh_patches_debug_64121 | rasdani/github-patches | git_diff | plotly__dash-333 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The README is in markdown and doesn't render properly on pypi.io
See: https://pypi.org/project/dash/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import io
2 from setuptools import setup, find_packages
3
4 main_ns = {}
5 exec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used
6
7 setup(
8 name='dash',
9 version=main_ns['__version__'],
10 author='chris p',
11 author_email='[email protected]',
12 packages=find_packages(exclude=['tests*']),
13 license='MIT',
14 description=('A Python framework for building reactive web-apps. '
15 'Developed by Plotly.'),
16 long_description=io.open('README.md', encoding='utf-8').read(),
17 install_requires=[
18 'Flask>=0.12',
19 'flask-compress',
20 'plotly',
21 'dash_renderer',
22 ],
23 url='https://plot.ly/dash',
24 classifiers=[
25 'Development Status :: 5 - Production/Stable',
26 'Environment :: Web Environment',
27 'Framework :: Flask',
28 'Intended Audience :: Developers',
29 'Intended Audience :: Education',
30 'Intended Audience :: Financial and Insurance Industry',
31 'Intended Audience :: Healthcare Industry',
32 'Intended Audience :: Manufacturing',
33 'Intended Audience :: Science/Research',
34 'License :: OSI Approved :: MIT License',
35 'Programming Language :: Python :: 2.7',
36 'Programming Language :: Python :: 3.3',
37 'Programming Language :: Python :: 3.4',
38 'Programming Language :: Python :: 3.5',
39 'Programming Language :: Python :: 3.6',
40 'Topic :: Database :: Front-Ends',
41 'Topic :: Office/Business :: Financial :: Spreadsheet',
42 'Topic :: Scientific/Engineering :: Visualization',
43 'Topic :: Software Development :: Libraries :: Application Frameworks',
44 'Topic :: Software Development :: Widget Sets'
45 ]
46 )
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,6 +14,7 @@
description=('A Python framework for building reactive web-apps. '
'Developed by Plotly.'),
long_description=io.open('README.md', encoding='utf-8').read(),
+ long_description_content_type='text/markdown',
install_requires=[
'Flask>=0.12',
'flask-compress',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,6 +14,7 @@\n description=('A Python framework for building reactive web-apps. '\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n+ long_description_content_type='text/markdown',\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n", "issue": "The README is in markdown and doesn't render properly on pypi.io\nSee: https://pypi.org/project/dash/\r\n\n", "before_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used\n\nsetup(\n name='dash',\n version=main_ns['__version__'],\n author='chris p',\n author_email='[email protected]',\n packages=find_packages(exclude=['tests*']),\n license='MIT',\n description=('A Python framework for building reactive web-apps. '\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n 'dash_renderer',\n ],\n url='https://plot.ly/dash',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Flask',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Financial and Insurance Industry',\n 'Intended Audience :: Healthcare Industry',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Widget Sets'\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used\n\nsetup(\n name='dash',\n version=main_ns['__version__'],\n author='chris p',\n author_email='[email protected]',\n packages=find_packages(exclude=['tests*']),\n license='MIT',\n description=('A Python framework for building reactive web-apps. 
'\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n long_description_content_type='text/markdown',\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n 'dash_renderer',\n ],\n url='https://plot.ly/dash',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Flask',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Financial and Insurance Industry',\n 'Intended Audience :: Healthcare Industry',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Widget Sets'\n ]\n)\n", "path": "setup.py"}]} | 749 | 104 |
gh_patches_debug_1331 | rasdani/github-patches | git_diff | litestar-org__litestar-1773 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/dto/exceptions.py`
Content:
```
1 from __future__ import annotations
2
3 from litestar.exceptions import ImproperlyConfiguredException
4
5 __all__ = ("DTOException", "UnsupportedType")
6
7
8 class DTOException(ImproperlyConfiguredException):
9 """Base exception for DTO errors."""
10
11
12 class UnsupportedType(DTOException):
13 """Raised when a type is not supported by Litestar."""
14
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/litestar/dto/exceptions.py b/litestar/dto/exceptions.py
deleted file mode 100644
--- a/litestar/dto/exceptions.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from __future__ import annotations
-
-from litestar.exceptions import ImproperlyConfiguredException
-
-__all__ = ("DTOException", "UnsupportedType")
-
-
-class DTOException(ImproperlyConfiguredException):
- """Base exception for DTO errors."""
-
-
-class UnsupportedType(DTOException):
- """Raised when a type is not supported by Litestar."""
| {"golden_diff": "diff --git a/litestar/dto/exceptions.py b/litestar/dto/exceptions.py\ndeleted file mode 100644\n--- a/litestar/dto/exceptions.py\n+++ /dev/null\n@@ -1,13 +0,0 @@\n-from __future__ import annotations\n-\n-from litestar.exceptions import ImproperlyConfiguredException\n-\n-__all__ = (\"DTOException\", \"UnsupportedType\")\n-\n-\n-class DTOException(ImproperlyConfiguredException):\n- \"\"\"Base exception for DTO errors.\"\"\"\n-\n-\n-class UnsupportedType(DTOException):\n- \"\"\"Raised when a type is not supported by Litestar.\"\"\"\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom litestar.exceptions import ImproperlyConfiguredException\n\n__all__ = (\"DTOException\", \"UnsupportedType\")\n\n\nclass DTOException(ImproperlyConfiguredException):\n \"\"\"Base exception for DTO errors.\"\"\"\n\n\nclass UnsupportedType(DTOException):\n \"\"\"Raised when a type is not supported by Litestar.\"\"\"\n", "path": "litestar/dto/exceptions.py"}], "after_files": [{"content": null, "path": "litestar/dto/exceptions.py"}]} | 523 | 139 |
gh_patches_debug_47653 | rasdani/github-patches | git_diff | DataBiosphere__toil-4528 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
WES ignores host in production
When trying to run `toil server --host 0.0.0.0`, I noticed that it would always only listen on `127.0.0.1` no matter what `--host` is set to, but running with `--debug` didn't have this problem.
```
❯ toil server --host 0.0.0.0
...
[2022-11-11 16:50:46 +0000] [7173] [INFO] Starting gunicorn 20.1.0
[2022-11-11 16:50:46 +0000] [7173] [INFO] Listening at: http://127.0.0.1:8000
...
```
vs
```
❯ toil server --host 0.0.0.0 --debug
...
INFO:werkzeug:WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.
* Running on all addresses (0.0.0.0)
* Running on http://127.0.0.1:8080
...
```
I tracked the problem down to [this line](https://github.com/DataBiosphere/toil/blob/master/src/toil/server/wsgi_app.py#L44). It appears to overwrite the settings taken from the command line with Gunicorn's defaults before checking whether anything has been set; `bind` won't be, as it has been set to `None` in the merge.
Swapping the dictionaries around seems to have fixed it.
```python
for key, value in {**vars(env_args), **self.options}.items():
```
┆Issue is synchronized with this [Jira Story](https://ucsc-cgl.atlassian.net/browse/TOIL-1242)
┆Issue Number: TOIL-1242
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/toil/server/wsgi_app.py`
Content:
```
1 # Copyright (C) 2015-2021 Regents of the University of California
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, Dict, Optional
15
16 from gunicorn.app.base import BaseApplication # type: ignore
17
18
19 class GunicornApplication(BaseApplication): # type: ignore
20 """
21 An entry point to integrate a Gunicorn WSGI server in Python. To start a
22 WSGI application with callable `app`, run the following code:
23
24 WSGIApplication(app, options={
25 ...
26 }).run()
27
28 For more details, see: https://docs.gunicorn.org/en/latest/custom.html
29 """
30 def __init__(self, app: object, options: Optional[Dict[str, Any]] = None):
31 self.options = options or {}
32 self.application = app
33 super().__init__()
34
35 def init(self, *args: Any) -> None:
36 pass
37
38 def load_config(self) -> None:
39 parser = self.cfg.parser()
40 env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())
41
42 # TODO: also read from the Gunicorn config file?
43
44 for key, value in {**self.options, **vars(env_args)}.items():
45 if key in self.cfg.settings and value is not None:
46 self.cfg.set(key.lower(), value)
47
48 def load(self) -> object:
49 return self.application
50
51
52 def run_app(app: object, options: Optional[Dict[str, Any]] = None) -> None:
53 """
54 Run a Gunicorn WSGI server.
55 """
56 GunicornApplication(app, options=options).run()
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/toil/server/wsgi_app.py b/src/toil/server/wsgi_app.py
--- a/src/toil/server/wsgi_app.py
+++ b/src/toil/server/wsgi_app.py
@@ -41,7 +41,7 @@
# TODO: also read from the Gunicorn config file?
- for key, value in {**self.options, **vars(env_args)}.items():
+ for key, value in {**vars(env_args), **self.options}.items():
if key in self.cfg.settings and value is not None:
self.cfg.set(key.lower(), value)
| {"golden_diff": "diff --git a/src/toil/server/wsgi_app.py b/src/toil/server/wsgi_app.py\n--- a/src/toil/server/wsgi_app.py\n+++ b/src/toil/server/wsgi_app.py\n@@ -41,7 +41,7 @@\n \n # TODO: also read from the Gunicorn config file?\n \n- for key, value in {**self.options, **vars(env_args)}.items():\n+ for key, value in {**vars(env_args), **self.options}.items():\n if key in self.cfg.settings and value is not None:\n self.cfg.set(key.lower(), value)\n", "issue": "WES ignores host in production\nWhen trying to run `toil server --host 0.0.0.0`, I noticed that it would always only listen on `127.0.0.1` no matter what `--host` is set to but running with `--debug` didn't have this problem.\n\n```\n\u276f toil server --host 0.0.0.0\n...\n[2022-11-11 16:50:46 +0000] [7173] [INFO] Starting gunicorn 20.1.0\n[2022-11-11 16:50:46 +0000] [7173] [INFO] Listening at: http://127.0.0.1:8000\n...\n```\nvs\n```\n\u276f toil server --host 0.0.0.0 --debug\n...\nINFO:werkzeug:WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.\n * Running on all addresses (0.0.0.0)\n * Running on http://127.0.0.1:8080\n...\n```\n\nI tracked the problem down to [this line](https://github.com/DataBiosphere/toil/blob/master/src/toil/server/wsgi_app.py#L44). It appears to be overwriting the settings taken from the command line with Gunicorn's defaults before checking to see if anything has been set which `bind` won't be as it's been set to `None` in the merge.\n\nSwapping the dictionaries around seems to have fixed it.\n```python\n for key, value in {**vars(env_args), **self.options}.items():\n```\n\n\u2506Issue is synchronized with this [Jira Story](https://ucsc-cgl.atlassian.net/browse/TOIL-1242)\n\u2506Issue Number: TOIL-1242\n\n", "before_files": [{"content": "# Copyright (C) 2015-2021 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional\n\nfrom gunicorn.app.base import BaseApplication # type: ignore\n\n\nclass GunicornApplication(BaseApplication): # type: ignore\n \"\"\"\n An entry point to integrate a Gunicorn WSGI server in Python. 
To start a\n WSGI application with callable `app`, run the following code:\n\n WSGIApplication(app, options={\n ...\n }).run()\n\n For more details, see: https://docs.gunicorn.org/en/latest/custom.html\n \"\"\"\n def __init__(self, app: object, options: Optional[Dict[str, Any]] = None):\n self.options = options or {}\n self.application = app\n super().__init__()\n\n def init(self, *args: Any) -> None:\n pass\n\n def load_config(self) -> None:\n parser = self.cfg.parser()\n env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())\n\n # TODO: also read from the Gunicorn config file?\n\n for key, value in {**self.options, **vars(env_args)}.items():\n if key in self.cfg.settings and value is not None:\n self.cfg.set(key.lower(), value)\n\n def load(self) -> object:\n return self.application\n\n\ndef run_app(app: object, options: Optional[Dict[str, Any]] = None) -> None:\n \"\"\"\n Run a Gunicorn WSGI server.\n \"\"\"\n GunicornApplication(app, options=options).run()\n", "path": "src/toil/server/wsgi_app.py"}], "after_files": [{"content": "# Copyright (C) 2015-2021 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional\n\nfrom gunicorn.app.base import BaseApplication # type: ignore\n\n\nclass GunicornApplication(BaseApplication): # type: ignore\n \"\"\"\n An entry point to integrate a Gunicorn WSGI server in Python. To start a\n WSGI application with callable `app`, run the following code:\n\n WSGIApplication(app, options={\n ...\n }).run()\n\n For more details, see: https://docs.gunicorn.org/en/latest/custom.html\n \"\"\"\n def __init__(self, app: object, options: Optional[Dict[str, Any]] = None):\n self.options = options or {}\n self.application = app\n super().__init__()\n\n def init(self, *args: Any) -> None:\n pass\n\n def load_config(self) -> None:\n parser = self.cfg.parser()\n env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())\n\n # TODO: also read from the Gunicorn config file?\n\n for key, value in {**vars(env_args), **self.options}.items():\n if key in self.cfg.settings and value is not None:\n self.cfg.set(key.lower(), value)\n\n def load(self) -> object:\n return self.application\n\n\ndef run_app(app: object, options: Optional[Dict[str, Any]] = None) -> None:\n \"\"\"\n Run a Gunicorn WSGI server.\n \"\"\"\n GunicornApplication(app, options=options).run()\n", "path": "src/toil/server/wsgi_app.py"}]} | 1,271 | 131 |
gh_patches_debug_39354 | rasdani/github-patches | git_diff | marshmallow-code__webargs-509 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'Not a valid tuple.' when trying to use marshmallow fields.Tuple for argument validation
I'm trying to use the marshmallow fields.Tuple for querystring argument validation on a GET request using Flask. The issue I'm running into is that no matter what type of object I declare and no matter what I use in the request, I always get the default 'Not a valid tuple.' response. I have tried using tuples of size 1 and 2, using fields.String and/or fields.Integer, etc., with the same result.
- I'm using Python 3.6.9 with these dependencies:
anyjson==0.3.3
apipkg==1.5
arrow==0.15.5
attrs==19.3.0
backports.functools-lru-cache==1.6.1
cassandra-driver==3.22.0
Cerberus==1.3.2
certifi==2019.11.28
cffi==1.13.2
chardet==3.0.4
click==7.1.1
execnet==1.7.1
Flask==1.1.1
Flask-Cors==3.0.8
funcsigs==1.0.2
futures==3.1.1
geomet==0.1.2
gevent==1.4.0
greenlet==0.4.13
gunicorn==20.0.4
idna==2.9
importlib-metadata==1.6.0
itsdangerous==1.1.0
Jinja2==2.11.1
jsonklog==0.15.0
MarkupSafe==1.1.1
marshmallow==3.5.1
neurolab==0.3.5
numpy==1.18.1
pluggy==0.13.1
py==1.8.1
pyaml==20.3.1
pymongo==3.10.1
pytest==3.3.0
pytest-forked==0.2
pytest-xdist==1.20.1
python-dateutil==2.8.1
PyYAML==5.3.1
readline==6.2.4.1
requests==2.23.0
six==1.14.0
urllib3==1.25.8
webargs==6.0.0
Werkzeug==1.0.0
zipp==3.1.0
- Here is an example of what I'm trying to do:
```
from flask import Flask
from webargs.flaskparser import parser, use_kwargs
from marshmallow import EXCLUDE, fields, Schema
app = Flask(__name__)
@app.errorhandler(422)
def custom_handler(error):
errors = []
if 'query' in error.data['messages']:
for arg in error.data['messages']['query']:
for item in error.data['messages']['query'][arg]:
errors.append(item)
return str(errors), 400
class test_schema(Schema):
class Meta:
unknown = EXCLUDE
strict = True
test_tup = fields.Tuple((fields.Integer(required=True), fields.Integer(required=True)), required=True)
@app.route('/test/', strict_slashes=False)
@parser.use_kwargs(test_schema, location='query')
def test_the_mallow(**kwargs):
return "True"
```
- Finally, here are a couple of example URLs I've tried:
localhost:2300/test/?test_tup=[0,0]
localhost:2300/test/?test_tup=(0,0)
localhost:2300/test/?test_tup=0,0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/webargs/fields.py`
Content:
```
1 """Field classes.
2
3 Includes all fields from `marshmallow.fields` in addition to a custom
4 `Nested` field and `DelimitedList`.
5
6 All fields can optionally take a special `location` keyword argument, which
7 tells webargs where to parse the request argument from.
8
9 .. code-block:: python
10
11 args = {
12 "active": fields.Bool(location="query"),
13 "content_type": fields.Str(data_key="Content-Type", location="headers"),
14 }
15
16 Note: `data_key` replaced `load_from` in marshmallow 3.
17 When using marshmallow 2, use `load_from`.
18 """
19 import marshmallow as ma
20
21 # Expose all fields from marshmallow.fields.
22 from marshmallow.fields import * # noqa: F40
23 from webargs.compat import MARSHMALLOW_VERSION_INFO
24 from webargs.dict2schema import dict2schema
25
26 __all__ = ["DelimitedList"] + ma.fields.__all__
27
28
29 class Nested(ma.fields.Nested):
30 """Same as `marshmallow.fields.Nested`, except can be passed a dictionary as
31 the first argument, which will be converted to a `marshmallow.Schema`.
32
33 .. note::
34
35 The schema class here will always be `marshmallow.Schema`, regardless
36 of whether a custom schema class is set on the parser. Pass an explicit schema
37 class if necessary.
38 """
39
40 def __init__(self, nested, *args, **kwargs):
41 if isinstance(nested, dict):
42 nested = dict2schema(nested)
43 super().__init__(nested, *args, **kwargs)
44
45
46 class DelimitedList(ma.fields.List):
47 """A field which is similar to a List, but takes its input as a delimited
48 string (e.g. "foo,bar,baz").
49
50 Like List, it can be given a nested field type which it will use to
51 de/serialize each element of the list.
52
53 :param Field cls_or_instance: A field class or instance.
54 :param str delimiter: Delimiter between values.
55 """
56
57 default_error_messages = {"invalid": "Not a valid delimited list."}
58 delimiter = ","
59
60 def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):
61 self.delimiter = delimiter or self.delimiter
62 super().__init__(cls_or_instance, **kwargs)
63
64 def _serialize(self, value, attr, obj):
65 # serializing will start with List serialization, so that we correctly
66 # output lists of non-primitive types, e.g. DelimitedList(DateTime)
67 return self.delimiter.join(
68 format(each) for each in super()._serialize(value, attr, obj)
69 )
70
71 def _deserialize(self, value, attr, data, **kwargs):
72 # attempting to deserialize from a non-string source is an error
73 if not isinstance(value, (str, bytes)):
74 if MARSHMALLOW_VERSION_INFO[0] < 3:
75 self.fail("invalid")
76 else:
77 raise self.make_error("invalid")
78 return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/webargs/fields.py b/src/webargs/fields.py
--- a/src/webargs/fields.py
+++ b/src/webargs/fields.py
@@ -43,26 +43,24 @@
super().__init__(nested, *args, **kwargs)
-class DelimitedList(ma.fields.List):
- """A field which is similar to a List, but takes its input as a delimited
- string (e.g. "foo,bar,baz").
+class DelimitedFieldMixin:
+ """
+ This is a mixin class for subclasses of ma.fields.List and ma.fields.Tuple
+ which split on a pre-specified delimiter. By default, the delimiter will be ","
- Like List, it can be given a nested field type which it will use to
- de/serialize each element of the list.
+ Because we want the MRO to reach this class before the List or Tuple class,
+ it must be listed first in the superclasses
- :param Field cls_or_instance: A field class or instance.
- :param str delimiter: Delimiter between values.
+ For example, a DelimitedList-like type can be defined like so:
+
+ >>> class MyDelimitedList(DelimitedFieldMixin, ma.fields.List):
+ >>> pass
"""
- default_error_messages = {"invalid": "Not a valid delimited list."}
delimiter = ","
- def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):
- self.delimiter = delimiter or self.delimiter
- super().__init__(cls_or_instance, **kwargs)
-
def _serialize(self, value, attr, obj):
- # serializing will start with List serialization, so that we correctly
+ # serializing will start with parent-class serialization, so that we correctly
# output lists of non-primitive types, e.g. DelimitedList(DateTime)
return self.delimiter.join(
format(each) for each in super()._serialize(value, attr, obj)
@@ -76,3 +74,45 @@
else:
raise self.make_error("invalid")
return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)
+
+
+class DelimitedList(DelimitedFieldMixin, ma.fields.List):
+ """A field which is similar to a List, but takes its input as a delimited
+ string (e.g. "foo,bar,baz").
+
+ Like List, it can be given a nested field type which it will use to
+ de/serialize each element of the list.
+
+ :param Field cls_or_instance: A field class or instance.
+ :param str delimiter: Delimiter between values.
+ """
+
+ default_error_messages = {"invalid": "Not a valid delimited list."}
+ delimiter = ","
+
+ def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):
+ self.delimiter = delimiter or self.delimiter
+ super().__init__(cls_or_instance, **kwargs)
+
+
+# DelimitedTuple can only be defined when using marshmallow3, when Tuple was
+# added
+if MARSHMALLOW_VERSION_INFO[0] >= 3:
+
+ class DelimitedTuple(DelimitedFieldMixin, ma.fields.Tuple):
+ """A field which is similar to a Tuple, but takes its input as a delimited
+ string (e.g. "foo,bar,baz").
+
+ Like Tuple, it can be given a tuple of nested field types which it will use to
+ de/serialize each element of the tuple.
+
+ :param Iterable[Field] tuple_fields: An iterable of field classes or instances.
+ :param str delimiter: Delimiter between values.
+ """
+
+ default_error_messages = {"invalid": "Not a valid delimited tuple."}
+ delimiter = ","
+
+ def __init__(self, tuple_fields, *, delimiter=None, **kwargs):
+ self.delimiter = delimiter or self.delimiter
+ super().__init__(tuple_fields, **kwargs)
| {"golden_diff": "diff --git a/src/webargs/fields.py b/src/webargs/fields.py\n--- a/src/webargs/fields.py\n+++ b/src/webargs/fields.py\n@@ -43,26 +43,24 @@\n super().__init__(nested, *args, **kwargs)\n \n \n-class DelimitedList(ma.fields.List):\n- \"\"\"A field which is similar to a List, but takes its input as a delimited\n- string (e.g. \"foo,bar,baz\").\n+class DelimitedFieldMixin:\n+ \"\"\"\n+ This is a mixin class for subclasses of ma.fields.List and ma.fields.Tuple\n+ which split on a pre-specified delimiter. By default, the delimiter will be \",\"\n \n- Like List, it can be given a nested field type which it will use to\n- de/serialize each element of the list.\n+ Because we want the MRO to reach this class before the List or Tuple class,\n+ it must be listed first in the superclasses\n \n- :param Field cls_or_instance: A field class or instance.\n- :param str delimiter: Delimiter between values.\n+ For example, a DelimitedList-like type can be defined like so:\n+\n+ >>> class MyDelimitedList(DelimitedFieldMixin, ma.fields.List):\n+ >>> pass\n \"\"\"\n \n- default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n delimiter = \",\"\n \n- def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):\n- self.delimiter = delimiter or self.delimiter\n- super().__init__(cls_or_instance, **kwargs)\n-\n def _serialize(self, value, attr, obj):\n- # serializing will start with List serialization, so that we correctly\n+ # serializing will start with parent-class serialization, so that we correctly\n # output lists of non-primitive types, e.g. DelimitedList(DateTime)\n return self.delimiter.join(\n format(each) for each in super()._serialize(value, attr, obj)\n@@ -76,3 +74,45 @@\n else:\n raise self.make_error(\"invalid\")\n return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)\n+\n+\n+class DelimitedList(DelimitedFieldMixin, ma.fields.List):\n+ \"\"\"A field which is similar to a List, but takes its input as a delimited\n+ string (e.g. \"foo,bar,baz\").\n+\n+ Like List, it can be given a nested field type which it will use to\n+ de/serialize each element of the list.\n+\n+ :param Field cls_or_instance: A field class or instance.\n+ :param str delimiter: Delimiter between values.\n+ \"\"\"\n+\n+ default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n+ delimiter = \",\"\n+\n+ def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):\n+ self.delimiter = delimiter or self.delimiter\n+ super().__init__(cls_or_instance, **kwargs)\n+\n+\n+# DelimitedTuple can only be defined when using marshmallow3, when Tuple was\n+# added\n+if MARSHMALLOW_VERSION_INFO[0] >= 3:\n+\n+ class DelimitedTuple(DelimitedFieldMixin, ma.fields.Tuple):\n+ \"\"\"A field which is similar to a Tuple, but takes its input as a delimited\n+ string (e.g. \"foo,bar,baz\").\n+\n+ Like Tuple, it can be given a tuple of nested field types which it will use to\n+ de/serialize each element of the tuple.\n+\n+ :param Iterable[Field] tuple_fields: An iterable of field classes or instances.\n+ :param str delimiter: Delimiter between values.\n+ \"\"\"\n+\n+ default_error_messages = {\"invalid\": \"Not a valid delimited tuple.\"}\n+ delimiter = \",\"\n+\n+ def __init__(self, tuple_fields, *, delimiter=None, **kwargs):\n+ self.delimiter = delimiter or self.delimiter\n+ super().__init__(tuple_fields, **kwargs)\n", "issue": "'Not a valid tuple.' 
when trying to use marshmallow fields.Tuple for argument validation\nI'm trying to use the marshmallow fields.Tuple for querystring argument validation on a GET request using Flask. The issue I'm running into is that no matter what type of object I declare and no matter what I use in the request, I always get the default 'Not a valid tuple.' response. I have tried using a tuple of size 1 and 2; using fields.String and/or fields.Integer, etc with the same result.\r\n\r\n- I'm using Python 3.6.9 with these dependencies:\r\nanyjson==0.3.3\r\napipkg==1.5\r\narrow==0.15.5\r\nattrs==19.3.0\r\nbackports.functools-lru-cache==1.6.1\r\ncassandra-driver==3.22.0\r\nCerberus==1.3.2\r\ncertifi==2019.11.28\r\ncffi==1.13.2\r\nchardet==3.0.4\r\nclick==7.1.1\r\nexecnet==1.7.1\r\nFlask==1.1.1\r\nFlask-Cors==3.0.8\r\nfuncsigs==1.0.2\r\nfutures==3.1.1\r\ngeomet==0.1.2\r\ngevent==1.4.0\r\ngreenlet==0.4.13\r\ngunicorn==20.0.4\r\nidna==2.9\r\nimportlib-metadata==1.6.0\r\nitsdangerous==1.1.0\r\nJinja2==2.11.1\r\njsonklog==0.15.0\r\nMarkupSafe==1.1.1\r\nmarshmallow==3.5.1\r\nneurolab==0.3.5\r\nnumpy==1.18.1\r\npluggy==0.13.1\r\npy==1.8.1\r\npyaml==20.3.1\r\npymongo==3.10.1\r\npytest==3.3.0\r\npytest-forked==0.2\r\npytest-xdist==1.20.1\r\npython-dateutil==2.8.1\r\nPyYAML==5.3.1\r\nreadline==6.2.4.1\r\nrequests==2.23.0\r\nsix==1.14.0\r\nurllib3==1.25.8\r\nwebargs==6.0.0\r\nWerkzeug==1.0.0\r\nzipp==3.1.0\r\n\r\n- Here is an example of what I'm trying to do:\r\n```\r\nfrom flask import Flask\r\nfrom webargs.flaskparser import parser, use_kwargs\r\nfrom marshmallow import EXCLUDE, fields, Schema\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n\r\[email protected](422)\r\ndef custom_handler(error):\r\n errors = []\r\n if 'query' in error.data['messages']:\r\n for arg in error.data['messages']['query']:\r\n for item in error.data['messages']['query'][arg]:\r\n errors.append(item)\r\n return str(errors), 400\r\n\r\n\r\nclass test_schema(Schema):\r\n class Meta:\r\n unknown = EXCLUDE\r\n strict = True\r\n \r\n test_tup = fields.Tuple((fields.Integer(required=True), fields.Integer(required=True)), required=True)\r\n\r\n\r\[email protected]('/test/', strict_slashes=False)\r\[email protected]_kwargs(test_schema, location='query')\r\ndef test_the_mallow(**kwargs):\r\n return \"True\"\r\n```\r\n\r\n- Finally, here are a couple example url's I've tried:\r\n localhost:2300/test/?test_tup=[0,0]\r\n localhost:2300/test/?test_tup=(0,0)\r\n localhost:2300/test/?test_tup=0,0\r\n\n", "before_files": [{"content": "\"\"\"Field classes.\n\nIncludes all fields from `marshmallow.fields` in addition to a custom\n`Nested` field and `DelimitedList`.\n\nAll fields can optionally take a special `location` keyword argument, which\ntells webargs where to parse the request argument from.\n\n.. code-block:: python\n\n args = {\n \"active\": fields.Bool(location=\"query\"),\n \"content_type\": fields.Str(data_key=\"Content-Type\", location=\"headers\"),\n }\n\nNote: `data_key` replaced `load_from` in marshmallow 3.\nWhen using marshmallow 2, use `load_from`.\n\"\"\"\nimport marshmallow as ma\n\n# Expose all fields from marshmallow.fields.\nfrom marshmallow.fields import * # noqa: F40\nfrom webargs.compat import MARSHMALLOW_VERSION_INFO\nfrom webargs.dict2schema import dict2schema\n\n__all__ = [\"DelimitedList\"] + ma.fields.__all__\n\n\nclass Nested(ma.fields.Nested):\n \"\"\"Same as `marshmallow.fields.Nested`, except can be passed a dictionary as\n the first argument, which will be converted to a `marshmallow.Schema`.\n\n .. 
note::\n\n The schema class here will always be `marshmallow.Schema`, regardless\n of whether a custom schema class is set on the parser. Pass an explicit schema\n class if necessary.\n \"\"\"\n\n def __init__(self, nested, *args, **kwargs):\n if isinstance(nested, dict):\n nested = dict2schema(nested)\n super().__init__(nested, *args, **kwargs)\n\n\nclass DelimitedList(ma.fields.List):\n \"\"\"A field which is similar to a List, but takes its input as a delimited\n string (e.g. \"foo,bar,baz\").\n\n Like List, it can be given a nested field type which it will use to\n de/serialize each element of the list.\n\n :param Field cls_or_instance: A field class or instance.\n :param str delimiter: Delimiter between values.\n \"\"\"\n\n default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n delimiter = \",\"\n\n def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):\n self.delimiter = delimiter or self.delimiter\n super().__init__(cls_or_instance, **kwargs)\n\n def _serialize(self, value, attr, obj):\n # serializing will start with List serialization, so that we correctly\n # output lists of non-primitive types, e.g. DelimitedList(DateTime)\n return self.delimiter.join(\n format(each) for each in super()._serialize(value, attr, obj)\n )\n\n def _deserialize(self, value, attr, data, **kwargs):\n # attempting to deserialize from a non-string source is an error\n if not isinstance(value, (str, bytes)):\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n self.fail(\"invalid\")\n else:\n raise self.make_error(\"invalid\")\n return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)\n", "path": "src/webargs/fields.py"}], "after_files": [{"content": "\"\"\"Field classes.\n\nIncludes all fields from `marshmallow.fields` in addition to a custom\n`Nested` field and `DelimitedList`.\n\nAll fields can optionally take a special `location` keyword argument, which\ntells webargs where to parse the request argument from.\n\n.. code-block:: python\n\n args = {\n \"active\": fields.Bool(location=\"query\"),\n \"content_type\": fields.Str(data_key=\"Content-Type\", location=\"headers\"),\n }\n\nNote: `data_key` replaced `load_from` in marshmallow 3.\nWhen using marshmallow 2, use `load_from`.\n\"\"\"\nimport marshmallow as ma\n\n# Expose all fields from marshmallow.fields.\nfrom marshmallow.fields import * # noqa: F40\nfrom webargs.compat import MARSHMALLOW_VERSION_INFO\nfrom webargs.dict2schema import dict2schema\n\n__all__ = [\"DelimitedList\"] + ma.fields.__all__\n\n\nclass Nested(ma.fields.Nested):\n \"\"\"Same as `marshmallow.fields.Nested`, except can be passed a dictionary as\n the first argument, which will be converted to a `marshmallow.Schema`.\n\n .. note::\n\n The schema class here will always be `marshmallow.Schema`, regardless\n of whether a custom schema class is set on the parser. Pass an explicit schema\n class if necessary.\n \"\"\"\n\n def __init__(self, nested, *args, **kwargs):\n if isinstance(nested, dict):\n nested = dict2schema(nested)\n super().__init__(nested, *args, **kwargs)\n\n\nclass DelimitedFieldMixin:\n \"\"\"\n This is a mixin class for subclasses of ma.fields.List and ma.fields.Tuple\n which split on a pre-specified delimiter. 
By default, the delimiter will be \",\"\n\n Because we want the MRO to reach this class before the List or Tuple class,\n it must be listed first in the superclasses\n\n For example, a DelimitedList-like type can be defined like so:\n\n >>> class MyDelimitedList(DelimitedFieldMixin, ma.fields.List):\n >>> pass\n \"\"\"\n\n delimiter = \",\"\n\n def _serialize(self, value, attr, obj):\n # serializing will start with parent-class serialization, so that we correctly\n # output lists of non-primitive types, e.g. DelimitedList(DateTime)\n return self.delimiter.join(\n format(each) for each in super()._serialize(value, attr, obj)\n )\n\n def _deserialize(self, value, attr, data, **kwargs):\n # attempting to deserialize from a non-string source is an error\n if not isinstance(value, (str, bytes)):\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n self.fail(\"invalid\")\n else:\n raise self.make_error(\"invalid\")\n return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)\n\n\nclass DelimitedList(DelimitedFieldMixin, ma.fields.List):\n \"\"\"A field which is similar to a List, but takes its input as a delimited\n string (e.g. \"foo,bar,baz\").\n\n Like List, it can be given a nested field type which it will use to\n de/serialize each element of the list.\n\n :param Field cls_or_instance: A field class or instance.\n :param str delimiter: Delimiter between values.\n \"\"\"\n\n default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n delimiter = \",\"\n\n def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):\n self.delimiter = delimiter or self.delimiter\n super().__init__(cls_or_instance, **kwargs)\n\n\n# DelimitedTuple can only be defined when using marshmallow3, when Tuple was\n# added\nif MARSHMALLOW_VERSION_INFO[0] >= 3:\n\n class DelimitedTuple(DelimitedFieldMixin, ma.fields.Tuple):\n \"\"\"A field which is similar to a Tuple, but takes its input as a delimited\n string (e.g. \"foo,bar,baz\").\n\n Like Tuple, it can be given a tuple of nested field types which it will use to\n de/serialize each element of the tuple.\n\n :param Iterable[Field] tuple_fields: An iterable of field classes or instances.\n :param str delimiter: Delimiter between values.\n \"\"\"\n\n default_error_messages = {\"invalid\": \"Not a valid delimited tuple.\"}\n delimiter = \",\"\n\n def __init__(self, tuple_fields, *, delimiter=None, **kwargs):\n self.delimiter = delimiter or self.delimiter\n super().__init__(tuple_fields, **kwargs)\n", "path": "src/webargs/fields.py"}]} | 1,902 | 889 |
gh_patches_debug_7758 | rasdani/github-patches | git_diff | CTFd__CTFd-1934 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error 500 when visiting /admin/users/1 - AttributeError: 'NoneType' object has no attribute 'get_score'
**Environment**:
- CTFd Version/Commit: HEAD
- Operating System: Docker image based off official Dockerfile
- Web Browser and Version: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1 Safari/605.1.15
**What happened?**
500 Internal server error
**What did you expect to happen?**
Show the admin user details when in team mode
**How to reproduce your issue**
* visited `/admin/users/1`
* this seems to be due to the fact that, in team mode, the admin user does not belong to any team, so the `account` property shown below returns `None`
```python
@hybrid_property
def account(self):
from CTFd.utils import get_config
user_mode = get_config("user_mode")
if user_mode == "teams":
return self.team
elif user_mode == "users":
return self
```
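For illustration, a guard in the caller avoids the crash; this is only a sketch (it mirrors the shape of the fix applied in the patch further below), and `user`, `score` and `place` refer to the locals in `users_detail`:

```python
# hypothetical guard: in teams mode the admin user may have no team,
# so user.account can be None and must not be dereferenced blindly
account = user.account
if account:
    score = account.get_score(admin=True)
    place = account.get_place(admin=True)
else:
    score = None
    place = None
```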
**Any associated stack traces or error logs**
```
ERROR [CTFd] Exception on /admin/users/1 [GET]
--
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask_restx/api.py", line 639, in error_router
return original_handler(e)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/CTFd/CTFd/utils/decorators/__init__.py", line 133, in admins_only_wrapper
return f(*args, **kwargs)
File "/opt/CTFd/CTFd/admin/users.py", line 91, in users_detail
score = user.account.get_score(admin=True)
AttributeError: 'NoneType' object has no attribute 'get_score'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/admin/users.py`
Content:
```
1 from flask import render_template, request, url_for
2 from sqlalchemy.sql import not_
3
4 from CTFd.admin import admin
5 from CTFd.models import Challenges, Tracking, Users
6 from CTFd.utils import get_config
7 from CTFd.utils.decorators import admins_only
8 from CTFd.utils.modes import TEAMS_MODE
9
10
11 @admin.route("/admin/users")
12 @admins_only
13 def users_listing():
14 q = request.args.get("q")
15 field = request.args.get("field")
16 page = abs(request.args.get("page", 1, type=int))
17 filters = []
18 users = []
19
20 if q:
21 # The field exists as an exposed column
22 if Users.__mapper__.has_property(field):
23 filters.append(getattr(Users, field).like("%{}%".format(q)))
24
25 if q and field == "ip":
26 users = (
27 Users.query.join(Tracking, Users.id == Tracking.user_id)
28 .filter(Tracking.ip.like("%{}%".format(q)))
29 .order_by(Users.id.asc())
30 .paginate(page=page, per_page=50)
31 )
32 else:
33 users = (
34 Users.query.filter(*filters)
35 .order_by(Users.id.asc())
36 .paginate(page=page, per_page=50)
37 )
38
39 args = dict(request.args)
40 args.pop("page", 1)
41
42 return render_template(
43 "admin/users/users.html",
44 users=users,
45 prev_page=url_for(request.endpoint, page=users.prev_num, **args),
46 next_page=url_for(request.endpoint, page=users.next_num, **args),
47 q=q,
48 field=field,
49 )
50
51
52 @admin.route("/admin/users/new")
53 @admins_only
54 def users_new():
55 return render_template("admin/users/new.html")
56
57
58 @admin.route("/admin/users/<int:user_id>")
59 @admins_only
60 def users_detail(user_id):
61 # Get user object
62 user = Users.query.filter_by(id=user_id).first_or_404()
63
64 # Get the user's solves
65 solves = user.get_solves(admin=True)
66
67 # Get challenges that the user is missing
68 if get_config("user_mode") == TEAMS_MODE:
69 if user.team:
70 all_solves = user.team.get_solves(admin=True)
71 else:
72 all_solves = user.get_solves(admin=True)
73 else:
74 all_solves = user.get_solves(admin=True)
75
76 solve_ids = [s.challenge_id for s in all_solves]
77 missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()
78
79 # Get IP addresses that the User has used
80 addrs = (
81 Tracking.query.filter_by(user_id=user_id).order_by(Tracking.date.desc()).all()
82 )
83
84 # Get Fails
85 fails = user.get_fails(admin=True)
86
87 # Get Awards
88 awards = user.get_awards(admin=True)
89
90 # Get user properties
91 score = user.account.get_score(admin=True)
92 place = user.account.get_place(admin=True)
93
94 return render_template(
95 "admin/users/user.html",
96 solves=solves,
97 user=user,
98 addrs=addrs,
99 score=score,
100 missing=missing,
101 place=place,
102 fails=fails,
103 awards=awards,
104 )
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/admin/users.py b/CTFd/admin/users.py
--- a/CTFd/admin/users.py
+++ b/CTFd/admin/users.py
@@ -87,9 +87,14 @@
# Get Awards
awards = user.get_awards(admin=True)
- # Get user properties
- score = user.account.get_score(admin=True)
- place = user.account.get_place(admin=True)
+ # Check if the user has an account (team or user)
+ # so that we don't throw an error if they dont
+ if user.account:
+ score = user.account.get_score(admin=True)
+ place = user.account.get_place(admin=True)
+ else:
+ score = None
+ place = None
return render_template(
"admin/users/user.html",
| {"golden_diff": "diff --git a/CTFd/admin/users.py b/CTFd/admin/users.py\n--- a/CTFd/admin/users.py\n+++ b/CTFd/admin/users.py\n@@ -87,9 +87,14 @@\n # Get Awards\n awards = user.get_awards(admin=True)\n \n- # Get user properties\n- score = user.account.get_score(admin=True)\n- place = user.account.get_place(admin=True)\n+ # Check if the user has an account (team or user)\n+ # so that we don't throw an error if they dont\n+ if user.account:\n+ score = user.account.get_score(admin=True)\n+ place = user.account.get_place(admin=True)\n+ else:\n+ score = None\n+ place = None\n \n return render_template(\n \"admin/users/user.html\",\n", "issue": "Error 500 when visiting /admin/users/1 - AttributeError: 'NoneType' object has no attribute 'get_score'\n**Environment**:\r\n\r\n- CTFd Version/Commit: HEAD\r\n- Operating System: Docker image based off official Dockerfile\r\n- Web Browser and Version: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1 Safari/605.1.15\r\n\r\n**What happened?**\r\n500 Internal server error\r\n\r\n**What did you expect to happen?**\r\nShow the admin user details when in team mode\r\n\r\n**How to reproduce your issue**\r\n* visited the `/admin/users/1`\r\n* this seems due to the fact that, when in team mode, the admin user does not belong to any team and, for some reason, this one returns `None`\r\n\r\n```python\r\n @hybrid_property\r\n def account(self):\r\n from CTFd.utils import get_config\r\n\r\n user_mode = get_config(\"user_mode\")\r\n if user_mode == \"teams\":\r\n return self.team\r\n elif user_mode == \"users\":\r\n return self\r\n```\r\n\r\n**Any associated stack traces or error logs**\r\n```\r\nERROR [CTFd] Exception on /admin/users/1 [GET]\r\n--\r\nTraceback (most recent call last):\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/app.py\", line 2447, in wsgi_app\r\nresponse = self.full_dispatch_request()\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/app.py\", line 1952, in full_dispatch_request\r\nrv = self.handle_user_exception(e)\r\nFile \"/usr/local/lib/python3.7/site-packages/flask_restx/api.py\", line 639, in error_router\r\nreturn original_handler(e)\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/app.py\", line 1821, in handle_user_exception\r\nreraise(exc_type, exc_value, tb)\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/_compat.py\", line 39, in reraise\r\nraise value\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/app.py\", line 1950, in full_dispatch_request\r\nrv = self.dispatch_request()\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/app.py\", line 1936, in dispatch_request\r\nreturn self.view_functions[rule.endpoint](**req.view_args)\r\nFile \"/opt/CTFd/CTFd/utils/decorators/__init__.py\", line 133, in admins_only_wrapper\r\nreturn f(*args, **kwargs)\r\nFile \"/opt/CTFd/CTFd/admin/users.py\", line 91, in users_detail\r\nscore = user.account.get_score(admin=True)\r\nAttributeError: 'NoneType' object has no attribute 'get_score'\r\n```\r\n\n", "before_files": [{"content": "from flask import render_template, request, url_for\nfrom sqlalchemy.sql import not_\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Challenges, Tracking, Users\nfrom CTFd.utils import get_config\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.modes import TEAMS_MODE\n\n\[email protected](\"/admin/users\")\n@admins_only\ndef users_listing():\n q = request.args.get(\"q\")\n field = request.args.get(\"field\")\n page = abs(request.args.get(\"page\", 1, 
type=int))\n filters = []\n users = []\n\n if q:\n # The field exists as an exposed column\n if Users.__mapper__.has_property(field):\n filters.append(getattr(Users, field).like(\"%{}%\".format(q)))\n\n if q and field == \"ip\":\n users = (\n Users.query.join(Tracking, Users.id == Tracking.user_id)\n .filter(Tracking.ip.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .paginate(page=page, per_page=50)\n )\n else:\n users = (\n Users.query.filter(*filters)\n .order_by(Users.id.asc())\n .paginate(page=page, per_page=50)\n )\n\n args = dict(request.args)\n args.pop(\"page\", 1)\n\n return render_template(\n \"admin/users/users.html\",\n users=users,\n prev_page=url_for(request.endpoint, page=users.prev_num, **args),\n next_page=url_for(request.endpoint, page=users.next_num, **args),\n q=q,\n field=field,\n )\n\n\[email protected](\"/admin/users/new\")\n@admins_only\ndef users_new():\n return render_template(\"admin/users/new.html\")\n\n\[email protected](\"/admin/users/<int:user_id>\")\n@admins_only\ndef users_detail(user_id):\n # Get user object\n user = Users.query.filter_by(id=user_id).first_or_404()\n\n # Get the user's solves\n solves = user.get_solves(admin=True)\n\n # Get challenges that the user is missing\n if get_config(\"user_mode\") == TEAMS_MODE:\n if user.team:\n all_solves = user.team.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n\n solve_ids = [s.challenge_id for s in all_solves]\n missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()\n\n # Get IP addresses that the User has used\n addrs = (\n Tracking.query.filter_by(user_id=user_id).order_by(Tracking.date.desc()).all()\n )\n\n # Get Fails\n fails = user.get_fails(admin=True)\n\n # Get Awards\n awards = user.get_awards(admin=True)\n\n # Get user properties\n score = user.account.get_score(admin=True)\n place = user.account.get_place(admin=True)\n\n return render_template(\n \"admin/users/user.html\",\n solves=solves,\n user=user,\n addrs=addrs,\n score=score,\n missing=missing,\n place=place,\n fails=fails,\n awards=awards,\n )\n", "path": "CTFd/admin/users.py"}], "after_files": [{"content": "from flask import render_template, request, url_for\nfrom sqlalchemy.sql import not_\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Challenges, Tracking, Users\nfrom CTFd.utils import get_config\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.modes import TEAMS_MODE\n\n\[email protected](\"/admin/users\")\n@admins_only\ndef users_listing():\n q = request.args.get(\"q\")\n field = request.args.get(\"field\")\n page = abs(request.args.get(\"page\", 1, type=int))\n filters = []\n users = []\n\n if q:\n # The field exists as an exposed column\n if Users.__mapper__.has_property(field):\n filters.append(getattr(Users, field).like(\"%{}%\".format(q)))\n\n if q and field == \"ip\":\n users = (\n Users.query.join(Tracking, Users.id == Tracking.user_id)\n .filter(Tracking.ip.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .paginate(page=page, per_page=50)\n )\n else:\n users = (\n Users.query.filter(*filters)\n .order_by(Users.id.asc())\n .paginate(page=page, per_page=50)\n )\n\n args = dict(request.args)\n args.pop(\"page\", 1)\n\n return render_template(\n \"admin/users/users.html\",\n users=users,\n prev_page=url_for(request.endpoint, page=users.prev_num, **args),\n next_page=url_for(request.endpoint, page=users.next_num, **args),\n q=q,\n field=field,\n )\n\n\[email 
protected](\"/admin/users/new\")\n@admins_only\ndef users_new():\n return render_template(\"admin/users/new.html\")\n\n\[email protected](\"/admin/users/<int:user_id>\")\n@admins_only\ndef users_detail(user_id):\n # Get user object\n user = Users.query.filter_by(id=user_id).first_or_404()\n\n # Get the user's solves\n solves = user.get_solves(admin=True)\n\n # Get challenges that the user is missing\n if get_config(\"user_mode\") == TEAMS_MODE:\n if user.team:\n all_solves = user.team.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n\n solve_ids = [s.challenge_id for s in all_solves]\n missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()\n\n # Get IP addresses that the User has used\n addrs = (\n Tracking.query.filter_by(user_id=user_id).order_by(Tracking.date.desc()).all()\n )\n\n # Get Fails\n fails = user.get_fails(admin=True)\n\n # Get Awards\n awards = user.get_awards(admin=True)\n\n # Check if the user has an account (team or user)\n # so that we don't throw an error if they dont\n if user.account:\n score = user.account.get_score(admin=True)\n place = user.account.get_place(admin=True)\n else:\n score = None\n place = None\n\n return render_template(\n \"admin/users/user.html\",\n solves=solves,\n user=user,\n addrs=addrs,\n score=score,\n missing=missing,\n place=place,\n fails=fails,\n awards=awards,\n )\n", "path": "CTFd/admin/users.py"}]} | 1,802 | 180 |
gh_patches_debug_13796 | rasdani/github-patches | git_diff | Mailu__Mailu-1874 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Weblate instance is down
I tried accessing the Weblate instance to potentially add another language, but it appears to be down.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup/server.py`
Content:
```
1 import flask
2 import flask_bootstrap
3 import redis
4 import json
5 import os
6 import jinja2
7 import uuid
8 import string
9 import random
10 import ipaddress
11 import hashlib
12 import time
13
14
15 version = os.getenv("this_version", "master")
16 static_url_path = "/" + version + "/static"
17 app = flask.Flask(__name__, static_url_path=static_url_path)
18 flask_bootstrap.Bootstrap(app)
19 db = redis.StrictRedis(host='redis', port=6379, db=0)
20
21
22 def render_flavor(flavor, template, data):
23 return flask.render_template(
24 os.path.join(flavor, template),
25 **data
26 )
27
28
29 @app.add_template_global
30 def secret(length=16):
31 charset = string.ascii_uppercase + string.digits
32 return ''.join(
33 random.SystemRandom().choice(charset)
34 for _ in range(length)
35 )
36
37 #Original copied from https://github.com/andrewlkho/ulagen
38 def random_ipv6_subnet():
39 eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff
40 eui64_canon = "-".join([format(eui64, "02X")[i:i+2] for i in range(0, 18, 2)])
41
42 h = hashlib.sha1()
43 h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8'))
44 globalid = h.hexdigest()[0:10]
45
46 prefix = ":".join(("fd" + globalid[0:2], globalid[2:6], globalid[6:10]))
47 return prefix
48
49 def build_app(path):
50
51 app.jinja_env.trim_blocks = True
52 app.jinja_env.lstrip_blocks = True
53
54 @app.context_processor
55 def app_context():
56 return dict(
57 versions=os.getenv("VERSIONS","master").split(','),
58 stable_version = os.getenv("stable_version", "master")
59 )
60
61 prefix_bp = flask.Blueprint(version, __name__)
62 prefix_bp.jinja_loader = jinja2.ChoiceLoader([
63 jinja2.FileSystemLoader(os.path.join(path, "templates")),
64 jinja2.FileSystemLoader(os.path.join(path, "flavors"))
65 ])
66
67 root_bp = flask.Blueprint("root", __name__)
68 root_bp.jinja_loader = jinja2.ChoiceLoader([
69 jinja2.FileSystemLoader(os.path.join(path, "templates")),
70 jinja2.FileSystemLoader(os.path.join(path, "flavors"))
71 ])
72
73 @prefix_bp.context_processor
74 @root_bp.context_processor
75 def bp_context(version=version):
76 return dict(version=version)
77
78 @prefix_bp.route("/")
79 @root_bp.route("/")
80 def wizard():
81 return flask.render_template('wizard.html')
82
83 @prefix_bp.route("/submit_flavor", methods=["POST"])
84 @root_bp.route("/submit_flavor", methods=["POST"])
85 def submit_flavor():
86 data = flask.request.form.copy()
87 subnet6 = random_ipv6_subnet()
88 steps = sorted(os.listdir(os.path.join(path, "templates", "steps", data["flavor"])))
89 return flask.render_template('wizard.html', flavor=data["flavor"], steps=steps, subnet6=subnet6)
90
91 @prefix_bp.route("/submit", methods=["POST"])
92 @root_bp.route("/submit", methods=["POST"])
93 def submit():
94 data = flask.request.form.copy()
95 data['uid'] = str(uuid.uuid4())
96 try:
97 data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])
98 except ValueError as err:
99 return "Error while generating files: " + str(err)
100 db.set(data['uid'], json.dumps(data))
101 return flask.redirect(flask.url_for('.setup', uid=data['uid']))
102
103 @prefix_bp.route("/setup/<uid>", methods=["GET"])
104 @root_bp.route("/setup/<uid>", methods=["GET"])
105 def setup(uid):
106 data = json.loads(db.get(uid))
107 flavor = data.get("flavor", "compose")
108 rendered = render_flavor(flavor, "setup.html", data)
109 return flask.render_template("setup.html", contents=rendered)
110
111 @prefix_bp.route("/file/<uid>/<filepath>", methods=["GET"])
112 @root_bp.route("/file/<uid>/<filepath>", methods=["GET"])
113 def file(uid, filepath):
114 data = json.loads(db.get(uid))
115 flavor = data.get("flavor", "compose")
116 return flask.Response(
117 render_flavor(flavor, filepath, data),
118 mimetype="application/text"
119 )
120
121 app.register_blueprint(prefix_bp, url_prefix="/{}".format(version))
122 app.register_blueprint(root_bp)
123
124
125 if __name__ == "__main__":
126 build_app("/tmp/mailutest")
127 app.run(debug=True)
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup/server.py b/setup/server.py
--- a/setup/server.py
+++ b/setup/server.py
@@ -54,11 +54,11 @@
@app.context_processor
def app_context():
return dict(
- versions=os.getenv("VERSIONS","master").split(','),
+ versions=os.getenv("VERSIONS","master").split(','),
stable_version = os.getenv("stable_version", "master")
)
- prefix_bp = flask.Blueprint(version, __name__)
+ prefix_bp = flask.Blueprint(version.replace(".", "_"), __name__)
prefix_bp.jinja_loader = jinja2.ChoiceLoader([
jinja2.FileSystemLoader(os.path.join(path, "templates")),
jinja2.FileSystemLoader(os.path.join(path, "flavors"))
| {"golden_diff": "diff --git a/setup/server.py b/setup/server.py\n--- a/setup/server.py\n+++ b/setup/server.py\n@@ -54,11 +54,11 @@\n @app.context_processor\n def app_context():\n return dict(\n- versions=os.getenv(\"VERSIONS\",\"master\").split(','), \n+ versions=os.getenv(\"VERSIONS\",\"master\").split(','),\n stable_version = os.getenv(\"stable_version\", \"master\")\n )\n \n- prefix_bp = flask.Blueprint(version, __name__)\n+ prefix_bp = flask.Blueprint(version.replace(\".\", \"_\"), __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n", "issue": "Weblate instance is down\nI tried accessing the Weblate instance and potentially add another language but it looks down.\n", "before_files": [{"content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\nimport hashlib\nimport time\n\n\nversion = os.getenv(\"this_version\", \"master\")\nstatic_url_path = \"/\" + version + \"/static\"\napp = flask.Flask(__name__, static_url_path=static_url_path)\nflask_bootstrap.Bootstrap(app)\ndb = redis.StrictRedis(host='redis', port=6379, db=0)\n\n\ndef render_flavor(flavor, template, data):\n return flask.render_template(\n os.path.join(flavor, template),\n **data\n )\n\n\[email protected]_template_global\ndef secret(length=16):\n charset = string.ascii_uppercase + string.digits\n return ''.join(\n random.SystemRandom().choice(charset)\n for _ in range(length)\n )\n\n#Original copied from https://github.com/andrewlkho/ulagen\ndef random_ipv6_subnet():\n eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff\n eui64_canon = \"-\".join([format(eui64, \"02X\")[i:i+2] for i in range(0, 18, 2)])\n\n h = hashlib.sha1()\n h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8'))\n globalid = h.hexdigest()[0:10]\n\n prefix = \":\".join((\"fd\" + globalid[0:2], globalid[2:6], globalid[6:10]))\n return prefix\n\ndef build_app(path):\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n\n @app.context_processor\n def app_context():\n return dict(\n versions=os.getenv(\"VERSIONS\",\"master\").split(','), \n stable_version = os.getenv(\"stable_version\", \"master\")\n )\n\n prefix_bp = flask.Blueprint(version, __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n root_bp = flask.Blueprint(\"root\", __name__)\n root_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n @prefix_bp.context_processor\n @root_bp.context_processor\n def bp_context(version=version):\n return dict(version=version)\n\n @prefix_bp.route(\"/\")\n @root_bp.route(\"/\")\n def wizard():\n return flask.render_template('wizard.html')\n\n @prefix_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n @root_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n def submit_flavor():\n data = flask.request.form.copy()\n subnet6 = random_ipv6_subnet()\n steps = sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", data[\"flavor\"])))\n return flask.render_template('wizard.html', flavor=data[\"flavor\"], steps=steps, subnet6=subnet6)\n\n @prefix_bp.route(\"/submit\", methods=[\"POST\"])\n 
@root_bp.route(\"/submit\", methods=[\"POST\"])\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n try:\n data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])\n except ValueError as err:\n return \"Error while generating files: \" + str(err)\n db.set(data['uid'], json.dumps(data))\n return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n\n @prefix_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n @root_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n def setup(uid):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n rendered = render_flavor(flavor, \"setup.html\", data)\n return flask.render_template(\"setup.html\", contents=rendered)\n\n @prefix_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n @root_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n def file(uid, filepath):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n return flask.Response(\n render_flavor(flavor, filepath, data),\n mimetype=\"application/text\"\n )\n\n app.register_blueprint(prefix_bp, url_prefix=\"/{}\".format(version))\n app.register_blueprint(root_bp)\n\n\nif __name__ == \"__main__\":\n build_app(\"/tmp/mailutest\")\n app.run(debug=True)\n", "path": "setup/server.py"}], "after_files": [{"content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\nimport hashlib\nimport time\n\n\nversion = os.getenv(\"this_version\", \"master\")\nstatic_url_path = \"/\" + version + \"/static\"\napp = flask.Flask(__name__, static_url_path=static_url_path)\nflask_bootstrap.Bootstrap(app)\ndb = redis.StrictRedis(host='redis', port=6379, db=0)\n\n\ndef render_flavor(flavor, template, data):\n return flask.render_template(\n os.path.join(flavor, template),\n **data\n )\n\n\[email protected]_template_global\ndef secret(length=16):\n charset = string.ascii_uppercase + string.digits\n return ''.join(\n random.SystemRandom().choice(charset)\n for _ in range(length)\n )\n\n#Original copied from https://github.com/andrewlkho/ulagen\ndef random_ipv6_subnet():\n eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff\n eui64_canon = \"-\".join([format(eui64, \"02X\")[i:i+2] for i in range(0, 18, 2)])\n\n h = hashlib.sha1()\n h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8'))\n globalid = h.hexdigest()[0:10]\n\n prefix = \":\".join((\"fd\" + globalid[0:2], globalid[2:6], globalid[6:10]))\n return prefix\n\ndef build_app(path):\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n\n @app.context_processor\n def app_context():\n return dict(\n versions=os.getenv(\"VERSIONS\",\"master\").split(','),\n stable_version = os.getenv(\"stable_version\", \"master\")\n )\n\n prefix_bp = flask.Blueprint(version.replace(\".\", \"_\"), __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n root_bp = flask.Blueprint(\"root\", __name__)\n root_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n @prefix_bp.context_processor\n @root_bp.context_processor\n def bp_context(version=version):\n return dict(version=version)\n\n @prefix_bp.route(\"/\")\n @root_bp.route(\"/\")\n def 
wizard():\n return flask.render_template('wizard.html')\n\n @prefix_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n @root_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n def submit_flavor():\n data = flask.request.form.copy()\n subnet6 = random_ipv6_subnet()\n steps = sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", data[\"flavor\"])))\n return flask.render_template('wizard.html', flavor=data[\"flavor\"], steps=steps, subnet6=subnet6)\n\n @prefix_bp.route(\"/submit\", methods=[\"POST\"])\n @root_bp.route(\"/submit\", methods=[\"POST\"])\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n try:\n data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])\n except ValueError as err:\n return \"Error while generating files: \" + str(err)\n db.set(data['uid'], json.dumps(data))\n return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n\n @prefix_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n @root_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n def setup(uid):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n rendered = render_flavor(flavor, \"setup.html\", data)\n return flask.render_template(\"setup.html\", contents=rendered)\n\n @prefix_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n @root_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n def file(uid, filepath):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n return flask.Response(\n render_flavor(flavor, filepath, data),\n mimetype=\"application/text\"\n )\n\n app.register_blueprint(prefix_bp, url_prefix=\"/{}\".format(version))\n app.register_blueprint(root_bp)\n\n\nif __name__ == \"__main__\":\n build_app(\"/tmp/mailutest\")\n app.run(debug=True)\n", "path": "setup/server.py"}]} | 1,648 | 170 |
gh_patches_debug_48394 | rasdani/github-patches | git_diff | DDMAL__CantusDB-274 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Assign a specific user to multiple sources in Django admin
In the user-edit page in the Django admin interface, we already have a selector that allows for multi-selecting sources and assigning the user to them. We need to make the selector box wider so that the source titles are not clipped.
This issue is related to issue #216: the relationship between the User model and the Source model should go both ways.
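One possible way to widen that selector is sketched below; it assumes the sources selector is a regular many-to-many form field on the user admin, and the 600px width is an arbitrary choice, not a value from this project.

```python
from django.contrib import admin


class UserAdmin(admin.ModelAdmin):
    # hypothetical admin class for the user model; widens every
    # multi-select widget so long source titles are not clipped
    def formfield_for_manytomany(self, db_field, request, **kwargs):
        formfield = super().formfield_for_manytomany(db_field, request, **kwargs)
        formfield.widget.attrs["style"] = "width: 600px;"
        return formfield
```

The same effect could also be achieved with plain CSS on the admin change form; the widget override just keeps it in Python.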
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/models/source.py`
Content:
```
1 from django.db import models
2 from main_app.models import BaseModel, Segment
3 from django.contrib.auth import get_user_model
4
5
6 class Source(BaseModel):
7 cursus_choices = [("Monastic", "Monastic"), ("Secular", "Secular")]
8 source_status_choices = [
9 (
10 "Editing process (not all the fields have been proofread)",
11 "Editing process (not all the fields have been proofread)",
12 ),
13 ("Published / Complete", "Published / Complete"),
14 ("Published / Proofread pending", "Published / Proofread pending"),
15 ("Unpublished / Editing process", "Unpublished / Editing process"),
16 ("Unpublished / Indexing process", "Unpublished / Indexing process"),
17 ("Unpublished / Proofread pending", "Unpublished / Proofread pending"),
18 ("Unpublished / Proofreading process", "Unpublished / Proofreading process"),
19 ]
20
21 # sources with public=False cannot be accessed by its url (access denied) and do not appear in source list
22 public = models.BooleanField(blank=True, null=True)
23 # sources with visible=False can be accessed by typing in the url, but do not appear in source list
24 visible = models.BooleanField(blank=True, null=True)
25 title = models.CharField(
26 max_length=255,
27 help_text="Full Manuscript Identification (City, Archive, Shelf-mark)",
28 )
29 # the siglum field as implemented on the old Cantus is composed of both the RISM siglum and the shelfmark
30 # it is a human-readable ID for a source
31 siglum = models.CharField(
32 max_length=63,
33 null=True,
34 blank=True,
35 help_text="RISM-style siglum + Shelf-mark (e.g. GB-Ob 202).",
36 )
37 # the RISM siglum uniquely identifies a library or holding institution
38 rism_siglum = models.ForeignKey(
39 "RismSiglum", on_delete=models.PROTECT, null=True, blank=True,
40 )
41 provenance = models.ForeignKey(
42 "Provenance",
43 on_delete=models.PROTECT,
44 help_text="If the origin is unknown, select a location where the source was "
45 "used later in its lifetime and provide details in the "
46 '"Provenance notes" field.',
47 null=True,
48 blank=True,
49 )
50 provenance_notes = models.TextField(
51 blank=True,
52 null=True,
53 help_text="More exact indication of the provenance (if necessary)",
54 )
55 full_source = models.BooleanField(blank=True, null=True)
56 date = models.CharField(
57 blank=True,
58 null=True,
59 max_length=63,
60 help_text='Date of the manuscript (e.g. "1200s", "1300-1350", etc.)',
61 )
62 century = models.ManyToManyField("Century", related_name="sources")
63 notation = models.ManyToManyField("Notation", related_name="sources")
64 cursus = models.CharField(
65 blank=True, null=True, choices=cursus_choices, max_length=63
66 )
67 # TODO: Fill this field up with JSON info when I have access to the Users
68 current_editors = models.ManyToManyField(get_user_model(), related_name="sources_user_can_edit")
69 inventoried_by = models.ManyToManyField(
70 "Indexer", related_name="sources_inventoried"
71 )
72 full_text_entered_by = models.ManyToManyField(
73 "Indexer", related_name="entered_full_text_for_sources"
74 )
75 melodies_entered_by = models.ManyToManyField(
76 "Indexer", related_name="entered_melody_for_sources"
77 )
78 proofreaders = models.ManyToManyField("Indexer", related_name="proofread_sources")
79 other_editors = models.ManyToManyField("Indexer", related_name="edited_sources")
80 segment = models.ForeignKey(
81 "Segment", on_delete=models.PROTECT, blank=True, null=True
82 )
83 source_status = models.CharField(blank=True, null=True, max_length=255)
84 complete_inventory = models.BooleanField(blank=True, null=True)
85 summary = models.TextField(blank=True, null=True)
86 liturgical_occasions = models.TextField(blank=True, null=True)
87 description = models.TextField(blank=True, null=True)
88 selected_bibliography = models.TextField(blank=True, null=True)
89 image_link = models.URLField(
90 blank=True,
91 null=True,
92 help_text='HTTP link to the image gallery of the source.',
93 )
94 indexing_notes = models.TextField(blank=True, null=True)
95 indexing_date = models.TextField(blank=True, null=True)
96 json_info = models.JSONField(blank=True, null=True)
97 fragmentarium_id = models.CharField(max_length=15, blank=True, null=True)
98 dact_id = models.CharField(max_length=15, blank=True, null=True)
99
100 def number_of_chants(self) -> int:
101 """Returns the number of Chants and Sequences in this Source."""
102 return self.chant_set.count() + self.sequence_set.count()
103
104 def number_of_melodies(self) -> int:
105 """Returns the number of Chants in this Source that have melodies."""
106 return self.chant_set.filter(volpiano__isnull=False).count()
107
108 def __str__(self):
109 string = '{t} ({i})'.format(t=self.title, i=self.id)
110 return string
111
112 def save(self, *args, **kwargs):
113 # when creating a source, assign it to "Cantus Database" by default
114 cantus_db_segment = Segment.objects.get(name="CANTUS Database")
115 self.segment = cantus_db_segment
116 super().save(*args, **kwargs)
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django/cantusdb_project/main_app/models/source.py b/django/cantusdb_project/main_app/models/source.py
--- a/django/cantusdb_project/main_app/models/source.py
+++ b/django/cantusdb_project/main_app/models/source.py
@@ -106,7 +106,7 @@
return self.chant_set.filter(volpiano__isnull=False).count()
def __str__(self):
- string = '{t} ({i})'.format(t=self.title, i=self.id)
+ string = '[{s}] {t} ({i})'.format(s=self.rism_siglum, t=self.title, i=self.id)
return string
def save(self, *args, **kwargs):
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/models/source.py b/django/cantusdb_project/main_app/models/source.py\n--- a/django/cantusdb_project/main_app/models/source.py\n+++ b/django/cantusdb_project/main_app/models/source.py\n@@ -106,7 +106,7 @@\n return self.chant_set.filter(volpiano__isnull=False).count()\n \n def __str__(self):\n- string = '{t} ({i})'.format(t=self.title, i=self.id)\n+ string = '[{s}] {t} ({i})'.format(s=self.rism_siglum, t=self.title, i=self.id)\n return string\n \n def save(self, *args, **kwargs):\n", "issue": "Assign a specific user to multiple sources in Django admin\nIn the user-edit page in the Django admin interface, we already have a selector that allows for multi-selecting sources and assigning the user to them. We need to make the selector box wider so that the source titles are not clipped. \r\n\r\nThis issue is related to issue #216 , the relationship between the User model and Source model should go both ways. \n", "before_files": [{"content": "from django.db import models\nfrom main_app.models import BaseModel, Segment\nfrom django.contrib.auth import get_user_model\n\n\nclass Source(BaseModel):\n cursus_choices = [(\"Monastic\", \"Monastic\"), (\"Secular\", \"Secular\")]\n source_status_choices = [\n (\n \"Editing process (not all the fields have been proofread)\",\n \"Editing process (not all the fields have been proofread)\",\n ),\n (\"Published / Complete\", \"Published / Complete\"),\n (\"Published / Proofread pending\", \"Published / Proofread pending\"),\n (\"Unpublished / Editing process\", \"Unpublished / Editing process\"),\n (\"Unpublished / Indexing process\", \"Unpublished / Indexing process\"),\n (\"Unpublished / Proofread pending\", \"Unpublished / Proofread pending\"),\n (\"Unpublished / Proofreading process\", \"Unpublished / Proofreading process\"),\n ]\n\n # sources with public=False cannot be accessed by its url (access denied) and do not appear in source list\n public = models.BooleanField(blank=True, null=True)\n # sources with visible=False can be accessed by typing in the url, but do not appear in source list\n visible = models.BooleanField(blank=True, null=True)\n title = models.CharField(\n max_length=255,\n help_text=\"Full Manuscript Identification (City, Archive, Shelf-mark)\",\n )\n # the siglum field as implemented on the old Cantus is composed of both the RISM siglum and the shelfmark\n # it is a human-readable ID for a source\n siglum = models.CharField(\n max_length=63, \n null=True, \n blank=True,\n help_text=\"RISM-style siglum + Shelf-mark (e.g. GB-Ob 202).\",\n )\n # the RISM siglum uniquely identifies a library or holding institution\n rism_siglum = models.ForeignKey(\n \"RismSiglum\", on_delete=models.PROTECT, null=True, blank=True,\n )\n provenance = models.ForeignKey(\n \"Provenance\",\n on_delete=models.PROTECT,\n help_text=\"If the origin is unknown, select a location where the source was \"\n \"used later in its lifetime and provide details in the \"\n '\"Provenance notes\" field.',\n null=True,\n blank=True,\n )\n provenance_notes = models.TextField(\n blank=True,\n null=True,\n help_text=\"More exact indication of the provenance (if necessary)\",\n )\n full_source = models.BooleanField(blank=True, null=True)\n date = models.CharField(\n blank=True,\n null=True,\n max_length=63,\n help_text='Date of the manuscript (e.g. 
\"1200s\", \"1300-1350\", etc.)',\n )\n century = models.ManyToManyField(\"Century\", related_name=\"sources\")\n notation = models.ManyToManyField(\"Notation\", related_name=\"sources\")\n cursus = models.CharField(\n blank=True, null=True, choices=cursus_choices, max_length=63\n )\n # TODO: Fill this field up with JSON info when I have access to the Users\n current_editors = models.ManyToManyField(get_user_model(), related_name=\"sources_user_can_edit\")\n inventoried_by = models.ManyToManyField(\n \"Indexer\", related_name=\"sources_inventoried\"\n )\n full_text_entered_by = models.ManyToManyField(\n \"Indexer\", related_name=\"entered_full_text_for_sources\"\n )\n melodies_entered_by = models.ManyToManyField(\n \"Indexer\", related_name=\"entered_melody_for_sources\"\n )\n proofreaders = models.ManyToManyField(\"Indexer\", related_name=\"proofread_sources\")\n other_editors = models.ManyToManyField(\"Indexer\", related_name=\"edited_sources\")\n segment = models.ForeignKey(\n \"Segment\", on_delete=models.PROTECT, blank=True, null=True\n )\n source_status = models.CharField(blank=True, null=True, max_length=255)\n complete_inventory = models.BooleanField(blank=True, null=True)\n summary = models.TextField(blank=True, null=True)\n liturgical_occasions = models.TextField(blank=True, null=True)\n description = models.TextField(blank=True, null=True)\n selected_bibliography = models.TextField(blank=True, null=True)\n image_link = models.URLField(\n blank=True, \n null=True,\n help_text='HTTP link to the image gallery of the source.',\n )\n indexing_notes = models.TextField(blank=True, null=True)\n indexing_date = models.TextField(blank=True, null=True)\n json_info = models.JSONField(blank=True, null=True)\n fragmentarium_id = models.CharField(max_length=15, blank=True, null=True)\n dact_id = models.CharField(max_length=15, blank=True, null=True)\n\n def number_of_chants(self) -> int:\n \"\"\"Returns the number of Chants and Sequences in this Source.\"\"\"\n return self.chant_set.count() + self.sequence_set.count()\n\n def number_of_melodies(self) -> int:\n \"\"\"Returns the number of Chants in this Source that have melodies.\"\"\"\n return self.chant_set.filter(volpiano__isnull=False).count()\n\n def __str__(self):\n string = '{t} ({i})'.format(t=self.title, i=self.id)\n return string\n\n def save(self, *args, **kwargs):\n # when creating a source, assign it to \"Cantus Database\" by default\n cantus_db_segment = Segment.objects.get(name=\"CANTUS Database\")\n self.segment = cantus_db_segment\n super().save(*args, **kwargs)\n", "path": "django/cantusdb_project/main_app/models/source.py"}], "after_files": [{"content": "from django.db import models\nfrom main_app.models import BaseModel, Segment\nfrom django.contrib.auth import get_user_model\n\n\nclass Source(BaseModel):\n cursus_choices = [(\"Monastic\", \"Monastic\"), (\"Secular\", \"Secular\")]\n source_status_choices = [\n (\n \"Editing process (not all the fields have been proofread)\",\n \"Editing process (not all the fields have been proofread)\",\n ),\n (\"Published / Complete\", \"Published / Complete\"),\n (\"Published / Proofread pending\", \"Published / Proofread pending\"),\n (\"Unpublished / Editing process\", \"Unpublished / Editing process\"),\n (\"Unpublished / Indexing process\", \"Unpublished / Indexing process\"),\n (\"Unpublished / Proofread pending\", \"Unpublished / Proofread pending\"),\n (\"Unpublished / Proofreading process\", \"Unpublished / Proofreading process\"),\n ]\n\n # sources with public=False cannot be 
accessed by its url (access denied) and do not appear in source list\n public = models.BooleanField(blank=True, null=True)\n # sources with visible=False can be accessed by typing in the url, but do not appear in source list\n visible = models.BooleanField(blank=True, null=True)\n title = models.CharField(\n max_length=255,\n help_text=\"Full Manuscript Identification (City, Archive, Shelf-mark)\",\n )\n # the siglum field as implemented on the old Cantus is composed of both the RISM siglum and the shelfmark\n # it is a human-readable ID for a source\n siglum = models.CharField(\n max_length=63, \n null=True, \n blank=True,\n help_text=\"RISM-style siglum + Shelf-mark (e.g. GB-Ob 202).\",\n )\n # the RISM siglum uniquely identifies a library or holding institution\n rism_siglum = models.ForeignKey(\n \"RismSiglum\", on_delete=models.PROTECT, null=True, blank=True,\n )\n provenance = models.ForeignKey(\n \"Provenance\",\n on_delete=models.PROTECT,\n help_text=\"If the origin is unknown, select a location where the source was \"\n \"used later in its lifetime and provide details in the \"\n '\"Provenance notes\" field.',\n null=True,\n blank=True,\n )\n provenance_notes = models.TextField(\n blank=True,\n null=True,\n help_text=\"More exact indication of the provenance (if necessary)\",\n )\n full_source = models.BooleanField(blank=True, null=True)\n date = models.CharField(\n blank=True,\n null=True,\n max_length=63,\n help_text='Date of the manuscript (e.g. \"1200s\", \"1300-1350\", etc.)',\n )\n century = models.ManyToManyField(\"Century\", related_name=\"sources\")\n notation = models.ManyToManyField(\"Notation\", related_name=\"sources\")\n cursus = models.CharField(\n blank=True, null=True, choices=cursus_choices, max_length=63\n )\n # TODO: Fill this field up with JSON info when I have access to the Users\n current_editors = models.ManyToManyField(get_user_model(), related_name=\"sources_user_can_edit\")\n inventoried_by = models.ManyToManyField(\n \"Indexer\", related_name=\"sources_inventoried\"\n )\n full_text_entered_by = models.ManyToManyField(\n \"Indexer\", related_name=\"entered_full_text_for_sources\"\n )\n melodies_entered_by = models.ManyToManyField(\n \"Indexer\", related_name=\"entered_melody_for_sources\"\n )\n proofreaders = models.ManyToManyField(\"Indexer\", related_name=\"proofread_sources\")\n other_editors = models.ManyToManyField(\"Indexer\", related_name=\"edited_sources\")\n segment = models.ForeignKey(\n \"Segment\", on_delete=models.PROTECT, blank=True, null=True\n )\n source_status = models.CharField(blank=True, null=True, max_length=255)\n complete_inventory = models.BooleanField(blank=True, null=True)\n summary = models.TextField(blank=True, null=True)\n liturgical_occasions = models.TextField(blank=True, null=True)\n description = models.TextField(blank=True, null=True)\n selected_bibliography = models.TextField(blank=True, null=True)\n image_link = models.URLField(\n blank=True, \n null=True,\n help_text='HTTP link to the image gallery of the source.',\n )\n indexing_notes = models.TextField(blank=True, null=True)\n indexing_date = models.TextField(blank=True, null=True)\n json_info = models.JSONField(blank=True, null=True)\n fragmentarium_id = models.CharField(max_length=15, blank=True, null=True)\n dact_id = models.CharField(max_length=15, blank=True, null=True)\n\n def number_of_chants(self) -> int:\n \"\"\"Returns the number of Chants and Sequences in this Source.\"\"\"\n return self.chant_set.count() + self.sequence_set.count()\n\n def 
number_of_melodies(self) -> int:\n \"\"\"Returns the number of Chants in this Source that have melodies.\"\"\"\n return self.chant_set.filter(volpiano__isnull=False).count()\n\n def __str__(self):\n string = '[{s}] {t} ({i})'.format(s=self.rism_siglum, t=self.title, i=self.id)\n return string\n\n def save(self, *args, **kwargs):\n # when creating a source, assign it to \"Cantus Database\" by default\n cantus_db_segment = Segment.objects.get(name=\"CANTUS Database\")\n self.segment = cantus_db_segment\n super().save(*args, **kwargs)\n", "path": "django/cantusdb_project/main_app/models/source.py"}]} | 1,792 | 166 |
gh_patches_debug_6027 | rasdani/github-patches | git_diff | twisted__twisted-12103 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Avoid encode/decode in chat.py for better readability
As discussed in [this comment](https://github.com/twisted/twisted/pull/12070#discussion_r1442784443), it's better to use byte concatenation, as mentioned there, for better readability in [docs/core/howto/listings/servers/chat.py:35](https://github.com/twisted/twisted/pull/12070/files/c59c93ec644a17e0f3a1752ca9ceca31a27a9f5e#diff-0923ff3db530a2e5d28ea8cc2b3a8f91f399792786772c541bf9edf7a0c50126):
```python
message = b'<' + self.name + b'> ' + message
```
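For reference, the two equivalent forms side by side (both taken from this file and its patch; `self.name` and `message` are already `bytes` here):

```python
# current: decodes both values only to re-encode the joined string
message = f"<{self.name.decode('utf-8')}> {message.decode('utf-8')}".encode("utf-8")

# proposed: stays in bytes the whole way
message = b"<" + self.name + b"> " + message
```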
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/core/howto/listings/servers/chat.py`
Content:
```
1 from twisted.internet import reactor
2 from twisted.internet.protocol import Factory
3 from twisted.protocols.basic import LineReceiver
4
5
6 class Chat(LineReceiver):
7 def __init__(self, users):
8 self.users = users
9 self.name = None
10 self.state = "GETNAME"
11
12 def connectionMade(self):
13 self.sendLine(b"What's your name?")
14
15 def connectionLost(self, reason):
16 if self.name in self.users:
17 del self.users[self.name]
18
19 def lineReceived(self, line):
20 if self.state == "GETNAME":
21 self.handle_GETNAME(line)
22 else:
23 self.handle_CHAT(line)
24
25 def handle_GETNAME(self, name):
26 if name in self.users:
27 self.sendLine(b"Name taken, please choose another.")
28 return
29 self.sendLine(f"Welcome, {name.decode('utf-8')}!".encode("utf-8"))
30 self.name = name
31 self.users[name] = self
32 self.state = "CHAT"
33
34 def handle_CHAT(self, message):
35 message = f"<{self.name.decode('utf-8')}> {message.decode('utf-8')}".encode(
36 "utf-8"
37 )
38 for name, protocol in self.users.items():
39 if protocol != self:
40 protocol.sendLine(message)
41
42
43 class ChatFactory(Factory):
44 def __init__(self):
45 self.users = {} # maps user names to Chat instances
46
47 def buildProtocol(self, addr):
48 return Chat(self.users)
49
50
51 reactor.listenTCP(8123, ChatFactory())
52 reactor.run()
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/core/howto/listings/servers/chat.py b/docs/core/howto/listings/servers/chat.py
--- a/docs/core/howto/listings/servers/chat.py
+++ b/docs/core/howto/listings/servers/chat.py
@@ -32,9 +32,7 @@
self.state = "CHAT"
def handle_CHAT(self, message):
- message = f"<{self.name.decode('utf-8')}> {message.decode('utf-8')}".encode(
- "utf-8"
- )
+ message = b"<" + self.name + b"> " + message
for name, protocol in self.users.items():
if protocol != self:
protocol.sendLine(message)
| {"golden_diff": "diff --git a/docs/core/howto/listings/servers/chat.py b/docs/core/howto/listings/servers/chat.py\n--- a/docs/core/howto/listings/servers/chat.py\n+++ b/docs/core/howto/listings/servers/chat.py\n@@ -32,9 +32,7 @@\n self.state = \"CHAT\"\n \n def handle_CHAT(self, message):\n- message = f\"<{self.name.decode('utf-8')}> {message.decode('utf-8')}\".encode(\n- \"utf-8\"\n- )\n+ message = b\"<\" + self.name + b\"> \" + message\n for name, protocol in self.users.items():\n if protocol != self:\n protocol.sendLine(message)\n", "issue": "Avoid encode/decode in chat.py for better readablity\nAs discussed in [this comment](https://github.com/twisted/twisted/pull/12070#discussion_r1442784443), it's better to use byte concat as mentioned in order of better readability in [docs/core/howto/listings/servers/chat.py:35](https://github.com/twisted/twisted/pull/12070/files/c59c93ec644a17e0f3a1752ca9ceca31a27a9f5e#diff-0923ff3db530a2e5d28ea8cc2b3a8f91f399792786772c541bf9edf7a0c50126)\r\n```python\r\nmessage = b'<' + self.name + b'> ' + message\r\n```\n", "before_files": [{"content": "from twisted.internet import reactor\nfrom twisted.internet.protocol import Factory\nfrom twisted.protocols.basic import LineReceiver\n\n\nclass Chat(LineReceiver):\n def __init__(self, users):\n self.users = users\n self.name = None\n self.state = \"GETNAME\"\n\n def connectionMade(self):\n self.sendLine(b\"What's your name?\")\n\n def connectionLost(self, reason):\n if self.name in self.users:\n del self.users[self.name]\n\n def lineReceived(self, line):\n if self.state == \"GETNAME\":\n self.handle_GETNAME(line)\n else:\n self.handle_CHAT(line)\n\n def handle_GETNAME(self, name):\n if name in self.users:\n self.sendLine(b\"Name taken, please choose another.\")\n return\n self.sendLine(f\"Welcome, {name.decode('utf-8')}!\".encode(\"utf-8\"))\n self.name = name\n self.users[name] = self\n self.state = \"CHAT\"\n\n def handle_CHAT(self, message):\n message = f\"<{self.name.decode('utf-8')}> {message.decode('utf-8')}\".encode(\n \"utf-8\"\n )\n for name, protocol in self.users.items():\n if protocol != self:\n protocol.sendLine(message)\n\n\nclass ChatFactory(Factory):\n def __init__(self):\n self.users = {} # maps user names to Chat instances\n\n def buildProtocol(self, addr):\n return Chat(self.users)\n\n\nreactor.listenTCP(8123, ChatFactory())\nreactor.run()\n", "path": "docs/core/howto/listings/servers/chat.py"}], "after_files": [{"content": "from twisted.internet import reactor\nfrom twisted.internet.protocol import Factory\nfrom twisted.protocols.basic import LineReceiver\n\n\nclass Chat(LineReceiver):\n def __init__(self, users):\n self.users = users\n self.name = None\n self.state = \"GETNAME\"\n\n def connectionMade(self):\n self.sendLine(b\"What's your name?\")\n\n def connectionLost(self, reason):\n if self.name in self.users:\n del self.users[self.name]\n\n def lineReceived(self, line):\n if self.state == \"GETNAME\":\n self.handle_GETNAME(line)\n else:\n self.handle_CHAT(line)\n\n def handle_GETNAME(self, name):\n if name in self.users:\n self.sendLine(b\"Name taken, please choose another.\")\n return\n self.sendLine(f\"Welcome, {name.decode('utf-8')}!\".encode(\"utf-8\"))\n self.name = name\n self.users[name] = self\n self.state = \"CHAT\"\n\n def handle_CHAT(self, message):\n message = b\"<\" + self.name + b\"> \" + message\n for name, protocol in self.users.items():\n if protocol != self:\n protocol.sendLine(message)\n\n\nclass ChatFactory(Factory):\n def __init__(self):\n self.users = {} # maps 
user names to Chat instances\n\n def buildProtocol(self, addr):\n return Chat(self.users)\n\n\nreactor.listenTCP(8123, ChatFactory())\nreactor.run()\n", "path": "docs/core/howto/listings/servers/chat.py"}]} | 916 | 156 |
gh_patches_debug_548 | rasdani/github-patches | git_diff | Gallopsled__pwntools-532 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bpython
Hi,
Unfortunately, pwntools doesn't seem to work with bpython 0.12 in conjunction with Python 2.7.9.
from pwn import *
results in:
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/pwn/**init**.py", line 2, in <module>
from .toplevel import *
File "/usr/local/lib/python2.7/dist-packages/pwn/toplevel.py", line 2, in <module>
from pwnlib import *
File "/usr/local/lib/python2.7/dist-packages/pwnlib/**init**.py", line 10, in <module>
from . import \
File "/usr/local/lib/python2.7/dist-packages/pwnlib/asm.py", line 45, in <module>
from . import log
File "/usr/local/lib/python2.7/dist-packages/pwnlib/log.py", line 69, in <module>
from .term import spinners, text
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/**init**.py", line 1, in <module>
from . import key, readline, text, termcap, keymap, term
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/readline.py", line 2, in <module>
from . import term, text
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py", line 111, in <module>
sys.modules[__name__] = Module()
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py", line 22, in **init**
self.num_colors = termcap.get('colors', default = 8)
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/termcap.py", line 15, in get
init()
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/termcap.py", line 39, in init
curses.setupterm()
TypeError: argument must be an int, or have a fileno() method.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwnlib/term/termcap.py`
Content:
```
1 __all__ = ['get']
2 import os, curses
3
4 cache = None
5 def get(cap, *args, **kwargs):
6 default = kwargs.pop('default', '')
7
8 if 'PWNLIB_NOTERM' in os.environ:
9 return ''
10
11 if kwargs != {}:
12 raise TypeError("get(): No such argument %r" % kwargs.popitem()[0])
13
14 if cache == None:
15 init()
16 s = cache.get(cap)
17 if not s:
18 s = curses.tigetstr(cap)
19 if s == None:
20 s = curses.tigetnum(cap)
21 if s == -2:
22 s = curses.tigetflag(cap)
23 if s == -1:
24 # default to empty string so tparm doesn't fail
25 s = ''
26 else:
27 s = bool(s)
28 cache[cap] = s
29 # if `s' is not set `curses.tparm' will throw an error if given arguments
30 if args and s:
31 return curses.tparm(s, *args)
32 else:
33 return s
34
35 def init():
36 global cache
37
38 if 'PWNLIB_NOTERM' not in os.environ:
39 curses.setupterm()
40
41 cache = {}
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwnlib/term/termcap.py b/pwnlib/term/termcap.py
--- a/pwnlib/term/termcap.py
+++ b/pwnlib/term/termcap.py
@@ -36,6 +36,10 @@
global cache
if 'PWNLIB_NOTERM' not in os.environ:
- curses.setupterm()
+ # Fix for BPython
+ try:
+ curses.setupterm()
+ except:
+ pass
cache = {}
| {"golden_diff": "diff --git a/pwnlib/term/termcap.py b/pwnlib/term/termcap.py\n--- a/pwnlib/term/termcap.py\n+++ b/pwnlib/term/termcap.py\n@@ -36,6 +36,10 @@\n global cache\n \n if 'PWNLIB_NOTERM' not in os.environ:\n- curses.setupterm()\n+ # Fix for BPython\n+ try:\n+ curses.setupterm()\n+ except:\n+ pass\n \n cache = {}\n", "issue": "Bpython\nHi,\n\nUnfortunately pwntools doesn't seem to work with bpython 0.12 in conjunction of python 2.7.9.\n\nfrom pwn import *\n\nresults in:\n\nTraceback (most recent call last):\n File \"<input>\", line 1, in <module>\n File \"/usr/local/lib/python2.7/dist-packages/pwn/**init**.py\", line 2, in <module>\n from .toplevel import *\n File \"/usr/local/lib/python2.7/dist-packages/pwn/toplevel.py\", line 2, in <module>\n from pwnlib import *\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/**init**.py\", line 10, in <module>\n from . import \\\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/asm.py\", line 45, in <module>\n from . import log\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/log.py\", line 69, in <module>\n from .term import spinners, text\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/**init**.py\", line 1, in <module>\n from . import key, readline, text, termcap, keymap, term\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/readline.py\", line 2, in <module>\n from . import term, text\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py\", line 111, in <module>\n sys.modules[**name**] = Module()\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py\", line 22, in **init**\n self.num_colors = termcap.get('colors', default = 8)\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/termcap.py\", line 15, in get\n init()\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/termcap.py\", line 39, in init\n curses.setupterm()\nTypeError: argument must be an int, or have a fileno() method.\n\n", "before_files": [{"content": "__all__ = ['get']\nimport os, curses\n\ncache = None\ndef get(cap, *args, **kwargs):\n default = kwargs.pop('default', '')\n\n if 'PWNLIB_NOTERM' in os.environ:\n return ''\n\n if kwargs != {}:\n raise TypeError(\"get(): No such argument %r\" % kwargs.popitem()[0])\n\n if cache == None:\n init()\n s = cache.get(cap)\n if not s:\n s = curses.tigetstr(cap)\n if s == None:\n s = curses.tigetnum(cap)\n if s == -2:\n s = curses.tigetflag(cap)\n if s == -1:\n # default to empty string so tparm doesn't fail\n s = ''\n else:\n s = bool(s)\n cache[cap] = s\n # if `s' is not set `curses.tparm' will throw an error if given arguments\n if args and s:\n return curses.tparm(s, *args)\n else:\n return s\n\ndef init():\n global cache\n\n if 'PWNLIB_NOTERM' not in os.environ:\n curses.setupterm()\n\n cache = {}\n", "path": "pwnlib/term/termcap.py"}], "after_files": [{"content": "__all__ = ['get']\nimport os, curses\n\ncache = None\ndef get(cap, *args, **kwargs):\n default = kwargs.pop('default', '')\n\n if 'PWNLIB_NOTERM' in os.environ:\n return ''\n\n if kwargs != {}:\n raise TypeError(\"get(): No such argument %r\" % kwargs.popitem()[0])\n\n if cache == None:\n init()\n s = cache.get(cap)\n if not s:\n s = curses.tigetstr(cap)\n if s == None:\n s = curses.tigetnum(cap)\n if s == -2:\n s = curses.tigetflag(cap)\n if s == -1:\n # default to empty string so tparm doesn't fail\n s = ''\n else:\n s = bool(s)\n cache[cap] = s\n # if `s' is not set `curses.tparm' will throw an error if given arguments\n if args and s:\n return curses.tparm(s, *args)\n else:\n 
return s\n\ndef init():\n global cache\n\n if 'PWNLIB_NOTERM' not in os.environ:\n # Fix for BPython\n try:\n curses.setupterm()\n except:\n pass\n\n cache = {}\n", "path": "pwnlib/term/termcap.py"}]} | 1,098 | 117 |
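The traceback in the Bpython issue comes from `curses.setupterm()` being called while the REPL has replaced `sys.stdout` with an object that lacks a usable `fileno()`. A minimal sketch of the defensive pattern the patch adopts, with the boolean return value added here only for illustration:

```python
import curses

def init_termcap():
    # Under bpython-style shells sys.stdout may have no real fileno(),
    # which makes curses.setupterm() raise; degrade to "no terminal" instead.
    try:
        curses.setupterm()
        return True
    except Exception:
        return False

print("terminal capabilities available:", init_termcap())
```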
gh_patches_debug_1990 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2137 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug in project document category API
## Test plan
The project_document_category should not give an error. E.g. `http://rsr.localdev.akvo.org/rest/v1/project_document_category/` should load.
## Issue description
The project document category API gives an error. See http://sentry.support.akvo-ops.org/rsr/test/group/879/, or on the Test server: http://rsr.test.akvo.org/rest/v1/project_document_category/.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/project_document.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import ProjectDocument, ProjectDocumentCategory
9
10 from ..serializers import ProjectDocumentSerializer, ProjectDocumentCategorySerializer
11 from ..viewsets import PublicProjectViewSet
12
13
14 class ProjectDocumentViewSet(PublicProjectViewSet):
15 """
16 """
17 queryset = ProjectDocument.objects.all()
18 serializer_class = ProjectDocumentSerializer
19
20
21 class ProjectDocumentCategoryViewSet(PublicProjectViewSet):
22 """
23 """
24 queryset = ProjectDocumentCategory.objects.all()
25 serializer_class = ProjectDocumentCategorySerializer
26 filter_fields = ('document__project', 'document', 'category', )
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/project_document.py b/akvo/rest/views/project_document.py
--- a/akvo/rest/views/project_document.py
+++ b/akvo/rest/views/project_document.py
@@ -24,3 +24,4 @@
queryset = ProjectDocumentCategory.objects.all()
serializer_class = ProjectDocumentCategorySerializer
filter_fields = ('document__project', 'document', 'category', )
+ project_relation = 'document__project__'
| {"golden_diff": "diff --git a/akvo/rest/views/project_document.py b/akvo/rest/views/project_document.py\n--- a/akvo/rest/views/project_document.py\n+++ b/akvo/rest/views/project_document.py\n@@ -24,3 +24,4 @@\n queryset = ProjectDocumentCategory.objects.all()\n serializer_class = ProjectDocumentCategorySerializer\n filter_fields = ('document__project', 'document', 'category', )\n+ project_relation = 'document__project__'\n", "issue": "Bug in project document category API\n## Test plan\n\nThe project_document_category should not give an error. E.g. `http://rsr.localdev.akvo.org/rest/v1/project_document_category/` should load.\n## Issue description\n\nThe project document category API gives an error. See http://sentry.support.akvo-ops.org/rsr/test/group/879/, or on the Test server: http://rsr.test.akvo.org/rest/v1/project_document_category/.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import ProjectDocument, ProjectDocumentCategory\n\nfrom ..serializers import ProjectDocumentSerializer, ProjectDocumentCategorySerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass ProjectDocumentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = ProjectDocument.objects.all()\n serializer_class = ProjectDocumentSerializer\n\n\nclass ProjectDocumentCategoryViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = ProjectDocumentCategory.objects.all()\n serializer_class = ProjectDocumentCategorySerializer\n filter_fields = ('document__project', 'document', 'category', )\n", "path": "akvo/rest/views/project_document.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import ProjectDocument, ProjectDocumentCategory\n\nfrom ..serializers import ProjectDocumentSerializer, ProjectDocumentCategorySerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass ProjectDocumentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = ProjectDocument.objects.all()\n serializer_class = ProjectDocumentSerializer\n\n\nclass ProjectDocumentCategoryViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = ProjectDocumentCategory.objects.all()\n serializer_class = ProjectDocumentCategorySerializer\n filter_fields = ('document__project', 'document', 'category', )\n project_relation = 'document__project__'\n", "path": "akvo/rest/views/project_document.py"}]} | 595 | 101 |
gh_patches_debug_7593 | rasdani/github-patches | git_diff | python-pillow__Pillow-1230 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot identify XBM file created with filename including underscore
Pillow 2.8.1, Python 2.7.6 (Anaconda 2.2.0), Windows 7 64bit
When I create git_hub.xbm (with ImageMagick), the created file's header contains lines like this.
``` C
#define git_hub_width 32
#define git_hub_height 32
```
In XbmImagePlugin.py, the regular expression that extracts the XBM header doesn't match a defined macro whose name contains two or more underscores, like the one above. This causes an IOError.
``` python
# XBM header
xbm_head = re.compile(
b"\s*#define[ \t]+[^_]*_width[ \t]+(?P<width>[0-9]+)[\r\n]+"
b"#define[ \t]+[^_]*_height[ \t]+(?P<height>[0-9]+)[\r\n]+"
b"(?P<hotspot>"
b"#define[ \t]+[^_]*_x_hot[ \t]+(?P<xhot>[0-9]+)[\r\n]+"
b"#define[ \t]+[^_]*_y_hot[ \t]+(?P<yhot>[0-9]+)[\r\n]+"
b")?"
b"[\\000-\\377]*_bits\\[\\]"
)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PIL/XbmImagePlugin.py`
Content:
```
1 #
2 # The Python Imaging Library.
3 # $Id$
4 #
5 # XBM File handling
6 #
7 # History:
8 # 1995-09-08 fl Created
9 # 1996-11-01 fl Added save support
10 # 1997-07-07 fl Made header parser more tolerant
11 # 1997-07-22 fl Fixed yet another parser bug
12 # 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
13 # 2001-05-13 fl Added hotspot handling (based on code from Bernhard Herzog)
14 # 2004-02-24 fl Allow some whitespace before first #define
15 #
16 # Copyright (c) 1997-2004 by Secret Labs AB
17 # Copyright (c) 1996-1997 by Fredrik Lundh
18 #
19 # See the README file for information on usage and redistribution.
20 #
21
22 __version__ = "0.6"
23
24 import re
25 from PIL import Image, ImageFile
26
27 # XBM header
28 xbm_head = re.compile(
29 b"\s*#define[ \t]+[^_]*_width[ \t]+(?P<width>[0-9]+)[\r\n]+"
30 b"#define[ \t]+[^_]*_height[ \t]+(?P<height>[0-9]+)[\r\n]+"
31 b"(?P<hotspot>"
32 b"#define[ \t]+[^_]*_x_hot[ \t]+(?P<xhot>[0-9]+)[\r\n]+"
33 b"#define[ \t]+[^_]*_y_hot[ \t]+(?P<yhot>[0-9]+)[\r\n]+"
34 b")?"
35 b"[\\000-\\377]*_bits\\[\\]"
36 )
37
38
39 def _accept(prefix):
40 return prefix.lstrip()[:7] == b"#define"
41
42
43 ##
44 # Image plugin for X11 bitmaps.
45
46 class XbmImageFile(ImageFile.ImageFile):
47
48 format = "XBM"
49 format_description = "X11 Bitmap"
50
51 def _open(self):
52
53 m = xbm_head.match(self.fp.read(512))
54
55 if m:
56
57 xsize = int(m.group("width"))
58 ysize = int(m.group("height"))
59
60 if m.group("hotspot"):
61 self.info["hotspot"] = (
62 int(m.group("xhot")), int(m.group("yhot"))
63 )
64
65 self.mode = "1"
66 self.size = xsize, ysize
67
68 self.tile = [("xbm", (0, 0)+self.size, m.end(), None)]
69
70
71 def _save(im, fp, filename):
72
73 if im.mode != "1":
74 raise IOError("cannot write mode %s as XBM" % im.mode)
75
76 fp.write(("#define im_width %d\n" % im.size[0]).encode('ascii'))
77 fp.write(("#define im_height %d\n" % im.size[1]).encode('ascii'))
78
79 hotspot = im.encoderinfo.get("hotspot")
80 if hotspot:
81 fp.write(("#define im_x_hot %d\n" % hotspot[0]).encode('ascii'))
82 fp.write(("#define im_y_hot %d\n" % hotspot[1]).encode('ascii'))
83
84 fp.write(b"static char im_bits[] = {\n")
85
86 ImageFile._save(im, fp, [("xbm", (0, 0)+im.size, 0, None)])
87
88 fp.write(b"};\n")
89
90
91 Image.register_open("XBM", XbmImageFile, _accept)
92 Image.register_save("XBM", _save)
93
94 Image.register_extension("XBM", ".xbm")
95
96 Image.register_mime("XBM", "image/xbm")
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PIL/XbmImagePlugin.py b/PIL/XbmImagePlugin.py
--- a/PIL/XbmImagePlugin.py
+++ b/PIL/XbmImagePlugin.py
@@ -26,8 +26,8 @@
# XBM header
xbm_head = re.compile(
- b"\s*#define[ \t]+[^_]*_width[ \t]+(?P<width>[0-9]+)[\r\n]+"
- b"#define[ \t]+[^_]*_height[ \t]+(?P<height>[0-9]+)[\r\n]+"
+ b"\s*#define[ \t]+.*_width[ \t]+(?P<width>[0-9]+)[\r\n]+"
+ b"#define[ \t]+.*_height[ \t]+(?P<height>[0-9]+)[\r\n]+"
b"(?P<hotspot>"
b"#define[ \t]+[^_]*_x_hot[ \t]+(?P<xhot>[0-9]+)[\r\n]+"
b"#define[ \t]+[^_]*_y_hot[ \t]+(?P<yhot>[0-9]+)[\r\n]+"
| {"golden_diff": "diff --git a/PIL/XbmImagePlugin.py b/PIL/XbmImagePlugin.py\n--- a/PIL/XbmImagePlugin.py\n+++ b/PIL/XbmImagePlugin.py\n@@ -26,8 +26,8 @@\n \n # XBM header\n xbm_head = re.compile(\n- b\"\\s*#define[ \\t]+[^_]*_width[ \\t]+(?P<width>[0-9]+)[\\r\\n]+\"\n- b\"#define[ \\t]+[^_]*_height[ \\t]+(?P<height>[0-9]+)[\\r\\n]+\"\n+ b\"\\s*#define[ \\t]+.*_width[ \\t]+(?P<width>[0-9]+)[\\r\\n]+\"\n+ b\"#define[ \\t]+.*_height[ \\t]+(?P<height>[0-9]+)[\\r\\n]+\"\n b\"(?P<hotspot>\"\n b\"#define[ \\t]+[^_]*_x_hot[ \\t]+(?P<xhot>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_y_hot[ \\t]+(?P<yhot>[0-9]+)[\\r\\n]+\"\n", "issue": "Cannot identify XBM file created with filename including underscore\nPillow 2.8.1, Python 2.7.6 (Anaconda 2.2.0), Windows 7 64bit\n\nWhen I create git_hub.xbm (with ImageMagick), created file's header contains lines like this.\n\n``` C\n#define git_hub_width 32\n#define git_hub_height 32\n```\n\nIn XbmImagePlugin.py, regular expression to extract XBM header doesn't match defined macro with more than two underscores like above.This causes an IOError.\n\n``` python\n# XBM header\nxbm_head = re.compile(\n b\"\\s*#define[ \\t]+[^_]*_width[ \\t]+(?P<width>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_height[ \\t]+(?P<height>[0-9]+)[\\r\\n]+\"\n b\"(?P<hotspot>\"\n b\"#define[ \\t]+[^_]*_x_hot[ \\t]+(?P<xhot>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_y_hot[ \\t]+(?P<yhot>[0-9]+)[\\r\\n]+\"\n b\")?\"\n b\"[\\\\000-\\\\377]*_bits\\\\[\\\\]\"\n)\n```\n\n", "before_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# XBM File handling\n#\n# History:\n# 1995-09-08 fl Created\n# 1996-11-01 fl Added save support\n# 1997-07-07 fl Made header parser more tolerant\n# 1997-07-22 fl Fixed yet another parser bug\n# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)\n# 2001-05-13 fl Added hotspot handling (based on code from Bernhard Herzog)\n# 2004-02-24 fl Allow some whitespace before first #define\n#\n# Copyright (c) 1997-2004 by Secret Labs AB\n# Copyright (c) 1996-1997 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\n__version__ = \"0.6\"\n\nimport re\nfrom PIL import Image, ImageFile\n\n# XBM header\nxbm_head = re.compile(\n b\"\\s*#define[ \\t]+[^_]*_width[ \\t]+(?P<width>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_height[ \\t]+(?P<height>[0-9]+)[\\r\\n]+\"\n b\"(?P<hotspot>\"\n b\"#define[ \\t]+[^_]*_x_hot[ \\t]+(?P<xhot>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_y_hot[ \\t]+(?P<yhot>[0-9]+)[\\r\\n]+\"\n b\")?\"\n b\"[\\\\000-\\\\377]*_bits\\\\[\\\\]\"\n)\n\n\ndef _accept(prefix):\n return prefix.lstrip()[:7] == b\"#define\"\n\n\n##\n# Image plugin for X11 bitmaps.\n\nclass XbmImageFile(ImageFile.ImageFile):\n\n format = \"XBM\"\n format_description = \"X11 Bitmap\"\n\n def _open(self):\n\n m = xbm_head.match(self.fp.read(512))\n\n if m:\n\n xsize = int(m.group(\"width\"))\n ysize = int(m.group(\"height\"))\n\n if m.group(\"hotspot\"):\n self.info[\"hotspot\"] = (\n int(m.group(\"xhot\")), int(m.group(\"yhot\"))\n )\n\n self.mode = \"1\"\n self.size = xsize, ysize\n\n self.tile = [(\"xbm\", (0, 0)+self.size, m.end(), None)]\n\n\ndef _save(im, fp, filename):\n\n if im.mode != \"1\":\n raise IOError(\"cannot write mode %s as XBM\" % im.mode)\n\n fp.write((\"#define im_width %d\\n\" % im.size[0]).encode('ascii'))\n fp.write((\"#define im_height %d\\n\" % im.size[1]).encode('ascii'))\n\n hotspot = im.encoderinfo.get(\"hotspot\")\n if hotspot:\n fp.write((\"#define im_x_hot %d\\n\" % 
hotspot[0]).encode('ascii'))\n fp.write((\"#define im_y_hot %d\\n\" % hotspot[1]).encode('ascii'))\n\n fp.write(b\"static char im_bits[] = {\\n\")\n\n ImageFile._save(im, fp, [(\"xbm\", (0, 0)+im.size, 0, None)])\n\n fp.write(b\"};\\n\")\n\n\nImage.register_open(\"XBM\", XbmImageFile, _accept)\nImage.register_save(\"XBM\", _save)\n\nImage.register_extension(\"XBM\", \".xbm\")\n\nImage.register_mime(\"XBM\", \"image/xbm\")\n", "path": "PIL/XbmImagePlugin.py"}], "after_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# XBM File handling\n#\n# History:\n# 1995-09-08 fl Created\n# 1996-11-01 fl Added save support\n# 1997-07-07 fl Made header parser more tolerant\n# 1997-07-22 fl Fixed yet another parser bug\n# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)\n# 2001-05-13 fl Added hotspot handling (based on code from Bernhard Herzog)\n# 2004-02-24 fl Allow some whitespace before first #define\n#\n# Copyright (c) 1997-2004 by Secret Labs AB\n# Copyright (c) 1996-1997 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\n__version__ = \"0.6\"\n\nimport re\nfrom PIL import Image, ImageFile\n\n# XBM header\nxbm_head = re.compile(\n b\"\\s*#define[ \\t]+.*_width[ \\t]+(?P<width>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+.*_height[ \\t]+(?P<height>[0-9]+)[\\r\\n]+\"\n b\"(?P<hotspot>\"\n b\"#define[ \\t]+[^_]*_x_hot[ \\t]+(?P<xhot>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_y_hot[ \\t]+(?P<yhot>[0-9]+)[\\r\\n]+\"\n b\")?\"\n b\"[\\\\000-\\\\377]*_bits\\\\[\\\\]\"\n)\n\n\ndef _accept(prefix):\n return prefix.lstrip()[:7] == b\"#define\"\n\n\n##\n# Image plugin for X11 bitmaps.\n\nclass XbmImageFile(ImageFile.ImageFile):\n\n format = \"XBM\"\n format_description = \"X11 Bitmap\"\n\n def _open(self):\n\n m = xbm_head.match(self.fp.read(512))\n\n if m:\n\n xsize = int(m.group(\"width\"))\n ysize = int(m.group(\"height\"))\n\n if m.group(\"hotspot\"):\n self.info[\"hotspot\"] = (\n int(m.group(\"xhot\")), int(m.group(\"yhot\"))\n )\n\n self.mode = \"1\"\n self.size = xsize, ysize\n\n self.tile = [(\"xbm\", (0, 0)+self.size, m.end(), None)]\n\n\ndef _save(im, fp, filename):\n\n if im.mode != \"1\":\n raise IOError(\"cannot write mode %s as XBM\" % im.mode)\n\n fp.write((\"#define im_width %d\\n\" % im.size[0]).encode('ascii'))\n fp.write((\"#define im_height %d\\n\" % im.size[1]).encode('ascii'))\n\n hotspot = im.encoderinfo.get(\"hotspot\")\n if hotspot:\n fp.write((\"#define im_x_hot %d\\n\" % hotspot[0]).encode('ascii'))\n fp.write((\"#define im_y_hot %d\\n\" % hotspot[1]).encode('ascii'))\n\n fp.write(b\"static char im_bits[] = {\\n\")\n\n ImageFile._save(im, fp, [(\"xbm\", (0, 0)+im.size, 0, None)])\n\n fp.write(b\"};\\n\")\n\n\nImage.register_open(\"XBM\", XbmImageFile, _accept)\nImage.register_save(\"XBM\", _save)\n\nImage.register_extension(\"XBM\", \".xbm\")\n\nImage.register_mime(\"XBM\", \"image/xbm\")\n", "path": "PIL/XbmImagePlugin.py"}]} | 1,620 | 274 |
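A small standalone check of the regex change: the original `[^_]*_width` cannot span a macro name that itself contains underscores (such as `git_hub_width`), while the patched `.*_width` can. Running both against the header from the issue:

```python
import re

header = b"#define git_hub_width 32\n#define git_hub_height 32\n"

old = re.compile(rb"\s*#define[ \t]+[^_]*_width[ \t]+(?P<width>[0-9]+)")
new = re.compile(rb"\s*#define[ \t]+.*_width[ \t]+(?P<width>[0-9]+)")

print(old.match(header))                  # None: [^_]* stops at the first "_"
print(new.match(header).group("width"))   # b'32'
```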
gh_patches_debug_28793 | rasdani/github-patches | git_diff | PaddlePaddle__models-123 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Modify inference script
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hsigmoid/infer.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 import os
4 import logging
5 import gzip
6
7 import paddle.v2 as paddle
8 from network_conf import ngram_lm
9
10 logger = logging.getLogger("paddle")
11 logger.setLevel(logging.WARNING)
12
13
14 def decode_res(infer_res, dict_size):
15 """
16 Inferring probabilities are orginized as a complete binary tree.
17 The actual labels are leaves (indices are counted from class number).
18 This function travels paths decoded from inferring results.
19 If the probability >0.5 then go to right child, otherwise go to left child.
20
21 param infer_res: inferring result
22 param dict_size: class number
23 return predict_lbls: actual class
24 """
25 predict_lbls = []
26 infer_res = infer_res > 0.5
27 for i, probs in enumerate(infer_res):
28 idx = 0
29 result = 1
30 while idx < len(probs):
31 result <<= 1
32 if probs[idx]:
33 result |= 1
34 if probs[idx]:
35 idx = idx * 2 + 2 # right child
36 else:
37 idx = idx * 2 + 1 # left child
38
39 predict_lbl = result - dict_size
40 predict_lbls.append(predict_lbl)
41 return predict_lbls
42
43
44 def predict(batch_ins, idx_word_dict, dict_size, prediction_layer, parameters):
45 infer_res = paddle.infer(
46 output_layer=prediction_layer, parameters=parameters, input=batch_ins)
47
48 predict_lbls = decode_res(infer_res, dict_size)
49 predict_words = [idx_word_dict[lbl] for lbl in predict_lbls] # map to word
50
51 # Ouput format: word1 word2 word3 word4 -> predict label
52 for i, ins in enumerate(batch_ins):
53 print(" ".join([idx_word_dict[w]
54 for w in ins]) + " -> " + predict_words[i])
55
56
57 def main(model_path):
58 assert os.path.exists(model_path), "trained model does not exist."
59
60 paddle.init(use_gpu=False, trainer_count=1)
61 word_dict = paddle.dataset.imikolov.build_dict(min_word_freq=2)
62 dict_size = len(word_dict)
63 prediction_layer = ngram_lm(
64 is_train=False, hidden_size=256, embed_size=32, dict_size=dict_size)
65
66 with gzip.open(model_path, "r") as f:
67 parameters = paddle.parameters.Parameters.from_tar(f)
68
69 idx_word_dict = dict((v, k) for k, v in word_dict.items())
70 batch_size = 64
71 batch_ins = []
72 ins_iter = paddle.dataset.imikolov.test(word_dict, 5)
73
74 for ins in ins_iter():
75 batch_ins.append(ins[:-1])
76 if len(batch_ins) == batch_size:
77 predict(batch_ins, idx_word_dict, dict_size, prediction_layer,
78 parameters)
79 batch_ins = []
80
81 if len(batch_ins) > 0:
82 predict(batch_ins, idx_word_dict, dict_size, prediction_layer,
83 parameters)
84
85
86 if __name__ == "__main__":
87 main("models/hsigmoid_batch_00010.tar.gz")
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hsigmoid/infer.py b/hsigmoid/infer.py
--- a/hsigmoid/infer.py
+++ b/hsigmoid/infer.py
@@ -41,9 +41,8 @@
return predict_lbls
-def predict(batch_ins, idx_word_dict, dict_size, prediction_layer, parameters):
- infer_res = paddle.infer(
- output_layer=prediction_layer, parameters=parameters, input=batch_ins)
+def predict(batch_ins, idx_word_dict, dict_size, inferer):
+ infer_res = inferer.infer(input=batch_ins)
predict_lbls = decode_res(infer_res, dict_size)
predict_words = [idx_word_dict[lbl] for lbl in predict_lbls] # map to word
@@ -66,6 +65,8 @@
with gzip.open(model_path, "r") as f:
parameters = paddle.parameters.Parameters.from_tar(f)
+ inferer = paddle.inference.Inference(
+ output_layer=prediction_layer, parameters=parameters)
idx_word_dict = dict((v, k) for k, v in word_dict.items())
batch_size = 64
batch_ins = []
@@ -74,13 +75,11 @@
for ins in ins_iter():
batch_ins.append(ins[:-1])
if len(batch_ins) == batch_size:
- predict(batch_ins, idx_word_dict, dict_size, prediction_layer,
- parameters)
+ predict(batch_ins, idx_word_dict, dict_size, inferer)
batch_ins = []
if len(batch_ins) > 0:
- predict(batch_ins, idx_word_dict, dict_size, prediction_layer,
- parameters)
+ predict(batch_ins, idx_word_dict, dict_size, inferer)
if __name__ == "__main__":
| {"golden_diff": "diff --git a/hsigmoid/infer.py b/hsigmoid/infer.py\n--- a/hsigmoid/infer.py\n+++ b/hsigmoid/infer.py\n@@ -41,9 +41,8 @@\n return predict_lbls\n \n \n-def predict(batch_ins, idx_word_dict, dict_size, prediction_layer, parameters):\n- infer_res = paddle.infer(\n- output_layer=prediction_layer, parameters=parameters, input=batch_ins)\n+def predict(batch_ins, idx_word_dict, dict_size, inferer):\n+ infer_res = inferer.infer(input=batch_ins)\n \n predict_lbls = decode_res(infer_res, dict_size)\n predict_words = [idx_word_dict[lbl] for lbl in predict_lbls] # map to word\n@@ -66,6 +65,8 @@\n with gzip.open(model_path, \"r\") as f:\n parameters = paddle.parameters.Parameters.from_tar(f)\n \n+ inferer = paddle.inference.Inference(\n+ output_layer=prediction_layer, parameters=parameters)\n idx_word_dict = dict((v, k) for k, v in word_dict.items())\n batch_size = 64\n batch_ins = []\n@@ -74,13 +75,11 @@\n for ins in ins_iter():\n batch_ins.append(ins[:-1])\n if len(batch_ins) == batch_size:\n- predict(batch_ins, idx_word_dict, dict_size, prediction_layer,\n- parameters)\n+ predict(batch_ins, idx_word_dict, dict_size, inferer)\n batch_ins = []\n \n if len(batch_ins) > 0:\n- predict(batch_ins, idx_word_dict, dict_size, prediction_layer,\n- parameters)\n+ predict(batch_ins, idx_word_dict, dict_size, inferer)\n \n \n if __name__ == \"__main__\":\n", "issue": "Modify inference script\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport logging\nimport gzip\n\nimport paddle.v2 as paddle\nfrom network_conf import ngram_lm\n\nlogger = logging.getLogger(\"paddle\")\nlogger.setLevel(logging.WARNING)\n\n\ndef decode_res(infer_res, dict_size):\n \"\"\"\n Inferring probabilities are orginized as a complete binary tree.\n The actual labels are leaves (indices are counted from class number).\n This function travels paths decoded from inferring results.\n If the probability >0.5 then go to right child, otherwise go to left child.\n\n param infer_res: inferring result\n param dict_size: class number\n return predict_lbls: actual class\n \"\"\"\n predict_lbls = []\n infer_res = infer_res > 0.5\n for i, probs in enumerate(infer_res):\n idx = 0\n result = 1\n while idx < len(probs):\n result <<= 1\n if probs[idx]:\n result |= 1\n if probs[idx]:\n idx = idx * 2 + 2 # right child\n else:\n idx = idx * 2 + 1 # left child\n\n predict_lbl = result - dict_size\n predict_lbls.append(predict_lbl)\n return predict_lbls\n\n\ndef predict(batch_ins, idx_word_dict, dict_size, prediction_layer, parameters):\n infer_res = paddle.infer(\n output_layer=prediction_layer, parameters=parameters, input=batch_ins)\n\n predict_lbls = decode_res(infer_res, dict_size)\n predict_words = [idx_word_dict[lbl] for lbl in predict_lbls] # map to word\n\n # Ouput format: word1 word2 word3 word4 -> predict label\n for i, ins in enumerate(batch_ins):\n print(\" \".join([idx_word_dict[w]\n for w in ins]) + \" -> \" + predict_words[i])\n\n\ndef main(model_path):\n assert os.path.exists(model_path), \"trained model does not exist.\"\n\n paddle.init(use_gpu=False, trainer_count=1)\n word_dict = paddle.dataset.imikolov.build_dict(min_word_freq=2)\n dict_size = len(word_dict)\n prediction_layer = ngram_lm(\n is_train=False, hidden_size=256, embed_size=32, dict_size=dict_size)\n\n with gzip.open(model_path, \"r\") as f:\n parameters = paddle.parameters.Parameters.from_tar(f)\n\n idx_word_dict = dict((v, k) for k, v in word_dict.items())\n batch_size = 64\n batch_ins = []\n ins_iter = 
paddle.dataset.imikolov.test(word_dict, 5)\n\n for ins in ins_iter():\n batch_ins.append(ins[:-1])\n if len(batch_ins) == batch_size:\n predict(batch_ins, idx_word_dict, dict_size, prediction_layer,\n parameters)\n batch_ins = []\n\n if len(batch_ins) > 0:\n predict(batch_ins, idx_word_dict, dict_size, prediction_layer,\n parameters)\n\n\nif __name__ == \"__main__\":\n main(\"models/hsigmoid_batch_00010.tar.gz\")\n", "path": "hsigmoid/infer.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport logging\nimport gzip\n\nimport paddle.v2 as paddle\nfrom network_conf import ngram_lm\n\nlogger = logging.getLogger(\"paddle\")\nlogger.setLevel(logging.WARNING)\n\n\ndef decode_res(infer_res, dict_size):\n \"\"\"\n Inferring probabilities are orginized as a complete binary tree.\n The actual labels are leaves (indices are counted from class number).\n This function travels paths decoded from inferring results.\n If the probability >0.5 then go to right child, otherwise go to left child.\n\n param infer_res: inferring result\n param dict_size: class number\n return predict_lbls: actual class\n \"\"\"\n predict_lbls = []\n infer_res = infer_res > 0.5\n for i, probs in enumerate(infer_res):\n idx = 0\n result = 1\n while idx < len(probs):\n result <<= 1\n if probs[idx]:\n result |= 1\n if probs[idx]:\n idx = idx * 2 + 2 # right child\n else:\n idx = idx * 2 + 1 # left child\n\n predict_lbl = result - dict_size\n predict_lbls.append(predict_lbl)\n return predict_lbls\n\n\ndef predict(batch_ins, idx_word_dict, dict_size, inferer):\n infer_res = inferer.infer(input=batch_ins)\n\n predict_lbls = decode_res(infer_res, dict_size)\n predict_words = [idx_word_dict[lbl] for lbl in predict_lbls] # map to word\n\n # Ouput format: word1 word2 word3 word4 -> predict label\n for i, ins in enumerate(batch_ins):\n print(\" \".join([idx_word_dict[w]\n for w in ins]) + \" -> \" + predict_words[i])\n\n\ndef main(model_path):\n assert os.path.exists(model_path), \"trained model does not exist.\"\n\n paddle.init(use_gpu=False, trainer_count=1)\n word_dict = paddle.dataset.imikolov.build_dict(min_word_freq=2)\n dict_size = len(word_dict)\n prediction_layer = ngram_lm(\n is_train=False, hidden_size=256, embed_size=32, dict_size=dict_size)\n\n with gzip.open(model_path, \"r\") as f:\n parameters = paddle.parameters.Parameters.from_tar(f)\n\n inferer = paddle.inference.Inference(\n output_layer=prediction_layer, parameters=parameters)\n idx_word_dict = dict((v, k) for k, v in word_dict.items())\n batch_size = 64\n batch_ins = []\n ins_iter = paddle.dataset.imikolov.test(word_dict, 5)\n\n for ins in ins_iter():\n batch_ins.append(ins[:-1])\n if len(batch_ins) == batch_size:\n predict(batch_ins, idx_word_dict, dict_size, inferer)\n batch_ins = []\n\n if len(batch_ins) > 0:\n predict(batch_ins, idx_word_dict, dict_size, inferer)\n\n\nif __name__ == \"__main__\":\n main(\"models/hsigmoid_batch_00010.tar.gz\")\n", "path": "hsigmoid/infer.py"}]} | 1,124 | 395 |
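The core of `decode_res` is a heap-style walk over per-node probabilities of a complete binary tree: a probability above 0.5 appends a 1 bit and moves to the right child, otherwise a 0 bit and the left child, and the accumulated bits minus the class count give the leaf label. A standalone restatement of that walk with toy numbers (not Paddle output):

```python
def decode_path(probs, dict_size):
    idx, result = 0, 1
    while idx < len(probs):
        go_right = probs[idx] > 0.5
        result = (result << 1) | int(go_right)   # append the decision bit
        idx = idx * 2 + 2 if go_right else idx * 2 + 1
    return result - dict_size

# 3 internal nodes -> 4 leaves; going right twice lands on leaf 3.
print(decode_path([0.9, 0.2, 0.7], dict_size=4))  # 3
```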
gh_patches_debug_14264 | rasdani/github-patches | git_diff | CTFd__CTFd-1233 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Theme reset from CLI
There needs to be a way to reset the theme and other kinds of unrecoverable errors from the CLI.
Context: https://community.majorleaguecyber.org/t/ctfd-templatenotfound/51
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manage.py`
Content:
```
1 from flask import Flask
2 from flask_sqlalchemy import SQLAlchemy
3 from flask_script import Manager
4 from flask_migrate import Migrate, MigrateCommand
5 from CTFd import create_app
6 from CTFd.models import *
7
8 app = create_app()
9
10 manager = Manager(app)
11 manager.add_command('db', MigrateCommand)
12
13 if __name__ == '__main__':
14 manager.run()
15
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/manage.py b/manage.py
--- a/manage.py
+++ b/manage.py
@@ -3,6 +3,7 @@
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from CTFd import create_app
+from CTFd.utils import get_config as get_config_util, set_config as set_config_util
from CTFd.models import *
app = create_app()
@@ -10,5 +11,18 @@
manager = Manager(app)
manager.add_command('db', MigrateCommand)
-if __name__ == '__main__':
+
[email protected]
+def get_config(key):
+ with app.app_context():
+ print(get_config_util(key))
+
+
[email protected]
+def set_config(key, value):
+ with app.app_context():
+ print(set_config_util(key, value).value)
+
+
+if __name__ == "__main__":
manager.run()
| {"golden_diff": "diff --git a/manage.py b/manage.py\n--- a/manage.py\n+++ b/manage.py\n@@ -3,6 +3,7 @@\n from flask_script import Manager\n from flask_migrate import Migrate, MigrateCommand\n from CTFd import create_app\n+from CTFd.utils import get_config as get_config_util, set_config as set_config_util\n from CTFd.models import *\n \n app = create_app()\n@@ -10,5 +11,18 @@\n manager = Manager(app)\n manager.add_command('db', MigrateCommand)\n \n-if __name__ == '__main__':\n+\[email protected]\n+def get_config(key):\n+ with app.app_context():\n+ print(get_config_util(key))\n+\n+\[email protected]\n+def set_config(key, value):\n+ with app.app_context():\n+ print(set_config_util(key, value).value)\n+\n+\n+if __name__ == \"__main__\":\n manager.run()\n", "issue": "Theme reset from CLI\nThere needs to be a way to reset the theme and other kinds of unrecoverable errors from the CLI. \r\n\r\nContext: https://community.majorleaguecyber.org/t/ctfd-templatenotfound/51\n", "before_files": [{"content": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_script import Manager\nfrom flask_migrate import Migrate, MigrateCommand\nfrom CTFd import create_app\nfrom CTFd.models import *\n\napp = create_app()\n\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\n\nif __name__ == '__main__':\n manager.run()\n", "path": "manage.py"}], "after_files": [{"content": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_script import Manager\nfrom flask_migrate import Migrate, MigrateCommand\nfrom CTFd import create_app\nfrom CTFd.utils import get_config as get_config_util, set_config as set_config_util\nfrom CTFd.models import *\n\napp = create_app()\n\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\n\n\[email protected]\ndef get_config(key):\n with app.app_context():\n print(get_config_util(key))\n\n\[email protected]\ndef set_config(key, value):\n with app.app_context():\n print(set_config_util(key, value).value)\n\n\nif __name__ == \"__main__\":\n manager.run()\n", "path": "manage.py"}]} | 408 | 203 |
gh_patches_debug_40764 | rasdani/github-patches | git_diff | svthalia__concrexit-3115 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Members API doesn't list all members
### Describe the bug
Luko, pk=23 on staging, is listed as a member on the website, but is not a member according to the API
### How to reproduce
No idea.
check https://staging.thalia.nu/members/profile/23 vs https://staging.thalia.nu/api/v2/members/23/
### Expected behaviour
These urls should agree on the memberness of Luko
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/members/api/v2/views.py`
Content:
```
1 """API views of the activemembers app."""
2
3 from django.shortcuts import get_object_or_404
4
5 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
6 from rest_framework import filters as framework_filters
7 from rest_framework.generics import ListAPIView, RetrieveAPIView, UpdateAPIView
8
9 from members.api.v2 import filters
10 from members.api.v2.serializers.member import (
11 MemberCurrentSerializer,
12 MemberListSerializer,
13 MemberSerializer,
14 )
15 from members.models import Member
16 from thaliawebsite.api.openapi import OAuthAutoSchema
17 from thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod
18 from utils.media.services import fetch_thumbnails_db
19
20
21 class MemberListView(ListAPIView):
22 """Returns an overview of all members."""
23
24 serializer_class = MemberListSerializer
25 queryset = (
26 Member.current_members.all()
27 .select_related("profile")
28 .prefetch_related("membership_set")
29 )
30
31 def get_serializer(self, *args, **kwargs):
32 if len(args) > 0:
33 members = args[0]
34 fetch_thumbnails_db([member.profile.photo for member in members])
35 return super().get_serializer(*args, **kwargs)
36
37 permission_classes = [
38 IsAuthenticatedOrTokenHasScope,
39 ]
40 required_scopes = ["members:read"]
41 filter_backends = (
42 framework_filters.OrderingFilter,
43 framework_filters.SearchFilter,
44 filters.MembershipTypeFilter,
45 filters.StartingYearFilter,
46 )
47 ordering_fields = ("first_name", "last_name", "username")
48 search_fields = (
49 "profile__nickname",
50 "profile__starting_year",
51 "first_name",
52 "last_name",
53 "username",
54 )
55
56
57 class MemberDetailView(RetrieveAPIView):
58 """Returns details of a member."""
59
60 serializer_class = MemberSerializer
61 queryset = Member.current_members.all()
62 permission_classes = [
63 IsAuthenticatedOrTokenHasScope,
64 ]
65 required_scopes = ["members:read"]
66
67
68 class MemberCurrentView(MemberDetailView, UpdateAPIView):
69 """Returns details of the authenticated member."""
70
71 serializer_class = MemberCurrentSerializer
72 schema = OAuthAutoSchema(operation_id_base="CurrentMember")
73 permission_classes = [
74 IsAuthenticatedOrTokenHasScopeForMethod,
75 ]
76 required_scopes_per_method = {
77 "GET": ["profile:read"],
78 "PATCH": ["profile:write"],
79 "PUT": ["profile:write"],
80 }
81
82 def get_object(self):
83 return get_object_or_404(Member, pk=self.request.user.pk)
84
```
Path: `website/members/api/v2/filters.py`
Content:
```
1 from rest_framework import filters
2
3 from members.models import Membership
4
5
6 class StartingYearFilter(filters.BaseFilterBackend):
7 """Allows you to filter by starting year."""
8
9 def filter_queryset(self, request, queryset, view):
10 starting_year = request.query_params.get("starting_year", None)
11
12 if starting_year:
13 queryset = queryset.filter(profile__starting_year=starting_year)
14
15 return queryset
16
17 def get_schema_operation_parameters(self, view):
18 return [
19 {
20 "name": "starting_year",
21 "required": False,
22 "in": "query",
23 "description": "Filter by starting year",
24 "schema": {
25 "type": "number",
26 },
27 }
28 ]
29
30
31 class MembershipTypeFilter(filters.BaseFilterBackend):
32 """Allows you to filter by membership type."""
33
34 def filter_queryset(self, request, queryset, view):
35 membership_type = request.query_params.get("membership_type", None)
36
37 if membership_type:
38 memberships = Membership.objects.filter(type=membership_type)
39 queryset = queryset.filter(pk__in=memberships.values("user__pk"))
40
41 return queryset
42
43 def get_schema_operation_parameters(self, view):
44 return [
45 {
46 "name": "membership_type",
47 "required": False,
48 "in": "query",
49 "description": "Filter by membership type",
50 "schema": {
51 "type": "string",
52 },
53 }
54 ]
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/members/api/v2/filters.py b/website/members/api/v2/filters.py
--- a/website/members/api/v2/filters.py
+++ b/website/members/api/v2/filters.py
@@ -1,4 +1,9 @@
-from rest_framework import filters
+from datetime import datetime
+
+from django.db.models import Q
+from django.utils import timezone
+
+from rest_framework import filters, serializers
from members.models import Membership
@@ -28,6 +33,62 @@
]
+class FormerMemberFilter(filters.BaseFilterBackend):
+ def filter_queryset(self, request, queryset, view):
+ former = request.query_params.get("former", "false")
+
+ if former == "false":
+ # Filter out former members
+ return (
+ queryset.exclude(membership=None)
+ .filter(
+ Q(membership__until__isnull=True)
+ | Q(membership__until__gt=timezone.now().date())
+ )
+ .distinct()
+ )
+ elif former == "true":
+ # Filter out current members
+
+ memberships_query = Q(until__gt=datetime.now()) | Q(until=None)
+ members_query = ~Q(id=None)
+
+ # Filter out all current active memberships
+ memberships_query &= Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)
+ memberships = Membership.objects.filter(memberships_query)
+ members_query &= ~Q(pk__in=memberships.values("user__pk"))
+
+ memberships_query = Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)
+ memberships = Membership.objects.filter(memberships_query)
+ all_memberships = Membership.objects.all()
+ # Only keep members that were once members, or are legacy users
+ # that do not have any memberships at all
+ members_query &= Q(pk__in=memberships.values("user__pk")) | ~Q(
+ pk__in=all_memberships.values("user__pk")
+ )
+
+ return queryset.filter(members_query)
+ elif former == "any":
+ # Include both former and current members
+ return queryset
+ else:
+ raise serializers.ValidationError("invalid former parameter")
+
+ def get_schema_operation_parameters(self, view):
+ return [
+ {
+ "name": "former",
+ "required": False,
+ "in": "query",
+ "description": "Include former members or only former members",
+ "schema": {
+ "type": "string",
+ "enum": ["true", "false", "any"],
+ },
+ }
+ ]
+
+
class MembershipTypeFilter(filters.BaseFilterBackend):
"""Allows you to filter by membership type."""
diff --git a/website/members/api/v2/views.py b/website/members/api/v2/views.py
--- a/website/members/api/v2/views.py
+++ b/website/members/api/v2/views.py
@@ -23,7 +23,7 @@
serializer_class = MemberListSerializer
queryset = (
- Member.current_members.all()
+ Member.objects.all()
.select_related("profile")
.prefetch_related("membership_set")
)
@@ -43,6 +43,7 @@
framework_filters.SearchFilter,
filters.MembershipTypeFilter,
filters.StartingYearFilter,
+ filters.FormerMemberFilter,
)
ordering_fields = ("first_name", "last_name", "username")
search_fields = (
@@ -58,7 +59,7 @@
"""Returns details of a member."""
serializer_class = MemberSerializer
- queryset = Member.current_members.all()
+ queryset = Member.objects.all()
permission_classes = [
IsAuthenticatedOrTokenHasScope,
]
| {"golden_diff": "diff --git a/website/members/api/v2/filters.py b/website/members/api/v2/filters.py\n--- a/website/members/api/v2/filters.py\n+++ b/website/members/api/v2/filters.py\n@@ -1,4 +1,9 @@\n-from rest_framework import filters\n+from datetime import datetime\n+\n+from django.db.models import Q\n+from django.utils import timezone\n+\n+from rest_framework import filters, serializers\n \n from members.models import Membership\n \n@@ -28,6 +33,62 @@\n ]\n \n \n+class FormerMemberFilter(filters.BaseFilterBackend):\n+ def filter_queryset(self, request, queryset, view):\n+ former = request.query_params.get(\"former\", \"false\")\n+\n+ if former == \"false\":\n+ # Filter out former members\n+ return (\n+ queryset.exclude(membership=None)\n+ .filter(\n+ Q(membership__until__isnull=True)\n+ | Q(membership__until__gt=timezone.now().date())\n+ )\n+ .distinct()\n+ )\n+ elif former == \"true\":\n+ # Filter out current members\n+\n+ memberships_query = Q(until__gt=datetime.now()) | Q(until=None)\n+ members_query = ~Q(id=None)\n+\n+ # Filter out all current active memberships\n+ memberships_query &= Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n+ memberships = Membership.objects.filter(memberships_query)\n+ members_query &= ~Q(pk__in=memberships.values(\"user__pk\"))\n+\n+ memberships_query = Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n+ memberships = Membership.objects.filter(memberships_query)\n+ all_memberships = Membership.objects.all()\n+ # Only keep members that were once members, or are legacy users\n+ # that do not have any memberships at all\n+ members_query &= Q(pk__in=memberships.values(\"user__pk\")) | ~Q(\n+ pk__in=all_memberships.values(\"user__pk\")\n+ )\n+\n+ return queryset.filter(members_query)\n+ elif former == \"any\":\n+ # Include both former and current members\n+ return queryset\n+ else:\n+ raise serializers.ValidationError(\"invalid former parameter\")\n+\n+ def get_schema_operation_parameters(self, view):\n+ return [\n+ {\n+ \"name\": \"former\",\n+ \"required\": False,\n+ \"in\": \"query\",\n+ \"description\": \"Include former members or only former members\",\n+ \"schema\": {\n+ \"type\": \"string\",\n+ \"enum\": [\"true\", \"false\", \"any\"],\n+ },\n+ }\n+ ]\n+\n+\n class MembershipTypeFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by membership type.\"\"\"\n \ndiff --git a/website/members/api/v2/views.py b/website/members/api/v2/views.py\n--- a/website/members/api/v2/views.py\n+++ b/website/members/api/v2/views.py\n@@ -23,7 +23,7 @@\n \n serializer_class = MemberListSerializer\n queryset = (\n- Member.current_members.all()\n+ Member.objects.all()\n .select_related(\"profile\")\n .prefetch_related(\"membership_set\")\n )\n@@ -43,6 +43,7 @@\n framework_filters.SearchFilter,\n filters.MembershipTypeFilter,\n filters.StartingYearFilter,\n+ filters.FormerMemberFilter,\n )\n ordering_fields = (\"first_name\", \"last_name\", \"username\")\n search_fields = (\n@@ -58,7 +59,7 @@\n \"\"\"Returns details of a member.\"\"\"\n \n serializer_class = MemberSerializer\n- queryset = Member.current_members.all()\n+ queryset = Member.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n", "issue": "Members API doesn't list all members\n### Describe the bug\r\nLuko, pk=23 on staging, is listed as member on the website, but is not a member according to the API\r\n\r\n### How to reproduce\r\nNo idea.\r\ncheck https://staging.thalia.nu/members/profile/23 vs https://staging.thalia.nu/api/v2/members/23/\r\n\r\n### Expected 
behaviour\r\nThese urls should agree on the memberness of Luko\r\n\r\n\n", "before_files": [{"content": "\"\"\"API views of the activemembers app.\"\"\"\n\nfrom django.shortcuts import get_object_or_404\n\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import filters as framework_filters\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView, UpdateAPIView\n\nfrom members.api.v2 import filters\nfrom members.api.v2.serializers.member import (\n MemberCurrentSerializer,\n MemberListSerializer,\n MemberSerializer,\n)\nfrom members.models import Member\nfrom thaliawebsite.api.openapi import OAuthAutoSchema\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\nfrom utils.media.services import fetch_thumbnails_db\n\n\nclass MemberListView(ListAPIView):\n \"\"\"Returns an overview of all members.\"\"\"\n\n serializer_class = MemberListSerializer\n queryset = (\n Member.current_members.all()\n .select_related(\"profile\")\n .prefetch_related(\"membership_set\")\n )\n\n def get_serializer(self, *args, **kwargs):\n if len(args) > 0:\n members = args[0]\n fetch_thumbnails_db([member.profile.photo for member in members])\n return super().get_serializer(*args, **kwargs)\n\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"members:read\"]\n filter_backends = (\n framework_filters.OrderingFilter,\n framework_filters.SearchFilter,\n filters.MembershipTypeFilter,\n filters.StartingYearFilter,\n )\n ordering_fields = (\"first_name\", \"last_name\", \"username\")\n search_fields = (\n \"profile__nickname\",\n \"profile__starting_year\",\n \"first_name\",\n \"last_name\",\n \"username\",\n )\n\n\nclass MemberDetailView(RetrieveAPIView):\n \"\"\"Returns details of a member.\"\"\"\n\n serializer_class = MemberSerializer\n queryset = Member.current_members.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"members:read\"]\n\n\nclass MemberCurrentView(MemberDetailView, UpdateAPIView):\n \"\"\"Returns details of the authenticated member.\"\"\"\n\n serializer_class = MemberCurrentSerializer\n schema = OAuthAutoSchema(operation_id_base=\"CurrentMember\")\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n ]\n required_scopes_per_method = {\n \"GET\": [\"profile:read\"],\n \"PATCH\": [\"profile:write\"],\n \"PUT\": [\"profile:write\"],\n }\n\n def get_object(self):\n return get_object_or_404(Member, pk=self.request.user.pk)\n", "path": "website/members/api/v2/views.py"}, {"content": "from rest_framework import filters\n\nfrom members.models import Membership\n\n\nclass StartingYearFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by starting year.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n starting_year = request.query_params.get(\"starting_year\", None)\n\n if starting_year:\n queryset = queryset.filter(profile__starting_year=starting_year)\n\n return queryset\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"starting_year\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by starting year\",\n \"schema\": {\n \"type\": \"number\",\n },\n }\n ]\n\n\nclass MembershipTypeFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by membership type.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n membership_type = request.query_params.get(\"membership_type\", None)\n\n if membership_type:\n memberships = 
Membership.objects.filter(type=membership_type)\n queryset = queryset.filter(pk__in=memberships.values(\"user__pk\"))\n\n return queryset\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"membership_type\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by membership type\",\n \"schema\": {\n \"type\": \"string\",\n },\n }\n ]\n", "path": "website/members/api/v2/filters.py"}], "after_files": [{"content": "\"\"\"API views of the activemembers app.\"\"\"\n\nfrom django.shortcuts import get_object_or_404\n\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import filters as framework_filters\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView, UpdateAPIView\n\nfrom members.api.v2 import filters\nfrom members.api.v2.serializers.member import (\n MemberCurrentSerializer,\n MemberListSerializer,\n MemberSerializer,\n)\nfrom members.models import Member\nfrom thaliawebsite.api.openapi import OAuthAutoSchema\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\nfrom utils.media.services import fetch_thumbnails_db\n\n\nclass MemberListView(ListAPIView):\n \"\"\"Returns an overview of all members.\"\"\"\n\n serializer_class = MemberListSerializer\n queryset = (\n Member.objects.all()\n .select_related(\"profile\")\n .prefetch_related(\"membership_set\")\n )\n\n def get_serializer(self, *args, **kwargs):\n if len(args) > 0:\n members = args[0]\n fetch_thumbnails_db([member.profile.photo for member in members])\n return super().get_serializer(*args, **kwargs)\n\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"members:read\"]\n filter_backends = (\n framework_filters.OrderingFilter,\n framework_filters.SearchFilter,\n filters.MembershipTypeFilter,\n filters.StartingYearFilter,\n filters.FormerMemberFilter,\n )\n ordering_fields = (\"first_name\", \"last_name\", \"username\")\n search_fields = (\n \"profile__nickname\",\n \"profile__starting_year\",\n \"first_name\",\n \"last_name\",\n \"username\",\n )\n\n\nclass MemberDetailView(RetrieveAPIView):\n \"\"\"Returns details of a member.\"\"\"\n\n serializer_class = MemberSerializer\n queryset = Member.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"members:read\"]\n\n\nclass MemberCurrentView(MemberDetailView, UpdateAPIView):\n \"\"\"Returns details of the authenticated member.\"\"\"\n\n serializer_class = MemberCurrentSerializer\n schema = OAuthAutoSchema(operation_id_base=\"CurrentMember\")\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n ]\n required_scopes_per_method = {\n \"GET\": [\"profile:read\"],\n \"PATCH\": [\"profile:write\"],\n \"PUT\": [\"profile:write\"],\n }\n\n def get_object(self):\n return get_object_or_404(Member, pk=self.request.user.pk)\n", "path": "website/members/api/v2/views.py"}, {"content": "from datetime import datetime\n\nfrom django.db.models import Q\nfrom django.utils import timezone\n\nfrom rest_framework import filters, serializers\n\nfrom members.models import Membership\n\n\nclass StartingYearFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by starting year.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n starting_year = request.query_params.get(\"starting_year\", None)\n\n if starting_year:\n queryset = queryset.filter(profile__starting_year=starting_year)\n\n return queryset\n\n def get_schema_operation_parameters(self, 
view):\n return [\n {\n \"name\": \"starting_year\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by starting year\",\n \"schema\": {\n \"type\": \"number\",\n },\n }\n ]\n\n\nclass FormerMemberFilter(filters.BaseFilterBackend):\n def filter_queryset(self, request, queryset, view):\n former = request.query_params.get(\"former\", \"false\")\n\n if former == \"false\":\n # Filter out former members\n return (\n queryset.exclude(membership=None)\n .filter(\n Q(membership__until__isnull=True)\n | Q(membership__until__gt=timezone.now().date())\n )\n .distinct()\n )\n elif former == \"true\":\n # Filter out current members\n\n memberships_query = Q(until__gt=datetime.now()) | Q(until=None)\n members_query = ~Q(id=None)\n\n # Filter out all current active memberships\n memberships_query &= Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n memberships = Membership.objects.filter(memberships_query)\n members_query &= ~Q(pk__in=memberships.values(\"user__pk\"))\n\n memberships_query = Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n memberships = Membership.objects.filter(memberships_query)\n all_memberships = Membership.objects.all()\n # Only keep members that were once members, or are legacy users\n # that do not have any memberships at all\n members_query &= Q(pk__in=memberships.values(\"user__pk\")) | ~Q(\n pk__in=all_memberships.values(\"user__pk\")\n )\n\n return queryset.filter(members_query)\n elif former == \"any\":\n # Include both former and current members\n return queryset\n else:\n raise serializers.ValidationError(\"invalid former parameter\")\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"former\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Include former members or only former members\",\n \"schema\": {\n \"type\": \"string\",\n \"enum\": [\"true\", \"false\", \"any\"],\n },\n }\n ]\n\n\nclass MembershipTypeFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by membership type.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n membership_type = request.query_params.get(\"membership_type\", None)\n\n if membership_type:\n memberships = Membership.objects.filter(type=membership_type)\n queryset = queryset.filter(pk__in=memberships.values(\"user__pk\"))\n\n return queryset\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"membership_type\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by membership type\",\n \"schema\": {\n \"type\": \"string\",\n },\n }\n ]\n", "path": "website/members/api/v2/filters.py"}]} | 1,487 | 845 |
gh_patches_debug_4661 | rasdani/github-patches | git_diff | SeldonIO__MLServer-478 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Activating custom conda env in mlserver build image
Hello,
I’ve encountered an issue when using `mlserver build ...` with `1.1.0dev` where the custom conda environment is not activated. The image builds and all packages are present in the image. However, when starting the image it crashes on `mlserver start`, which appears to call the native Python 3.8 rather than the conda-installed Python.
```
--> Sourcing new environment at ./envs/base/environment...
--> Calling conda-unpack...
--> Disabling user-installed packages...
Traceback (most recent call last):
File "/opt/mlserver/envs/base/environment/bin/mlserver", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py", line 76, in main
root()
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1128, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1053, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1659, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1395, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py", line 19, in wrapper
return asyncio.run(f(*args, **kwargs))
File "/usr/local/lib/python3.8/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/usr/local/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete
return future.result()
File "/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py", line 40, in start
settings, models = await load_settings(folder)
File "/usr/local/lib/python3.8/site-packages/mlserver/cli/serve.py", line 37, in load_settings
available_models = await repository.list()
File "/usr/local/lib/python3.8/site-packages/mlserver/repository.py", line 37, in list
model_settings = ModelSettings()
File "pydantic/env_settings.py", line 36, in pydantic.env_settings.BaseSettings.__init__
File "pydantic/main.py", line 406, in pydantic.main.BaseModel.__init__
pydantic.error_wrappers.ValidationError: 1 validation error for ModelSettings
implementation
ensure this value contains valid import path or valid callable: No module named 'mlserver_mlflow' (type=type_error.pyobject; error_message=No module named 'mlserver_mlflow')
```
- [x] Manually removing the final CMD line in the Dockerfile and starting an interactive container: running `./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && mlserver start $MLSERVER_MODELS_DIR` then launches the service successfully.
conda.yaml file:
```
channels:
- defaults
- conda-forge
- anaconda
dependencies:
- python=3.7.10
- pip
- gcc_linux-aarch64
- gxx_linux-aarch64
- pip:
- mlflow
- mlserver==0.4.0
- mlserver-mlflow==0.4.0
name: conda
```
--- END ISSUE ---
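The tail of that traceback is the key symptom: the `implementation` field of `ModelSettings` holds a dotted import path, and pydantic validates it by importing it under whichever interpreter launched `mlserver`. A rough illustration of that resolution step is below; the dotted path is an assumed example (the real value is not shown in the issue), so treat it as a sketch rather than MLServer's actual code.
```
# Sketch of how a dotted "implementation" path gets resolved; not MLServer's real code.
import importlib

def resolve_implementation(dotted_path):
    module_name, _, attr = dotted_path.rpartition(".")
    module = importlib.import_module(module_name)  # ModuleNotFoundError under the wrong interpreter
    return getattr(module, attr)

# Under the system Python 3.8 (where only mlserver itself is installed) this raises
# "No module named 'mlserver_mlflow'", matching the error above; under the unpacked
# conda environment the import succeeds.
# resolve_implementation("mlserver_mlflow.MLflowRuntime")  # assumed example path
```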
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/cli/constants.py`
Content:
```
1 DockerfileName = "Dockerfile"
2 DockerfileTemplate = """
3 FROM continuumio/miniconda3:4.10.3 AS env-builder
4 SHELL ["/bin/bash", "-c"]
5
6 ARG MLSERVER_ENV_NAME="mlserver-custom-env" \\
7 MLSERVER_ENV_TARBALL="./envs/base.tar.gz"
8
9 RUN conda config --add channels conda-forge && \\
10 conda install conda-pack
11
12 # The `[]` character range will ensure that Docker doesn't complain if the
13 # files don't exist:
14 # https://stackoverflow.com/a/65138098/5015573
15 COPY \\
16 ./environment.ym[l] \\
17 ./environment.yam[l] \\
18 ./conda.ym[l] \\
19 ./conda.yam[l] \\
20 .
21 RUN mkdir $(dirname $MLSERVER_ENV_TARBALL); \\
22 for envFile in environment.yml environment.yaml conda.yml conda.yaml; do \\
23 if [[ -f $envFile ]]; then \\
24 conda env create \
25 --name $MLSERVER_ENV_NAME \\
26 --file $envFile; \\
27 conda-pack \
28 -n $MLSERVER_ENV_NAME \\
29 -o $MLSERVER_ENV_TARBALL; \\
30 fi \\
31 done; \\
32 chmod -R 776 $(dirname $MLSERVER_ENV_TARBALL)
33
34 FROM seldonio/mlserver:{version}-slim
35 SHELL ["/bin/bash", "-c"]
36
37 # Copy all potential sources for custom environments
38 COPY \\
39 --chown=1000 \\
40 --from=env-builder \\
41 /envs/base.tar.g[z] \\
42 ./envs/base.tar.gz
43 COPY \\
44 ./settings.jso[n] \\
45 ./model-settings.jso[n] \\
46 ./requirements.tx[t] \\
47 .
48
49 USER root
50 # Install dependencies system-wide, to ensure that they are available for every
51 # user
52 RUN ./hack/build-env.sh . ./envs/base && \
53 chown -R 1000:0 ./envs/base && \\
54 chmod -R 776 ./envs/base
55 USER 1000
56
57 # Copy everything else
58 COPY . .
59
60 # Override MLServer's own `CMD` to activate the embedded environment
61 # (optionally activating the hot-loaded one as well).
62 CMD source ./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && \\
63 mlserver start $MLSERVER_MODELS_DIR
64 """
65
66 DockerignoreName = ".dockerignore"
67 Dockerignore = """
68 # Binaries for programs and plugins
69 *.exe
70 *.exe~
71 *.dll
72 *.so
73 *.dylib
74 *.pyc
75 *.pyo
76 *.pyd
77 bin
78
79 # Mac file system
80 **/.DS_Store
81
82 # Python dev
83 __pycache__
84 .Python
85 env
86 pip-log.txt
87 pip-delete-this-directory.txt
88 .mypy_cache
89 eggs/
90 .eggs/
91 *.egg-info/
92 ./pytest_cache
93 .tox
94 build/
95 dist/
96
97 # Notebook Checkpoints
98 .ipynb_checkpoints
99
100 .coverage
101 .coverage.*
102 .cache
103 nosetests.xml
104 coverage.xml
105 *,cover
106 *.log
107 .git
108 """
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlserver/cli/constants.py b/mlserver/cli/constants.py
--- a/mlserver/cli/constants.py
+++ b/mlserver/cli/constants.py
@@ -24,7 +24,7 @@
conda env create \
--name $MLSERVER_ENV_NAME \\
--file $envFile; \\
- conda-pack \
+ conda-pack --ignore-missing-files \
-n $MLSERVER_ENV_NAME \\
-o $MLSERVER_ENV_TARBALL; \\
fi \\
| {"golden_diff": "diff --git a/mlserver/cli/constants.py b/mlserver/cli/constants.py\n--- a/mlserver/cli/constants.py\n+++ b/mlserver/cli/constants.py\n@@ -24,7 +24,7 @@\n conda env create \\\n --name $MLSERVER_ENV_NAME \\\\\n --file $envFile; \\\\\n- conda-pack \\\n+ conda-pack --ignore-missing-files \\\n -n $MLSERVER_ENV_NAME \\\\\n -o $MLSERVER_ENV_TARBALL; \\\\\n fi \\\\\n", "issue": "Activating custom conda env in mlserver build image\nHello,\r\nI\u2019ve encountered an issue when using `mlserver build ...` with `1.1.0dev` where the custom conda environment is not activated. The image builds and all packages are present in the image. However, when starting the image it crashes on `mlserver start` calling what seems to be native Python 3.8 rather than the conda installed python.\r\n\r\n```\r\n--> Sourcing new environment at ./envs/base/environment...\r\n\r\n--> Calling conda-unpack...\r\n\r\n--> Disabling user-installed packages...\r\n\r\nTraceback (most recent call last):\r\n\r\n File \"/opt/mlserver/envs/base/environment/bin/mlserver\", line 8, in <module>\r\n\r\n sys.exit(main())\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py\", line 76, in main\r\n\r\n root()\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/click/core.py\", line 1128, in __call__\r\n\r\n return self.main(*args, **kwargs)\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/click/core.py\", line 1053, in main\r\n\r\n rv = self.invoke(ctx)\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/click/core.py\", line 1659, in invoke\r\n\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/click/core.py\", line 1395, in invoke\r\n\r\n return ctx.invoke(self.callback, **ctx.params)\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/click/core.py\", line 754, in invoke\r\n\r\n return __callback(*args, **kwargs)\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py\", line 19, in wrapper\r\n\r\n return asyncio.run(f(*args, **kwargs))\r\n\r\n File \"/usr/local/lib/python3.8/asyncio/runners.py\", line 44, in run\r\n\r\n return loop.run_until_complete(main)\r\n\r\n File \"/usr/local/lib/python3.8/asyncio/base_events.py\", line 616, in run_until_complete\r\n\r\n return future.result()\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py\", line 40, in start\r\n\r\n settings, models = await load_settings(folder)\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/mlserver/cli/serve.py\", line 37, in load_settings\r\n\r\n available_models = await repository.list()\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/mlserver/repository.py\", line 37, in list\r\n\r\n model_settings = ModelSettings()\r\n\r\n File \"pydantic/env_settings.py\", line 36, in pydantic.env_settings.BaseSettings.__init__\r\n\r\n File \"pydantic/main.py\", line 406, in pydantic.main.BaseModel.__init__\r\n\r\npydantic.error_wrappers.ValidationError: 1 validation error for ModelSettings\r\n\r\nimplementation\r\n\r\n ensure this value contains valid import path or valid callable: No module named 'mlserver_mlflow' (type=type_error.pyobject; error_message=No module named 'mlserver_mlflow')\r\n\r\n```\r\n\r\n- [x] manually removing final CMD line in Dockerfile and starting interactive container. 
Running `./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && mlserver start $MLSERVER_MODELS_DIR` successfully launches the service\r\n\r\nconda.yaml file:\r\n```\r\nchannels:\r\n- defaults\r\n- conda-forge\r\n- anaconda\r\ndependencies:\r\n- python=3.7.10\r\n- pip\r\n- gcc_linux-aarch64\r\n- gxx_linux-aarch64\r\n- pip:\r\n - mlflow\r\n - mlserver==0.4.0\r\n - mlserver-mlflow==0.4.0\r\nname: conda\r\n```\n", "before_files": [{"content": "DockerfileName = \"Dockerfile\"\nDockerfileTemplate = \"\"\"\nFROM continuumio/miniconda3:4.10.3 AS env-builder\nSHELL [\"/bin/bash\", \"-c\"]\n\nARG MLSERVER_ENV_NAME=\"mlserver-custom-env\" \\\\\n MLSERVER_ENV_TARBALL=\"./envs/base.tar.gz\"\n\nRUN conda config --add channels conda-forge && \\\\\n conda install conda-pack\n\n# The `[]` character range will ensure that Docker doesn't complain if the\n# files don't exist:\n# https://stackoverflow.com/a/65138098/5015573\nCOPY \\\\\n ./environment.ym[l] \\\\\n ./environment.yam[l] \\\\\n ./conda.ym[l] \\\\\n ./conda.yam[l] \\\\\n .\nRUN mkdir $(dirname $MLSERVER_ENV_TARBALL); \\\\\n for envFile in environment.yml environment.yaml conda.yml conda.yaml; do \\\\\n if [[ -f $envFile ]]; then \\\\\n conda env create \\\n --name $MLSERVER_ENV_NAME \\\\\n --file $envFile; \\\\\n conda-pack \\\n -n $MLSERVER_ENV_NAME \\\\\n -o $MLSERVER_ENV_TARBALL; \\\\\n fi \\\\\n done; \\\\\n chmod -R 776 $(dirname $MLSERVER_ENV_TARBALL)\n\nFROM seldonio/mlserver:{version}-slim\nSHELL [\"/bin/bash\", \"-c\"]\n\n# Copy all potential sources for custom environments\nCOPY \\\\\n --chown=1000 \\\\\n --from=env-builder \\\\\n /envs/base.tar.g[z] \\\\\n ./envs/base.tar.gz\nCOPY \\\\\n ./settings.jso[n] \\\\\n ./model-settings.jso[n] \\\\\n ./requirements.tx[t] \\\\\n .\n\nUSER root\n# Install dependencies system-wide, to ensure that they are available for every\n# user\nRUN ./hack/build-env.sh . ./envs/base && \\\n chown -R 1000:0 ./envs/base && \\\\\n chmod -R 776 ./envs/base\nUSER 1000\n\n# Copy everything else\nCOPY . 
.\n\n# Override MLServer's own `CMD` to activate the embedded environment\n# (optionally activating the hot-loaded one as well).\nCMD source ./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && \\\\\n mlserver start $MLSERVER_MODELS_DIR\n\"\"\"\n\nDockerignoreName = \".dockerignore\"\nDockerignore = \"\"\"\n# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n*.pyc\n*.pyo\n*.pyd\nbin\n\n# Mac file system\n**/.DS_Store\n\n# Python dev\n__pycache__\n.Python\nenv\npip-log.txt\npip-delete-this-directory.txt\n.mypy_cache\neggs/\n.eggs/\n*.egg-info/\n./pytest_cache\n.tox\nbuild/\ndist/\n\n# Notebook Checkpoints\n.ipynb_checkpoints\n\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*,cover\n*.log\n.git\n\"\"\"\n", "path": "mlserver/cli/constants.py"}], "after_files": [{"content": "DockerfileName = \"Dockerfile\"\nDockerfileTemplate = \"\"\"\nFROM continuumio/miniconda3:4.10.3 AS env-builder\nSHELL [\"/bin/bash\", \"-c\"]\n\nARG MLSERVER_ENV_NAME=\"mlserver-custom-env\" \\\\\n MLSERVER_ENV_TARBALL=\"./envs/base.tar.gz\"\n\nRUN conda config --add channels conda-forge && \\\\\n conda install conda-pack\n\n# The `[]` character range will ensure that Docker doesn't complain if the\n# files don't exist:\n# https://stackoverflow.com/a/65138098/5015573\nCOPY \\\\\n ./environment.ym[l] \\\\\n ./environment.yam[l] \\\\\n ./conda.ym[l] \\\\\n ./conda.yam[l] \\\\\n .\nRUN mkdir $(dirname $MLSERVER_ENV_TARBALL); \\\\\n for envFile in environment.yml environment.yaml conda.yml conda.yaml; do \\\\\n if [[ -f $envFile ]]; then \\\\\n conda env create \\\n --name $MLSERVER_ENV_NAME \\\\\n --file $envFile; \\\\\n conda-pack --ignore-missing-files \\\n -n $MLSERVER_ENV_NAME \\\\\n -o $MLSERVER_ENV_TARBALL; \\\\\n fi \\\\\n done; \\\\\n chmod -R 776 $(dirname $MLSERVER_ENV_TARBALL)\n\nFROM seldonio/mlserver:{version}-slim\nSHELL [\"/bin/bash\", \"-c\"]\n\n# Copy all potential sources for custom environments\nCOPY \\\\\n --chown=1000 \\\\\n --from=env-builder \\\\\n /envs/base.tar.g[z] \\\\\n ./envs/base.tar.gz\nCOPY \\\\\n ./settings.jso[n] \\\\\n ./model-settings.jso[n] \\\\\n ./requirements.tx[t] \\\\\n .\n\nUSER root\n# Install dependencies system-wide, to ensure that they are available for every\n# user\nRUN ./hack/build-env.sh . ./envs/base && \\\n chown -R 1000:0 ./envs/base && \\\\\n chmod -R 776 ./envs/base\nUSER 1000\n\n# Copy everything else\nCOPY . .\n\n# Override MLServer's own `CMD` to activate the embedded environment\n# (optionally activating the hot-loaded one as well).\nCMD source ./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && \\\\\n mlserver start $MLSERVER_MODELS_DIR\n\"\"\"\n\nDockerignoreName = \".dockerignore\"\nDockerignore = \"\"\"\n# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n*.pyc\n*.pyo\n*.pyd\nbin\n\n# Mac file system\n**/.DS_Store\n\n# Python dev\n__pycache__\n.Python\nenv\npip-log.txt\npip-delete-this-directory.txt\n.mypy_cache\neggs/\n.eggs/\n*.egg-info/\n./pytest_cache\n.tox\nbuild/\ndist/\n\n# Notebook Checkpoints\n.ipynb_checkpoints\n\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*,cover\n*.log\n.git\n\"\"\"\n", "path": "mlserver/cli/constants.py"}]} | 2,000 | 108 |
gh_patches_debug_32002 | rasdani/github-patches | git_diff | Mailu__Mailu-2069 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Setup utility] Cannot generate files when database flavors have been switched
## Before you open your issue
- [x] Check if no issue or pull-request for this already exists.
- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [x] You understand `Mailu` is made by volunteers in their **free time** — be concise, civil and accept that delays can occur.
- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [x] docker-compose
- [ ] kubernetes
- [x] docker swarm
### Versions
All versions
## Description
In the setup utility, when you select a different database flavor and then switch back to SQLite, you cannot generate the files. The reason is that it still expects the fields of the alternative database flavor you previously selected to be filled in.
When you select an alternative database flavor, the fields (e.g. hostname, database name) are mandatory. These fields remain mandatory when you switch back to SQLite as the database flavor.
As a workaround you can fill in the mandatory fields and then switch back to SQLite again, or you can refresh the page and not switch away from SQLite in the first place.
The problem is in https://github.com/Mailu/Mailu/blob/master/setup/templates/steps/database.html .
I didn't check yet how to resolve this.
## Replication Steps
1) Go to https://setup.mailu.io/master/ and click next.
2). Enter a value for main domain server and public hostname.
3). Select roundcube as webmail.
4). At the bottom switch to postgresql or mysql as database
5). Switch back to SQLite as database.
6). Click Setup mailu. Note that the button does not work.
## Expected behaviour
The Setup Mailu button works after following the above steps.
--- END ISSUE ---
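For completeness, one possible direction for a fix (a rough sketch with assumed field names, not a tested change to the setup utility): only treat the external-database fields as required when the submitted flavor is something other than SQLite, so stale `required` constraints from a previously selected flavor cannot block generation.
```
# Hypothetical server-side validation helper; the form field names are assumptions.
def validate_database_section(form):
    errors = []
    flavor = form.get("db_flavor", "sqlite")
    if flavor != "sqlite":
        for field in ("db_host", "db_name", "db_user", "db_pw"):
            if not form.get(field):
                errors.append("%s is required when the %s flavor is selected" % (field, flavor))
    return errors
```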
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optional/postgresql/start.py`
Content:
```
1 #!/usr/bin/python3
2
3 import anosql
4 import psycopg2
5 import glob
6 import os
7 import subprocess
8 from socrate import conf
9
10 def setup():
11 conn = psycopg2.connect(user='postgres')
12 queries = anosql.load_queries('postgres', '/conf/queries.sql')
13 # Mailu user
14 queries.create_mailu_user(conn)
15 queries.update_pw(conn, pw=os.environ.get("DB_PW"))
16 # Healthcheck user
17 queries.create_health_user(conn)
18 queries.grant_health(conn)
19 conn.commit()
20 # create db cannot be atomic. But this script is the only active connection, this is kinda safe.
21 if not queries.check_db(conn):
22 conn.set_isolation_level(0)
23 queries.create_db(conn)
24 conn.set_isolation_level(1)
25 conn.close()
26
27 # Check if /data is empty
28 if not os.listdir("/data"):
29 os.system("chown -R postgres:postgres /data")
30 os.system("chmod 0700 /data")
31 base_backups=sorted(glob.glob("/backup/base-*"))
32 if base_backups:
33 # Restore the latest backup
34 subprocess.call(["tar", "--same-owner", "-zpxf", base_backups[-1] + "/base.tar.gz" , "-C", "/data"])
35 if os.listdir("/backup/wal_archive"):
36 with open("/data/recovery.conf", "w") as rec:
37 rec.write("restore_command = 'gunzip < /backup/wal_archive/%f > %p'\n")
38 rec.write("standby_mode = off\n")
39 os.system("chown postgres:postgres /data/recovery.conf")
40 else:
41 # Bootstrap the database
42 os.system("sudo -u postgres initdb -D /data")
43
44 # Create backup directory structure, if it does not yet exist
45 os.system("mkdir -p /backup/wal_archive")
46 os.system("chown -R postgres:postgres /backup")
47
48 # Render config files
49 for pg_file in glob.glob("/conf/*.conf"):
50 conf.jinja(pg_file, os.environ, os.path.join("/data", os.path.basename(pg_file)))
51
52 # (Re)start postgresql locally for DB and user creation
53 os.system("sudo -u postgres pg_ctl start -D /data -o '-h \"''\" '")
54 while os.path.isfile("recovery.conf"):
55 pass
56 os.system("sudo -u postgres pg_ctl -D /data promote")
57 setup()
58 os.system("sudo -u postgres pg_ctl stop -m smart -w -D /data")
59
60 out=open("/proc/1/fd/1", "w")
61 err=open("/proc/1/fd/2", "w")
62 # Run the cron deamon
63 subprocess.Popen(["crond", "-f"], stdout=out, stderr=err)
64 # Run postgresql service
65 os.system("sudo -u postgres postgres -D /data -h \*")
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/optional/postgresql/start.py b/optional/postgresql/start.py
deleted file mode 100755
--- a/optional/postgresql/start.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/python3
-
-import anosql
-import psycopg2
-import glob
-import os
-import subprocess
-from socrate import conf
-
-def setup():
- conn = psycopg2.connect(user='postgres')
- queries = anosql.load_queries('postgres', '/conf/queries.sql')
- # Mailu user
- queries.create_mailu_user(conn)
- queries.update_pw(conn, pw=os.environ.get("DB_PW"))
- # Healthcheck user
- queries.create_health_user(conn)
- queries.grant_health(conn)
- conn.commit()
- # create db cannot be atomic. But this script is the only active connection, this is kinda safe.
- if not queries.check_db(conn):
- conn.set_isolation_level(0)
- queries.create_db(conn)
- conn.set_isolation_level(1)
- conn.close()
-
-# Check if /data is empty
-if not os.listdir("/data"):
- os.system("chown -R postgres:postgres /data")
- os.system("chmod 0700 /data")
- base_backups=sorted(glob.glob("/backup/base-*"))
- if base_backups:
- # Restore the latest backup
- subprocess.call(["tar", "--same-owner", "-zpxf", base_backups[-1] + "/base.tar.gz" , "-C", "/data"])
- if os.listdir("/backup/wal_archive"):
- with open("/data/recovery.conf", "w") as rec:
- rec.write("restore_command = 'gunzip < /backup/wal_archive/%f > %p'\n")
- rec.write("standby_mode = off\n")
- os.system("chown postgres:postgres /data/recovery.conf")
- else:
- # Bootstrap the database
- os.system("sudo -u postgres initdb -D /data")
-
-# Create backup directory structure, if it does not yet exist
-os.system("mkdir -p /backup/wal_archive")
-os.system("chown -R postgres:postgres /backup")
-
-# Render config files
-for pg_file in glob.glob("/conf/*.conf"):
- conf.jinja(pg_file, os.environ, os.path.join("/data", os.path.basename(pg_file)))
-
-# (Re)start postgresql locally for DB and user creation
-os.system("sudo -u postgres pg_ctl start -D /data -o '-h \"''\" '")
-while os.path.isfile("recovery.conf"):
- pass
-os.system("sudo -u postgres pg_ctl -D /data promote")
-setup()
-os.system("sudo -u postgres pg_ctl stop -m smart -w -D /data")
-
-out=open("/proc/1/fd/1", "w")
-err=open("/proc/1/fd/2", "w")
-# Run the cron deamon
-subprocess.Popen(["crond", "-f"], stdout=out, stderr=err)
-# Run postgresql service
-os.system("sudo -u postgres postgres -D /data -h \*")
| {"golden_diff": "diff --git a/optional/postgresql/start.py b/optional/postgresql/start.py\ndeleted file mode 100755\n--- a/optional/postgresql/start.py\n+++ /dev/null\n@@ -1,65 +0,0 @@\n-#!/usr/bin/python3\n-\n-import anosql\n-import psycopg2\n-import glob\n-import os\n-import subprocess\n-from socrate import conf\n-\n-def setup():\n- conn = psycopg2.connect(user='postgres')\n- queries = anosql.load_queries('postgres', '/conf/queries.sql')\n- # Mailu user\n- queries.create_mailu_user(conn)\n- queries.update_pw(conn, pw=os.environ.get(\"DB_PW\"))\n- # Healthcheck user\n- queries.create_health_user(conn)\n- queries.grant_health(conn)\n- conn.commit()\n- # create db cannot be atomic. But this script is the only active connection, this is kinda safe.\n- if not queries.check_db(conn):\n- conn.set_isolation_level(0)\n- queries.create_db(conn)\n- conn.set_isolation_level(1)\n- conn.close()\n-\n-# Check if /data is empty\n-if not os.listdir(\"/data\"):\n- os.system(\"chown -R postgres:postgres /data\")\n- os.system(\"chmod 0700 /data\")\n- base_backups=sorted(glob.glob(\"/backup/base-*\"))\n- if base_backups:\n- # Restore the latest backup\n- subprocess.call([\"tar\", \"--same-owner\", \"-zpxf\", base_backups[-1] + \"/base.tar.gz\" , \"-C\", \"/data\"])\n- if os.listdir(\"/backup/wal_archive\"):\n- with open(\"/data/recovery.conf\", \"w\") as rec:\n- rec.write(\"restore_command = 'gunzip < /backup/wal_archive/%f > %p'\\n\")\n- rec.write(\"standby_mode = off\\n\")\n- os.system(\"chown postgres:postgres /data/recovery.conf\")\n- else:\n- # Bootstrap the database\n- os.system(\"sudo -u postgres initdb -D /data\")\n-\n-# Create backup directory structure, if it does not yet exist\n-os.system(\"mkdir -p /backup/wal_archive\")\n-os.system(\"chown -R postgres:postgres /backup\")\n-\n-# Render config files\n-for pg_file in glob.glob(\"/conf/*.conf\"):\n- conf.jinja(pg_file, os.environ, os.path.join(\"/data\", os.path.basename(pg_file)))\n-\n-# (Re)start postgresql locally for DB and user creation\n-os.system(\"sudo -u postgres pg_ctl start -D /data -o '-h \\\"''\\\" '\")\n-while os.path.isfile(\"recovery.conf\"):\n- pass\n-os.system(\"sudo -u postgres pg_ctl -D /data promote\")\n-setup()\n-os.system(\"sudo -u postgres pg_ctl stop -m smart -w -D /data\")\n-\n-out=open(\"/proc/1/fd/1\", \"w\")\n-err=open(\"/proc/1/fd/2\", \"w\")\n-# Run the cron deamon\n-subprocess.Popen([\"crond\", \"-f\"], stdout=out, stderr=err)\n-# Run postgresql service\n-os.system(\"sudo -u postgres postgres -D /data -h \\*\")\n", "issue": "[Setup utility] Cannot generate files when database flavors have been switched\n## Before you open your issue\r\n- [x] Check if no issue or pull-request for this already exists.\r\n- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- [x] You understand `Mailu` is made by volunteers in their **free time** \u2014 be conscise, civil and accept that delays can occur.\r\n- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\n## Environment & Versions\r\n### Environment\r\n - [x] docker-compose\r\n - [ ] kubernetes\r\n - [x] docker swarm\r\n\r\n### Versions\r\nAll versions\r\n\r\n## Description\r\nIn the setup utility when you select a different database flavor and then switch back to SQLite, then you can not generate the files. 
The reason is that it still expects the fields to be filled of the alternative database flavor you previously selected. \r\nWhen you select an alternative database flavor, the fields (e.g. hostname, database name) are mandatory. These fields are still mandatory when you switch back to SQLlite as database flavor.\r\n\r\nAs a workaround you can fill in mandatory fields and then switch back to SQLite again. Or you could refresh the page and do not switch from SQLite. \r\n\r\nThe problem is in https://github.com/Mailu/Mailu/blob/master/setup/templates/steps/database.html . \r\nI didn't check yet how to resolve this.\r\n\r\n## Replication Steps\r\n1) Go to https://setup.mailu.io/master/ and click next.\r\n2). Enter a value for main domain server and public hostname.\r\n3). Select roundcube as webmail.\r\n4). At the bottom switch to postgresql or mysql as database\r\n5). Switch back to SQLite as database.\r\n6). Click Setup mailu. Note that the button does not work.\r\n\r\n## Expected behaviour\r\nThe Setup Mailu button works after following about steps.\n[Setup utility] Cannot generate files when database flavors have been switched\n## Before you open your issue\r\n- [x] Check if no issue or pull-request for this already exists.\r\n- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- [x] You understand `Mailu` is made by volunteers in their **free time** \u2014 be conscise, civil and accept that delays can occur.\r\n- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\n## Environment & Versions\r\n### Environment\r\n - [x] docker-compose\r\n - [ ] kubernetes\r\n - [x] docker swarm\r\n\r\n### Versions\r\nAll versions\r\n\r\n## Description\r\nIn the setup utility when you select a different database flavor and then switch back to SQLite, then you can not generate the files. The reason is that it still expects the fields to be filled of the alternative database flavor you previously selected. \r\nWhen you select an alternative database flavor, the fields (e.g. hostname, database name) are mandatory. These fields are still mandatory when you switch back to SQLlite as database flavor.\r\n\r\nAs a workaround you can fill in mandatory fields and then switch back to SQLite again. Or you could refresh the page and do not switch from SQLite. \r\n\r\nThe problem is in https://github.com/Mailu/Mailu/blob/master/setup/templates/steps/database.html . \r\nI didn't check yet how to resolve this.\r\n\r\n## Replication Steps\r\n1) Go to https://setup.mailu.io/master/ and click next.\r\n2). Enter a value for main domain server and public hostname.\r\n3). Select roundcube as webmail.\r\n4). At the bottom switch to postgresql or mysql as database\r\n5). Switch back to SQLite as database.\r\n6). Click Setup mailu. 
Note that the button does not work.\r\n\r\n## Expected behaviour\r\nThe Setup Mailu button works after following about steps.\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport anosql\nimport psycopg2\nimport glob\nimport os\nimport subprocess\nfrom socrate import conf\n\ndef setup():\n conn = psycopg2.connect(user='postgres')\n queries = anosql.load_queries('postgres', '/conf/queries.sql')\n # Mailu user\n queries.create_mailu_user(conn)\n queries.update_pw(conn, pw=os.environ.get(\"DB_PW\"))\n # Healthcheck user\n queries.create_health_user(conn)\n queries.grant_health(conn)\n conn.commit()\n # create db cannot be atomic. But this script is the only active connection, this is kinda safe.\n if not queries.check_db(conn):\n conn.set_isolation_level(0)\n queries.create_db(conn)\n conn.set_isolation_level(1)\n conn.close()\n\n# Check if /data is empty\nif not os.listdir(\"/data\"):\n os.system(\"chown -R postgres:postgres /data\")\n os.system(\"chmod 0700 /data\")\n base_backups=sorted(glob.glob(\"/backup/base-*\"))\n if base_backups:\n # Restore the latest backup\n subprocess.call([\"tar\", \"--same-owner\", \"-zpxf\", base_backups[-1] + \"/base.tar.gz\" , \"-C\", \"/data\"])\n if os.listdir(\"/backup/wal_archive\"):\n with open(\"/data/recovery.conf\", \"w\") as rec:\n rec.write(\"restore_command = 'gunzip < /backup/wal_archive/%f > %p'\\n\")\n rec.write(\"standby_mode = off\\n\")\n os.system(\"chown postgres:postgres /data/recovery.conf\")\n else:\n # Bootstrap the database\n os.system(\"sudo -u postgres initdb -D /data\")\n\n# Create backup directory structure, if it does not yet exist\nos.system(\"mkdir -p /backup/wal_archive\")\nos.system(\"chown -R postgres:postgres /backup\")\n\n# Render config files\nfor pg_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(pg_file, os.environ, os.path.join(\"/data\", os.path.basename(pg_file)))\n\n# (Re)start postgresql locally for DB and user creation\nos.system(\"sudo -u postgres pg_ctl start -D /data -o '-h \\\"''\\\" '\")\nwhile os.path.isfile(\"recovery.conf\"):\n pass\nos.system(\"sudo -u postgres pg_ctl -D /data promote\")\nsetup()\nos.system(\"sudo -u postgres pg_ctl stop -m smart -w -D /data\")\n\nout=open(\"/proc/1/fd/1\", \"w\")\nerr=open(\"/proc/1/fd/2\", \"w\")\n# Run the cron deamon\nsubprocess.Popen([\"crond\", \"-f\"], stdout=out, stderr=err)\n# Run postgresql service\nos.system(\"sudo -u postgres postgres -D /data -h \\*\")\n", "path": "optional/postgresql/start.py"}], "after_files": [{"content": null, "path": "optional/postgresql/start.py"}]} | 1,854 | 710 |
gh_patches_debug_41778 | rasdani/github-patches | git_diff | google__flax-2064 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Document default stride for pooling functions
### Discussed in https://github.com/google/flax/discussions/2023
<div type='discussions-op-text'>
<sup>Originally posted by **dogeplusplus** April 3, 2022</sup>
A bit of a nitpick but I was wondering why the default behavior of pooling functions is to have stride 1 instead of the `window_shape`? I feel that for most use cases the stride would be the dimension of the kernel size as in other frameworks.</div>
--- END ISSUE ---
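A small usage sketch of the behavior under discussion, assuming the `flax.linen` pooling API shown in the file below: with `strides=None` the window slides by one step, so callers who want the kernel-sized stride that other frameworks default to must pass it explicitly.
```
import jax.numpy as jnp
from flax import linen as nn

x = jnp.ones((1, 8, 8, 3))                                       # (batch, H, W, features)
y_default = nn.max_pool(x, window_shape=(2, 2))                  # strides default to (1, 1) -> (1, 7, 7, 3)
y_strided = nn.max_pool(x, window_shape=(2, 2), strides=(2, 2))  # kernel-sized stride   -> (1, 4, 4, 3)
```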
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flax/linen/pooling.py`
Content:
```
1 # Copyright 2022 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Pooling modules."""
16
17 from jax import lax
18 import jax.numpy as jnp
19
20 import numpy as np
21
22
23 def pool(inputs, init, reduce_fn, window_shape, strides, padding):
24 """Helper function to define pooling functions.
25
26 Pooling functions are implemented using the ReduceWindow XLA op.
27 NOTE: Be aware that pooling is not generally differentiable.
28 That means providing a reduce_fn that is differentiable does not imply
29 that pool is differentiable.
30
31 Args:
32 inputs: input data with dimensions (batch, window dims..., features).
33 init: the initial value for the reduction
34 reduce_fn: a reduce function of the form `(T, T) -> T`.
35 window_shape: a shape tuple defining the window to reduce over.
36 strides: a sequence of `n` integers, representing the inter-window
37 strides.
38 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
39 of `n` `(low, high)` integer pairs that give the padding to apply before
40 and after each spatial dimension.
41 Returns:
42 The output of the reduction for each window slice.
43 """
44 strides = strides or (1,) * len(window_shape)
45 assert len(window_shape) == len(strides), (
46 f"len({window_shape}) must equal len({strides})")
47 strides = (1,) + strides + (1,)
48 dims = (1,) + window_shape + (1,)
49
50 is_single_input = False
51 if inputs.ndim == len(dims) - 1:
52 # add singleton batch dimension because lax.reduce_window always
53 # needs a batch dimension.
54 inputs = inputs[None]
55 is_single_input = True
56
57 assert inputs.ndim == len(dims), f"len({inputs.shape}) != len({dims})"
58 if not isinstance(padding, str):
59 padding = tuple(map(tuple, padding))
60 assert len(padding) == len(window_shape), (
61 f"padding {padding} must specify pads for same number of dims as "
62 f"window_shape {window_shape}")
63 assert all([len(x) == 2 for x in padding]), (
64 f"each entry in padding {padding} must be length 2")
65 padding = ((0, 0),) + padding + ((0, 0),)
66 y = lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)
67 if is_single_input:
68 y = jnp.squeeze(y, axis=0)
69 return y
70
71
72 def avg_pool(inputs, window_shape, strides=None, padding="VALID"):
73 """Pools the input by taking the average over a window.
74
75 Args:
76 inputs: input data with dimensions (batch, window dims..., features).
77 window_shape: a shape tuple defining the window to reduce over.
78 strides: a sequence of `n` integers, representing the inter-window
79 strides (default: `(1, ..., 1)`).
80 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
81 of `n` `(low, high)` integer pairs that give the padding to apply before
82 and after each spatial dimension (default: `'VALID'`).
83 Returns:
84 The average for each window slice.
85 """
86 y = pool(inputs, 0., lax.add, window_shape, strides, padding)
87 y = y / np.prod(window_shape)
88 return y
89
90
91 def max_pool(inputs, window_shape, strides=None, padding="VALID"):
92 """Pools the input by taking the maximum of a window slice.
93
94 Args:
95 inputs: input data with dimensions (batch, window dims..., features).
96 window_shape: a shape tuple defining the window to reduce over.
97 strides: a sequence of `n` integers, representing the inter-window
98 strides (default: `(1, ..., 1)`).
99 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
100 of `n` `(low, high)` integer pairs that give the padding to apply before
101 and after each spatial dimension (default: `'VALID'`).
102 Returns:
103 The maximum for each window slice.
104 """
105 y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)
106 return y
107
108
109 def min_pool(inputs, window_shape, strides=None, padding="VALID"):
110 """Pools the input by taking the minimum of a window slice.
111
112 Args:
113 inputs: Input data with dimensions (batch, window dims..., features).
114 window_shape: A shape tuple defining the window to reduce over.
115 strides: A sequence of `n` integers, representing the inter-window strides
116 (default: `(1, ..., 1)`).
117 padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence of
118 `n` `(low, high)` integer pairs that give the padding to apply before and
119 after each spatial dimension (default: `'VALID'`).
120
121 Returns:
122 The minimum for each window slice.
123 """
124 return pool(inputs, jnp.inf, lax.min, window_shape, strides, padding)
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flax/linen/pooling.py b/flax/linen/pooling.py
--- a/flax/linen/pooling.py
+++ b/flax/linen/pooling.py
@@ -25,8 +25,8 @@
Pooling functions are implemented using the ReduceWindow XLA op.
NOTE: Be aware that pooling is not generally differentiable.
- That means providing a reduce_fn that is differentiable does not imply
- that pool is differentiable.
+ That means providing a reduce_fn that is differentiable does not imply that
+ pool is differentiable.
Args:
inputs: input data with dimensions (batch, window dims..., features).
@@ -34,7 +34,7 @@
reduce_fn: a reduce function of the form `(T, T) -> T`.
window_shape: a shape tuple defining the window to reduce over.
strides: a sequence of `n` integers, representing the inter-window
- strides.
+ strides (default: `(1, ..., 1)`).
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
of `n` `(low, high)` integer pairs that give the padding to apply before
and after each spatial dimension.
@@ -76,7 +76,7 @@
inputs: input data with dimensions (batch, window dims..., features).
window_shape: a shape tuple defining the window to reduce over.
strides: a sequence of `n` integers, representing the inter-window
- strides (default: `(1, ..., 1)`).
+ strides (default: `(1, ..., 1)`).
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
of `n` `(low, high)` integer pairs that give the padding to apply before
and after each spatial dimension (default: `'VALID'`).
@@ -95,7 +95,7 @@
inputs: input data with dimensions (batch, window dims..., features).
window_shape: a shape tuple defining the window to reduce over.
strides: a sequence of `n` integers, representing the inter-window
- strides (default: `(1, ..., 1)`).
+ strides (default: `(1, ..., 1)`).
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
of `n` `(low, high)` integer pairs that give the padding to apply before
and after each spatial dimension (default: `'VALID'`).
@@ -113,7 +113,7 @@
inputs: Input data with dimensions (batch, window dims..., features).
window_shape: A shape tuple defining the window to reduce over.
strides: A sequence of `n` integers, representing the inter-window strides
- (default: `(1, ..., 1)`).
+ (default: `(1, ..., 1)`).
padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence of
`n` `(low, high)` integer pairs that give the padding to apply before and
after each spatial dimension (default: `'VALID'`).
| {"golden_diff": "diff --git a/flax/linen/pooling.py b/flax/linen/pooling.py\n--- a/flax/linen/pooling.py\n+++ b/flax/linen/pooling.py\n@@ -25,8 +25,8 @@\n \n Pooling functions are implemented using the ReduceWindow XLA op.\n NOTE: Be aware that pooling is not generally differentiable.\n- That means providing a reduce_fn that is differentiable does not imply\n- that pool is differentiable.\n+ That means providing a reduce_fn that is differentiable does not imply that\n+ pool is differentiable.\n \n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n@@ -34,7 +34,7 @@\n reduce_fn: a reduce function of the form `(T, T) -> T`.\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n- strides.\n+ strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension.\n@@ -76,7 +76,7 @@\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n- strides (default: `(1, ..., 1)`).\n+ strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n@@ -95,7 +95,7 @@\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n- strides (default: `(1, ..., 1)`).\n+ strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n@@ -113,7 +113,7 @@\n inputs: Input data with dimensions (batch, window dims..., features).\n window_shape: A shape tuple defining the window to reduce over.\n strides: A sequence of `n` integers, representing the inter-window strides\n- (default: `(1, ..., 1)`).\n+ (default: `(1, ..., 1)`).\n padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence of\n `n` `(low, high)` integer pairs that give the padding to apply before and\n after each spatial dimension (default: `'VALID'`).\n", "issue": "Document default stride for pooling functions\n### Discussed in https://github.com/google/flax/discussions/2023\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **dogeplusplus** April 3, 2022</sup>\r\nA bit of a nitpick but I was wondering why the default behavior of pooling functions is to have stride 1 instead of the `window_shape`? 
I feel that for most use cases the stride would be the dimension of the kernel size as in other frameworks.</div>\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Pooling modules.\"\"\"\n\nfrom jax import lax\nimport jax.numpy as jnp\n\nimport numpy as np\n\n\ndef pool(inputs, init, reduce_fn, window_shape, strides, padding):\n \"\"\"Helper function to define pooling functions.\n\n Pooling functions are implemented using the ReduceWindow XLA op.\n NOTE: Be aware that pooling is not generally differentiable.\n That means providing a reduce_fn that is differentiable does not imply\n that pool is differentiable.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n init: the initial value for the reduction\n reduce_fn: a reduce function of the form `(T, T) -> T`.\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides.\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension.\n Returns:\n The output of the reduction for each window slice.\n \"\"\"\n strides = strides or (1,) * len(window_shape)\n assert len(window_shape) == len(strides), (\n f\"len({window_shape}) must equal len({strides})\")\n strides = (1,) + strides + (1,)\n dims = (1,) + window_shape + (1,)\n\n is_single_input = False\n if inputs.ndim == len(dims) - 1:\n # add singleton batch dimension because lax.reduce_window always\n # needs a batch dimension.\n inputs = inputs[None]\n is_single_input = True\n\n assert inputs.ndim == len(dims), f\"len({inputs.shape}) != len({dims})\"\n if not isinstance(padding, str):\n padding = tuple(map(tuple, padding))\n assert len(padding) == len(window_shape), (\n f\"padding {padding} must specify pads for same number of dims as \"\n f\"window_shape {window_shape}\")\n assert all([len(x) == 2 for x in padding]), (\n f\"each entry in padding {padding} must be length 2\")\n padding = ((0, 0),) + padding + ((0, 0),)\n y = lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)\n if is_single_input:\n y = jnp.squeeze(y, axis=0)\n return y\n\n\ndef avg_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the average over a window.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The average for each window slice.\n \"\"\"\n y = pool(inputs, 0., lax.add, window_shape, strides, padding)\n y = y / 
np.prod(window_shape)\n return y\n\n\ndef max_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the maximum of a window slice.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The maximum for each window slice.\n \"\"\"\n y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)\n return y\n\n\ndef min_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the minimum of a window slice.\n\n Args:\n inputs: Input data with dimensions (batch, window dims..., features).\n window_shape: A shape tuple defining the window to reduce over.\n strides: A sequence of `n` integers, representing the inter-window strides\n (default: `(1, ..., 1)`).\n padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence of\n `n` `(low, high)` integer pairs that give the padding to apply before and\n after each spatial dimension (default: `'VALID'`).\n\n Returns:\n The minimum for each window slice.\n \"\"\"\n return pool(inputs, jnp.inf, lax.min, window_shape, strides, padding)\n", "path": "flax/linen/pooling.py"}], "after_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Pooling modules.\"\"\"\n\nfrom jax import lax\nimport jax.numpy as jnp\n\nimport numpy as np\n\n\ndef pool(inputs, init, reduce_fn, window_shape, strides, padding):\n \"\"\"Helper function to define pooling functions.\n\n Pooling functions are implemented using the ReduceWindow XLA op.\n NOTE: Be aware that pooling is not generally differentiable.\n That means providing a reduce_fn that is differentiable does not imply that\n pool is differentiable.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n init: the initial value for the reduction\n reduce_fn: a reduce function of the form `(T, T) -> T`.\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension.\n Returns:\n The output of the reduction for each window slice.\n \"\"\"\n strides = strides or (1,) * len(window_shape)\n assert len(window_shape) == len(strides), (\n f\"len({window_shape}) must equal len({strides})\")\n strides = (1,) + strides + (1,)\n dims = (1,) + window_shape + (1,)\n\n is_single_input = False\n if inputs.ndim == len(dims) - 1:\n # add singleton batch dimension 
because lax.reduce_window always\n # needs a batch dimension.\n inputs = inputs[None]\n is_single_input = True\n\n assert inputs.ndim == len(dims), f\"len({inputs.shape}) != len({dims})\"\n if not isinstance(padding, str):\n padding = tuple(map(tuple, padding))\n assert len(padding) == len(window_shape), (\n f\"padding {padding} must specify pads for same number of dims as \"\n f\"window_shape {window_shape}\")\n assert all([len(x) == 2 for x in padding]), (\n f\"each entry in padding {padding} must be length 2\")\n padding = ((0, 0),) + padding + ((0, 0),)\n y = lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)\n if is_single_input:\n y = jnp.squeeze(y, axis=0)\n return y\n\n\ndef avg_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the average over a window.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The average for each window slice.\n \"\"\"\n y = pool(inputs, 0., lax.add, window_shape, strides, padding)\n y = y / np.prod(window_shape)\n return y\n\n\ndef max_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the maximum of a window slice.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The maximum for each window slice.\n \"\"\"\n y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)\n return y\n\n\ndef min_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the minimum of a window slice.\n\n Args:\n inputs: Input data with dimensions (batch, window dims..., features).\n window_shape: A shape tuple defining the window to reduce over.\n strides: A sequence of `n` integers, representing the inter-window strides\n (default: `(1, ..., 1)`).\n padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence of\n `n` `(low, high)` integer pairs that give the padding to apply before and\n after each spatial dimension (default: `'VALID'`).\n\n Returns:\n The minimum for each window slice.\n \"\"\"\n return pool(inputs, jnp.inf, lax.min, window_shape, strides, padding)\n", "path": "flax/linen/pooling.py"}]} | 1,871 | 698 |
gh_patches_debug_30539 | rasdani/github-patches | git_diff | chainer__chainer-1266 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Concat reverse indexing not working correctly
The following works correctly:
```
a_data = np.zeros((10, 5))
b_data = np.zeros((10, 3))
a = Variable(a_data)
b = Variable(b_data)
F.concat((a, b), axis=1)
```
However, if I change the last line into:
```
F.concat((a, b), axis=-1)
```
it gives the error:
```
Invalid operation is performed in: Concat (Forward)
Expect: in_types[0].shape[1] == in_types[1].shape[1]
Actual: 5 != 3
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/array/concat.py`
Content:
```
1 import numpy
2
3 from chainer import cuda
4 from chainer import function
5 from chainer.utils import type_check
6
7
8 class Concat(function.Function):
9
10 """Concatenate multiple tensors towards specified axis."""
11
12 # concat along the channel dimension by default
13 def __init__(self, axis=1):
14 self.axis = axis
15
16 def check_type_forward(self, in_types):
17 type_check.expect(in_types.size() > 0)
18 type_check.expect(in_types[0].ndim >
19 type_check.Variable(self.axis, 'axis'))
20
21 ndim = in_types[0].ndim.eval()
22 for i in range(1, in_types.size().eval()):
23 type_check.expect(
24 in_types[0].dtype == in_types[i].dtype,
25 in_types[0].ndim == in_types[i].ndim,
26 )
27 for d in range(0, ndim):
28 if d == self.axis:
29 continue
30 type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])
31
32 def forward(self, xs):
33 xp = cuda.get_array_module(*xs)
34 return xp.concatenate(xs, axis=self.axis),
35
36 def backward(self, xs, gy):
37 if not xs[:-1]:
38 return gy
39
40 xp = cuda.get_array_module(*xs)
41 sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()
42 return xp.split(gy[0], sizes, axis=self.axis)
43
44
45 def concat(xs, axis=1):
46 """Concatenates given variables along an axis.
47
48 Args:
49 xs (tuple of Variables): Variables to be concatenated.
50 axis (int): Axis that the input arrays are concatenated along.
51
52 Returns:
53 ~chainer.Variable: Output variable.
54
55 """
56 return Concat(axis=axis)(*xs)
57
```
Path: `cupy/manipulation/split.py`
Content:
```
1 import numpy
2 import six
3
4
5 def array_split(ary, indices_or_sections, axis=0):
6 """Splits an array into multiple sub arrays along a given axis.
7
8 This function is almost equivalent to :func:`cupy.split`. The only
9 difference is that this function allows an integer sections that does not
10 evenly divide the axis.
11
12 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`
13
14 """
15 if ary.ndim <= axis:
16 raise IndexError('Axis exceeds ndim')
17 size = ary.shape[axis]
18
19 if numpy.isscalar(indices_or_sections):
20 each_size = (size - 1) // indices_or_sections + 1
21 indices = [i * each_size
22 for i in six.moves.range(1, indices_or_sections)]
23 else:
24 indices = indices_or_sections
25
26 if len(indices) == 0:
27 return [ary]
28
29 skip = (slice(None),) * axis
30 ret = []
31 i = 0
32 for index in indices:
33 ret.append(ary[skip + (slice(i, index),)])
34 i = index
35 ret.append(ary[skip + (slice(i, size),)])
36
37 return ret
38
39
40 def dsplit(ary, indices_or_sections):
41 """Splits an array into multiple sub arrays along the third axis.
42
43 This is equivalent to ``split`` with ``axis=2``.
44
45 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`
46
47 """
48 if ary.ndim <= 2:
49 raise ValueError('Cannot dsplit an array with less than 3 dimensions')
50 return split(ary, indices_or_sections, 2)
51
52
53 def hsplit(ary, indices_or_sections):
54 """Splits an array into multiple sub arrays horizontally.
55
56 This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
57 dimension, and otherwise that with ``axis=1``.
58
59 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`
60
61 """
62 if ary.ndim == 0:
63 raise ValueError('Cannot hsplit a zero-dimensional array')
64 if ary.ndim == 1:
65 return split(ary, indices_or_sections, 0)
66 else:
67 return split(ary, indices_or_sections, 1)
68
69
70 def split(ary, indices_or_sections, axis=0):
71 """Splits an array into multiple sub arrays along a given axis.
72
73 Args:
74 ary (cupy.ndarray): Array to split.
75 indices_or_sections (int or sequence of ints): A value indicating how
76 to divide the axis. If it is an integer, then is treated as the
77 number of sections, and the axis is evenly divided. Otherwise,
78 the integers indicate indices to split at. Note that the sequence
79 on the device memory is not allowed.
80 axis (int): Axis along which the array is split.
81
82 Returns:
83 A list of sub arrays. Each array is a view of the corresponding input
84 array.
85
86 .. seealso:: :func:`numpy.split`
87
88 """
89 if ary.ndim <= axis:
90 raise IndexError('Axis exceeds ndim')
91 size = ary.shape[axis]
92
93 if numpy.isscalar(indices_or_sections):
94 if size % indices_or_sections != 0:
95 raise ValueError(
96 'indices_or_sections must divide the size along the axes.\n'
97 'If you want to split the array into non-equally-sized '
98 'arrays, use array_split instead.')
99 return array_split(ary, indices_or_sections, axis)
100
101
102 def vsplit(ary, indices_or_sections):
103 """Splits an array into multiple sub arrays along the first axis.
104
105 This is equivalent to ``split`` with ``axis=0``.
106
107 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`
108
109 """
110 if ary.ndim <= 1:
111 raise ValueError('Cannot vsplit an array with less than 2 dimensions')
112 return split(ary, indices_or_sections, 0)
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/array/concat.py b/chainer/functions/array/concat.py
--- a/chainer/functions/array/concat.py
+++ b/chainer/functions/array/concat.py
@@ -18,14 +18,19 @@
type_check.expect(in_types[0].ndim >
type_check.Variable(self.axis, 'axis'))
+ type_check.expect(
+ -in_types[0].ndim <= self.axis,
+ self.axis < in_types[0].ndim
+ )
ndim = in_types[0].ndim.eval()
+ axis = self.axis % ndim
for i in range(1, in_types.size().eval()):
type_check.expect(
in_types[0].dtype == in_types[i].dtype,
in_types[0].ndim == in_types[i].ndim,
)
for d in range(0, ndim):
- if d == self.axis:
+ if d == axis:
continue
type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])
diff --git a/cupy/manipulation/split.py b/cupy/manipulation/split.py
--- a/cupy/manipulation/split.py
+++ b/cupy/manipulation/split.py
@@ -12,8 +12,10 @@
.. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`
"""
- if ary.ndim <= axis:
+ ndim = ary.ndim
+ if -ndim > axis or ndim <= axis:
raise IndexError('Axis exceeds ndim')
+ axis %= ndim
size = ary.shape[axis]
if numpy.isscalar(indices_or_sections):
@@ -33,7 +35,6 @@
ret.append(ary[skip + (slice(i, index),)])
i = index
ret.append(ary[skip + (slice(i, size),)])
-
return ret
| {"golden_diff": "diff --git a/chainer/functions/array/concat.py b/chainer/functions/array/concat.py\n--- a/chainer/functions/array/concat.py\n+++ b/chainer/functions/array/concat.py\n@@ -18,14 +18,19 @@\n type_check.expect(in_types[0].ndim >\n type_check.Variable(self.axis, 'axis'))\n \n+ type_check.expect(\n+ -in_types[0].ndim <= self.axis,\n+ self.axis < in_types[0].ndim\n+ )\n ndim = in_types[0].ndim.eval()\n+ axis = self.axis % ndim\n for i in range(1, in_types.size().eval()):\n type_check.expect(\n in_types[0].dtype == in_types[i].dtype,\n in_types[0].ndim == in_types[i].ndim,\n )\n for d in range(0, ndim):\n- if d == self.axis:\n+ if d == axis:\n continue\n type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])\n \ndiff --git a/cupy/manipulation/split.py b/cupy/manipulation/split.py\n--- a/cupy/manipulation/split.py\n+++ b/cupy/manipulation/split.py\n@@ -12,8 +12,10 @@\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`\n \n \"\"\"\n- if ary.ndim <= axis:\n+ ndim = ary.ndim\n+ if -ndim > axis or ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n+ axis %= ndim\n size = ary.shape[axis]\n \n if numpy.isscalar(indices_or_sections):\n@@ -33,7 +35,6 @@\n ret.append(ary[skip + (slice(i, index),)])\n i = index\n ret.append(ary[skip + (slice(i, size),)])\n-\n return ret\n", "issue": "Concat reverse indexing not working correctly\nThe following works correctly:\n\n```\na_data = np.zeros((10, 5))\nb_data = np.zeros((10, 3))\na = Variable(a_data)\nb = Variable(b_data)\nF.concat((a, b), axis=1)\n```\n\nHowever, if I change the last line into:\n\n```\nF.concat((a, b), axis=-1)\n```\n\nit gives the error:\n\n```\nInvalid operation is performed in: Concat (Forward)\n\nExpect: in_types[0].shape[1] == in_types[1].shape[1]\nActual: 5 != 3\n```\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Concat(function.Function):\n\n \"\"\"Concatenate multiple tensors towards specified axis.\"\"\"\n\n # concat along the channel dimension by default\n def __init__(self, axis=1):\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() > 0)\n type_check.expect(in_types[0].ndim >\n type_check.Variable(self.axis, 'axis'))\n\n ndim = in_types[0].ndim.eval()\n for i in range(1, in_types.size().eval()):\n type_check.expect(\n in_types[0].dtype == in_types[i].dtype,\n in_types[0].ndim == in_types[i].ndim,\n )\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])\n\n def forward(self, xs):\n xp = cuda.get_array_module(*xs)\n return xp.concatenate(xs, axis=self.axis),\n\n def backward(self, xs, gy):\n if not xs[:-1]:\n return gy\n\n xp = cuda.get_array_module(*xs)\n sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()\n return xp.split(gy[0], sizes, axis=self.axis)\n\n\ndef concat(xs, axis=1):\n \"\"\"Concatenates given variables along an axis.\n\n Args:\n xs (tuple of Variables): Variables to be concatenated.\n axis (int): Axis that the input arrays are concatenated along.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Concat(axis=axis)(*xs)\n", "path": "chainer/functions/array/concat.py"}, {"content": "import numpy\nimport six\n\n\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n This function is almost equivalent to :func:`cupy.split`. 
The only\n difference is that this function allows an integer sections that does not\n evenly divide the axis.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n each_size = (size - 1) // indices_or_sections + 1\n indices = [i * each_size\n for i in six.moves.range(1, indices_or_sections)]\n else:\n indices = indices_or_sections\n\n if len(indices) == 0:\n return [ary]\n\n skip = (slice(None),) * axis\n ret = []\n i = 0\n for index in indices:\n ret.append(ary[skip + (slice(i, index),)])\n i = index\n ret.append(ary[skip + (slice(i, size),)])\n\n return ret\n\n\ndef dsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the third axis.\n\n This is equivalent to ``split`` with ``axis=2``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 2:\n raise ValueError('Cannot dsplit an array with less than 3 dimensions')\n return split(ary, indices_or_sections, 2)\n\n\ndef hsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays horizontally.\n\n This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one\n dimension, and otherwise that with ``axis=1``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`\n\n \"\"\"\n if ary.ndim == 0:\n raise ValueError('Cannot hsplit a zero-dimensional array')\n if ary.ndim == 1:\n return split(ary, indices_or_sections, 0)\n else:\n return split(ary, indices_or_sections, 1)\n\n\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n Args:\n ary (cupy.ndarray): Array to split.\n indices_or_sections (int or sequence of ints): A value indicating how\n to divide the axis. If it is an integer, then is treated as the\n number of sections, and the axis is evenly divided. Otherwise,\n the integers indicate indices to split at. Note that the sequence\n on the device memory is not allowed.\n axis (int): Axis along which the array is split.\n\n Returns:\n A list of sub arrays. Each array is a view of the corresponding input\n array.\n\n .. seealso:: :func:`numpy.split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n if size % indices_or_sections != 0:\n raise ValueError(\n 'indices_or_sections must divide the size along the axes.\\n'\n 'If you want to split the array into non-equally-sized '\n 'arrays, use array_split instead.')\n return array_split(ary, indices_or_sections, axis)\n\n\ndef vsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the first axis.\n\n This is equivalent to ``split`` with ``axis=0``.\n\n .. 
seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 1:\n raise ValueError('Cannot vsplit an array with less than 2 dimensions')\n return split(ary, indices_or_sections, 0)\n", "path": "cupy/manipulation/split.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Concat(function.Function):\n\n \"\"\"Concatenate multiple tensors towards specified axis.\"\"\"\n\n # concat along the channel dimension by default\n def __init__(self, axis=1):\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() > 0)\n type_check.expect(in_types[0].ndim >\n type_check.Variable(self.axis, 'axis'))\n\n type_check.expect(\n -in_types[0].ndim <= self.axis,\n self.axis < in_types[0].ndim\n )\n ndim = in_types[0].ndim.eval()\n axis = self.axis % ndim\n for i in range(1, in_types.size().eval()):\n type_check.expect(\n in_types[0].dtype == in_types[i].dtype,\n in_types[0].ndim == in_types[i].ndim,\n )\n for d in range(0, ndim):\n if d == axis:\n continue\n type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])\n\n def forward(self, xs):\n xp = cuda.get_array_module(*xs)\n return xp.concatenate(xs, axis=self.axis),\n\n def backward(self, xs, gy):\n if not xs[:-1]:\n return gy\n\n xp = cuda.get_array_module(*xs)\n sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()\n return xp.split(gy[0], sizes, axis=self.axis)\n\n\ndef concat(xs, axis=1):\n \"\"\"Concatenates given variables along an axis.\n\n Args:\n xs (tuple of Variables): Variables to be concatenated.\n axis (int): Axis that the input arrays are concatenated along.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Concat(axis=axis)(*xs)\n", "path": "chainer/functions/array/concat.py"}, {"content": "import numpy\nimport six\n\n\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n This function is almost equivalent to :func:`cupy.split`. The only\n difference is that this function allows an integer sections that does not\n evenly divide the axis.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`\n\n \"\"\"\n ndim = ary.ndim\n if -ndim > axis or ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n axis %= ndim\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n each_size = (size - 1) // indices_or_sections + 1\n indices = [i * each_size\n for i in six.moves.range(1, indices_or_sections)]\n else:\n indices = indices_or_sections\n\n if len(indices) == 0:\n return [ary]\n\n skip = (slice(None),) * axis\n ret = []\n i = 0\n for index in indices:\n ret.append(ary[skip + (slice(i, index),)])\n i = index\n ret.append(ary[skip + (slice(i, size),)])\n return ret\n\n\ndef dsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the third axis.\n\n This is equivalent to ``split`` with ``axis=2``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 2:\n raise ValueError('Cannot dsplit an array with less than 3 dimensions')\n return split(ary, indices_or_sections, 2)\n\n\ndef hsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays horizontally.\n\n This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one\n dimension, and otherwise that with ``axis=1``.\n\n .. 
seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`\n\n \"\"\"\n if ary.ndim == 0:\n raise ValueError('Cannot hsplit a zero-dimensional array')\n if ary.ndim == 1:\n return split(ary, indices_or_sections, 0)\n else:\n return split(ary, indices_or_sections, 1)\n\n\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n Args:\n ary (cupy.ndarray): Array to split.\n indices_or_sections (int or sequence of ints): A value indicating how\n to divide the axis. If it is an integer, then is treated as the\n number of sections, and the axis is evenly divided. Otherwise,\n the integers indicate indices to split at. Note that the sequence\n on the device memory is not allowed.\n axis (int): Axis along which the array is split.\n\n Returns:\n A list of sub arrays. Each array is a view of the corresponding input\n array.\n\n .. seealso:: :func:`numpy.split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n if size % indices_or_sections != 0:\n raise ValueError(\n 'indices_or_sections must divide the size along the axes.\\n'\n 'If you want to split the array into non-equally-sized '\n 'arrays, use array_split instead.')\n return array_split(ary, indices_or_sections, axis)\n\n\ndef vsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the first axis.\n\n This is equivalent to ``split`` with ``axis=0``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 1:\n raise ValueError('Cannot vsplit an array with less than 2 dimensions')\n return split(ary, indices_or_sections, 0)\n", "path": "cupy/manipulation/split.py"}]} | 2,042 | 436 |
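The golden diff above makes `Concat` and `array_split` accept negative axes by validating `-ndim <= axis < ndim` and then normalising with `axis % ndim`. A minimal sketch of that normalisation in plain NumPy (the helper name is hypothetical and independent of Chainer/CuPy) shows why the per-dimension shape check then passes for `axis=-1`:

```python
import numpy as np

def normalize_axis(axis, ndim):
    # Same bounds check the patch adds, then wrap negative axes into [0, ndim).
    if not (-ndim <= axis < ndim):
        raise IndexError(f"axis {axis} out of range for ndim {ndim}")
    return axis % ndim

a = np.zeros((10, 5))
b = np.zeros((10, 3))
axis = normalize_axis(-1, a.ndim)  # -> 1

# Every dimension except the (normalised) concat axis must match.
assert all(a.shape[d] == b.shape[d] for d in range(a.ndim) if d != axis)
print(np.concatenate((a, b), axis=axis).shape)  # (10, 8)
```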
gh_patches_debug_11688 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-272 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Core plugins zips to published artifacts.
The plugins built with the core repo should be bundled as zips and hosted at artifacts.opensearch.org.
This task involves updating Opensearch's build.sh script to build and include these artifacts with the component's artifact list.
Right now we also have brittle logic that assumes any component with "plugins" in their artifacts list is a plugin repository. This should be updated to identify the min bundle component in another way. Perhaps with a separate artifact folder for "min-bundle"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bundle-workflow/python/build_workflow/builder.py`
Content:
```
1 # Copyright OpenSearch Contributors.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import os
5
6 '''
7 This class is responsible for executing the build for a component and passing the results to a build recorder.
8 It will notify the build recorder of build information such as repository and git ref, and any artifacts generated by the build.
9 Artifacts found in "<build root>/artifacts/<maven|plugins|libs|bundle>" will be recognized and recorded.
10 '''
11 class Builder:
12 def __init__(self, component_name, git_repo, script_finder, build_recorder):
13 '''
14 Construct a new Builder instance.
15 :param component_name: The name of the component to build.
16 :param git_repo: A GitRepository instance containing the checked-out code.
17 :param script_finder: The ScriptFinder to use for finding build.sh scripts.
18 :param build_recorder: The build recorder that will capture build information and artifacts.
19 '''
20
21 self.component_name = component_name
22 self.git_repo = git_repo
23 self.script_finder = script_finder
24 self.build_recorder = build_recorder
25 self.output_path = 'artifacts'
26
27 def build(self, version, arch, snapshot):
28 build_script = self.script_finder.find_build_script(self.component_name, self.git_repo.dir)
29 build_command = f'{build_script} -v {version} -a {arch} -s {str(snapshot).lower()} -o {self.output_path}'
30 self.git_repo.execute(build_command)
31 self.build_recorder.record_component(self.component_name, self.git_repo)
32
33 def export_artifacts(self):
34 artifacts_dir = os.path.realpath(os.path.join(self.git_repo.dir, self.output_path))
35 for artifact_type in ["maven", "bundle", "plugins", "libs"]:
36 for dir, dirs, files in os.walk(os.path.join(artifacts_dir, artifact_type)):
37 for file_name in files:
38 absolute_path = os.path.join(dir, file_name)
39 relative_path = os.path.relpath(absolute_path, artifacts_dir)
40 self.build_recorder.record_artifact(self.component_name, artifact_type, relative_path, absolute_path)
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bundle-workflow/python/build_workflow/builder.py b/bundle-workflow/python/build_workflow/builder.py
--- a/bundle-workflow/python/build_workflow/builder.py
+++ b/bundle-workflow/python/build_workflow/builder.py
@@ -32,7 +32,7 @@
def export_artifacts(self):
artifacts_dir = os.path.realpath(os.path.join(self.git_repo.dir, self.output_path))
- for artifact_type in ["maven", "bundle", "plugins", "libs"]:
+ for artifact_type in ["maven", "bundle", "plugins", "libs", "core-plugins"]:
for dir, dirs, files in os.walk(os.path.join(artifacts_dir, artifact_type)):
for file_name in files:
absolute_path = os.path.join(dir, file_name)
| {"golden_diff": "diff --git a/bundle-workflow/python/build_workflow/builder.py b/bundle-workflow/python/build_workflow/builder.py\n--- a/bundle-workflow/python/build_workflow/builder.py\n+++ b/bundle-workflow/python/build_workflow/builder.py\n@@ -32,7 +32,7 @@\n \n def export_artifacts(self):\n artifacts_dir = os.path.realpath(os.path.join(self.git_repo.dir, self.output_path))\n- for artifact_type in [\"maven\", \"bundle\", \"plugins\", \"libs\"]:\n+ for artifact_type in [\"maven\", \"bundle\", \"plugins\", \"libs\", \"core-plugins\"]:\n for dir, dirs, files in os.walk(os.path.join(artifacts_dir, artifact_type)):\n for file_name in files:\n absolute_path = os.path.join(dir, file_name)\n", "issue": "Add Core plugins zips to published artifacts.\nThe plugins built with the core repo should be bundled as zips and hosted at artifacts.opensearch.org.\r\n\r\nThis task involves updating Opensearch's build.sh script to build and include these artifacts with the component's artifact list.\r\n\r\nRight now we also have brittle logic that assumes any component with \"plugins\" in their artifacts list is a plugin repository. This should be updated to identify the min bundle component in another way. Perhaps with a separate artifact folder for \"min-bundle\"\n", "before_files": [{"content": "# Copyright OpenSearch Contributors.\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\n\n'''\nThis class is responsible for executing the build for a component and passing the results to a build recorder.\nIt will notify the build recorder of build information such as repository and git ref, and any artifacts generated by the build.\nArtifacts found in \"<build root>/artifacts/<maven|plugins|libs|bundle>\" will be recognized and recorded.\n'''\nclass Builder:\n def __init__(self, component_name, git_repo, script_finder, build_recorder):\n '''\n Construct a new Builder instance.\n :param component_name: The name of the component to build.\n :param git_repo: A GitRepository instance containing the checked-out code.\n :param script_finder: The ScriptFinder to use for finding build.sh scripts.\n :param build_recorder: The build recorder that will capture build information and artifacts.\n '''\n\n self.component_name = component_name\n self.git_repo = git_repo\n self.script_finder = script_finder\n self.build_recorder = build_recorder\n self.output_path = 'artifacts'\n\n def build(self, version, arch, snapshot):\n build_script = self.script_finder.find_build_script(self.component_name, self.git_repo.dir)\n build_command = f'{build_script} -v {version} -a {arch} -s {str(snapshot).lower()} -o {self.output_path}'\n self.git_repo.execute(build_command)\n self.build_recorder.record_component(self.component_name, self.git_repo)\n\n def export_artifacts(self):\n artifacts_dir = os.path.realpath(os.path.join(self.git_repo.dir, self.output_path))\n for artifact_type in [\"maven\", \"bundle\", \"plugins\", \"libs\"]:\n for dir, dirs, files in os.walk(os.path.join(artifacts_dir, artifact_type)):\n for file_name in files:\n absolute_path = os.path.join(dir, file_name)\n relative_path = os.path.relpath(absolute_path, artifacts_dir)\n self.build_recorder.record_artifact(self.component_name, artifact_type, relative_path, absolute_path)\n", "path": "bundle-workflow/python/build_workflow/builder.py"}], "after_files": [{"content": "# Copyright OpenSearch Contributors.\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\n\n'''\nThis class is responsible for executing the build for a component and passing the results to a build 
recorder.\nIt will notify the build recorder of build information such as repository and git ref, and any artifacts generated by the build.\nArtifacts found in \"<build root>/artifacts/<maven|plugins|libs|bundle>\" will be recognized and recorded.\n'''\nclass Builder:\n def __init__(self, component_name, git_repo, script_finder, build_recorder):\n '''\n Construct a new Builder instance.\n :param component_name: The name of the component to build.\n :param git_repo: A GitRepository instance containing the checked-out code.\n :param script_finder: The ScriptFinder to use for finding build.sh scripts.\n :param build_recorder: The build recorder that will capture build information and artifacts.\n '''\n\n self.component_name = component_name\n self.git_repo = git_repo\n self.script_finder = script_finder\n self.build_recorder = build_recorder\n self.output_path = 'artifacts'\n\n def build(self, version, arch, snapshot):\n build_script = self.script_finder.find_build_script(self.component_name, self.git_repo.dir)\n build_command = f'{build_script} -v {version} -a {arch} -s {str(snapshot).lower()} -o {self.output_path}'\n self.git_repo.execute(build_command)\n self.build_recorder.record_component(self.component_name, self.git_repo)\n\n def export_artifacts(self):\n artifacts_dir = os.path.realpath(os.path.join(self.git_repo.dir, self.output_path))\n for artifact_type in [\"maven\", \"bundle\", \"plugins\", \"libs\", \"core-plugins\"]:\n for dir, dirs, files in os.walk(os.path.join(artifacts_dir, artifact_type)):\n for file_name in files:\n absolute_path = os.path.join(dir, file_name)\n relative_path = os.path.relpath(absolute_path, artifacts_dir)\n self.build_recorder.record_artifact(self.component_name, artifact_type, relative_path, absolute_path)\n", "path": "bundle-workflow/python/build_workflow/builder.py"}]} | 889 | 173 |
gh_patches_debug_390 | rasdani/github-patches | git_diff | google__turbinia-616 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add retries to tox
Tox fails when trying to check links within our docs if the link is temporarily down/unresponsive. Adding retries to sphinx config should take care of that.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12 #
13 # import os
14 # import sys
15 # sys.path.insert(0, os.path.abspath('.'))
16
17 from __future__ import unicode_literals
18 import re
19
20 from recommonmark.parser import CommonMarkParser
21 from recommonmark.transform import AutoStructify
22 from docutils import nodes, transforms
23
24 # -- Project information -----------------------------------------------------
25
26 project = 'Turbinia'
27 copyright = '2020, Google Inc'
28 author = 'Turbinia maintainers'
29
30 # -- General configuration ---------------------------------------------------
31
32 # Add any Sphinx extension module names here, as strings. They can be
33 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
34 # ones.
35 extensions = [
36 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',
37 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',
38 'recommonmark'
39 ]
40
41 # Add any paths that contain templates here, relative to this directory.
42 templates_path = ['_templates']
43
44 # List of patterns, relative to source directory, that match files and
45 # directories to ignore when looking for source files.
46 # This pattern also affects html_static_path and html_extra_path.
47 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']
48
49 # -- Options for HTML output -------------------------------------------------
50
51 # The theme to use for HTML and HTML Help pages. See the documentation for
52 # a list of builtin themes.
53 #
54 html_theme = 'sphinx_rtd_theme'
55
56 # The master toctree document.
57 master_doc = 'index'
58
59 # The name of the Pygments (syntax highlighting) style to use.
60 pygments_style = 'sphinx'
61
62 # Add any paths that contain custom static files (such as style sheets) here,
63 # relative to this directory. They are copied after the builtin static files,
64 # so a file named "default.css" will overwrite the builtin "default.css".
65 html_static_path = ['_static']
66
67 # The default sidebars (for documents that don't match any pattern) are
68 # defined by theme itself. Builtin themes are using these templates by
69 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
70 # 'searchbox.html']``.
71 #
72 html_sidebars = {
73 '**': [
74 'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',
75 'searchbox.html'
76 ]
77 }
78
79
80 # Output file base name for HTML help builder.
81 htmlhelp_basename = 'turbiniadoc'
82
83 html_logo = "images/turbinia-logo.jpg"
84
85
86 class ProcessLink(transforms.Transform):
87 """Transform definition to parse .md references to internal pages."""
88
89 default_priority = 1000
90
91 def find_replace(self, node):
92 """Parses URIs containing .md and replaces them with their HTML page."""
93 if isinstance(node, nodes.reference) and 'refuri' in node:
94 r = node['refuri']
95 if r.endswith('.md'):
96 r = r[:-3] + '.html'
97 node['refuri'] = r
98
99 return node
100
101 def traverse(self, node):
102 """Traverse the document tree rooted at node.
103 node : docutil node
104 current root node to traverse
105 """
106 self.find_replace(node)
107
108 for c in node.children:
109 self.traverse(c)
110
111 # pylint: disable=arguments-differ,attribute-defined-outside-init
112 # this was taken from GRR's config file for documentation
113 def apply(self):
114 self.current_level = 0
115 self.traverse(self.document)
116
117
118 def setup(app):
119 """Add custom parsers to Sphinx generation."""
120 app.add_config_value(
121 'recommonmark_config', {
122 'enable_auto_doc_ref': False,
123 }, True)
124 app.add_transform(AutoStructify)
125 app.add_transform(ProcessLink)
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -76,6 +76,8 @@
]
}
+# Adding retries to linkchecks before declaring a link broken
+linkcheck_retries = 3
# Output file base name for HTML help builder.
htmlhelp_basename = 'turbiniadoc'
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -76,6 +76,8 @@\n ]\n }\n \n+# Adding retries to linkchecks before declaring a link broken\n+linkcheck_retries = 3\n \n # Output file base name for HTML help builder.\n htmlhelp_basename = 'turbiniadoc'\n", "issue": "Add retries to tox\nTox fails when trying to check links within our docs if the link is temporarily down/unresponsive. Adding retries to sphinx config should take care of that.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom __future__ import unicode_literals\nimport re\n\nfrom recommonmark.parser import CommonMarkParser\nfrom recommonmark.transform import AutoStructify\nfrom docutils import nodes, transforms\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Turbinia'\ncopyright = '2020, Google Inc'\nauthor = 'Turbinia maintainers'\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',\n 'recommonmark'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. 
Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n '**': [\n 'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',\n 'searchbox.html'\n ]\n}\n\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'turbiniadoc'\n\nhtml_logo = \"images/turbinia-logo.jpg\"\n\n\nclass ProcessLink(transforms.Transform):\n \"\"\"Transform definition to parse .md references to internal pages.\"\"\"\n\n default_priority = 1000\n\n def find_replace(self, node):\n \"\"\"Parses URIs containing .md and replaces them with their HTML page.\"\"\"\n if isinstance(node, nodes.reference) and 'refuri' in node:\n r = node['refuri']\n if r.endswith('.md'):\n r = r[:-3] + '.html'\n node['refuri'] = r\n\n return node\n\n def traverse(self, node):\n \"\"\"Traverse the document tree rooted at node.\n node : docutil node\n current root node to traverse\n \"\"\"\n self.find_replace(node)\n\n for c in node.children:\n self.traverse(c)\n\n # pylint: disable=arguments-differ,attribute-defined-outside-init\n # this was taken from GRR's config file for documentation\n def apply(self):\n self.current_level = 0\n self.traverse(self.document)\n\n\ndef setup(app):\n \"\"\"Add custom parsers to Sphinx generation.\"\"\"\n app.add_config_value(\n 'recommonmark_config', {\n 'enable_auto_doc_ref': False,\n }, True)\n app.add_transform(AutoStructify)\n app.add_transform(ProcessLink)\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom __future__ import unicode_literals\nimport re\n\nfrom recommonmark.parser import CommonMarkParser\nfrom recommonmark.transform import AutoStructify\nfrom docutils import nodes, transforms\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Turbinia'\ncopyright = '2020, Google Inc'\nauthor = 'Turbinia maintainers'\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',\n 'recommonmark'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n '**': [\n 'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',\n 'searchbox.html'\n ]\n}\n\n# Adding retries to linkchecks before declaring a link broken\nlinkcheck_retries = 3\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'turbiniadoc'\n\nhtml_logo = \"images/turbinia-logo.jpg\"\n\n\nclass ProcessLink(transforms.Transform):\n \"\"\"Transform definition to parse .md references to internal pages.\"\"\"\n\n default_priority = 1000\n\n def find_replace(self, node):\n \"\"\"Parses URIs containing .md and replaces them with their HTML page.\"\"\"\n if isinstance(node, nodes.reference) and 'refuri' in node:\n r = node['refuri']\n if r.endswith('.md'):\n r = r[:-3] + '.html'\n node['refuri'] = r\n\n return node\n\n def traverse(self, node):\n \"\"\"Traverse the document tree rooted at node.\n node : docutil node\n current root node to traverse\n \"\"\"\n self.find_replace(node)\n\n for c in node.children:\n self.traverse(c)\n\n # pylint: disable=arguments-differ,attribute-defined-outside-init\n # this was taken from GRR's config file for documentation\n def apply(self):\n self.current_level = 0\n self.traverse(self.document)\n\n\ndef setup(app):\n \"\"\"Add custom parsers to Sphinx generation.\"\"\"\n app.add_config_value(\n 'recommonmark_config', {\n 'enable_auto_doc_ref': False,\n }, True)\n app.add_transform(AutoStructify)\n app.add_transform(ProcessLink)\n", "path": "docs/conf.py"}]} | 1,511 | 82 |
gh_patches_debug_34066 | rasdani/github-patches | git_diff | freedomofpress__securedrop-1309 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NetworkManager hook notifications broken on Tails 2.x
The invocation of `notify-send` in `securedrop_init.py` does not show a notification in Tails 2.x like it did in Tails 1.x. This is due to dbus-related changes in Debian Jessie, and is a known issue as a quick [search](https://labs.riseup.net/code/projects/tails/search?utf8=%E2%9C%93&changesets=1&q=notify-send) of the Tails issue tracker demonstrates.
Furthermore, it looks like Tails has a special wrapper script, `tails-notify-user`, specifically meant for the use case of displaying notifications to the user from background scripts running as different users, so we should just use that instead.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tails_files/securedrop_init.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4 import sys
5 import subprocess
6
7
8 if __name__ == '__main__':
9 # check for root
10 if os.geteuid() != 0:
11 sys.exit('You need to run this as root')
12
13 # paths
14 path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'
15 path_torrc_backup = '/etc/tor/torrc.bak'
16 path_torrc = '/etc/tor/torrc'
17
18 # load torrc_additions
19 if os.path.isfile(path_torrc_additions):
20 torrc_additions = open(path_torrc_additions).read()
21 else:
22 sys.exit('Error opening {0} for reading'.format(path_torrc_additions))
23
24 # load torrc
25 if os.path.isfile(path_torrc_backup):
26 torrc = open(path_torrc_backup).read()
27 else:
28 if os.path.isfile(path_torrc):
29 torrc = open(path_torrc).read()
30 else:
31 sys.exit('Error opening {0} for reading'.format(path_torrc))
32
33 # save a backup
34 open(path_torrc_backup, 'w').write(torrc)
35
36 # append the additions
37 open(path_torrc, 'w').write(torrc + torrc_additions)
38
39 # reload tor
40 subprocess.call(['/usr/sbin/service', 'tor', 'reload'])
41
42 # success
43 subprocess.call(['/usr/bin/sudo', '-u', 'amnesia', '/usr/bin/notify-send', '-i', '/home/amnesia/Persistent/.securedrop/securedrop_icon.png',
44 'Updated torrc!', 'You can now connect to your SecureDrop\ndocument interface.'])
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tails_files/securedrop_init.py b/tails_files/securedrop_init.py
--- a/tails_files/securedrop_init.py
+++ b/tails_files/securedrop_init.py
@@ -1,44 +1,47 @@
-#!/usr/bin/env python
+#!/usr/bin/python
import os
import sys
import subprocess
-if __name__ == '__main__':
- # check for root
- if os.geteuid() != 0:
- sys.exit('You need to run this as root')
+# check for root
+if os.geteuid() != 0:
+ sys.exit('You need to run this as root')
- # paths
- path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'
- path_torrc_backup = '/etc/tor/torrc.bak'
- path_torrc = '/etc/tor/torrc'
+# paths
+path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'
+path_torrc_backup = '/etc/tor/torrc.bak'
+path_torrc = '/etc/tor/torrc'
- # load torrc_additions
- if os.path.isfile(path_torrc_additions):
- torrc_additions = open(path_torrc_additions).read()
- else:
- sys.exit('Error opening {0} for reading'.format(path_torrc_additions))
+# load torrc_additions
+if os.path.isfile(path_torrc_additions):
+ torrc_additions = open(path_torrc_additions).read()
+else:
+ sys.exit('Error opening {0} for reading'.format(path_torrc_additions))
- # load torrc
- if os.path.isfile(path_torrc_backup):
- torrc = open(path_torrc_backup).read()
+# load torrc
+if os.path.isfile(path_torrc_backup):
+ torrc = open(path_torrc_backup).read()
+else:
+ if os.path.isfile(path_torrc):
+ torrc = open(path_torrc).read()
else:
- if os.path.isfile(path_torrc):
- torrc = open(path_torrc).read()
- else:
- sys.exit('Error opening {0} for reading'.format(path_torrc))
+ sys.exit('Error opening {0} for reading'.format(path_torrc))
- # save a backup
- open(path_torrc_backup, 'w').write(torrc)
+ # save a backup
+ open(path_torrc_backup, 'w').write(torrc)
- # append the additions
- open(path_torrc, 'w').write(torrc + torrc_additions)
+# append the additions
+open(path_torrc, 'w').write(torrc + torrc_additions)
- # reload tor
- subprocess.call(['/usr/sbin/service', 'tor', 'reload'])
+# reload tor
+try:
+ subprocess.check_call(['systemctl', 'reload', '[email protected]'])
+except subprocess.CalledProcessError:
+ sys.exit('Error reloading Tor')
- # success
- subprocess.call(['/usr/bin/sudo', '-u', 'amnesia', '/usr/bin/notify-send', '-i', '/home/amnesia/Persistent/.securedrop/securedrop_icon.png',
- 'Updated torrc!', 'You can now connect to your SecureDrop\ndocument interface.'])
+# notify the user
+subprocess.call(['tails-notify-user',
+ 'SecureDrop successfully auto-configured!',
+ 'You can now access the Document Interface.\nIf you are an admin, you can now SSH to the servers.'])
| {"golden_diff": "diff --git a/tails_files/securedrop_init.py b/tails_files/securedrop_init.py\n--- a/tails_files/securedrop_init.py\n+++ b/tails_files/securedrop_init.py\n@@ -1,44 +1,47 @@\n-#!/usr/bin/env python\n+#!/usr/bin/python\n \n import os\n import sys\n import subprocess\n \n \n-if __name__ == '__main__':\n- # check for root\n- if os.geteuid() != 0:\n- sys.exit('You need to run this as root')\n+# check for root\n+if os.geteuid() != 0:\n+ sys.exit('You need to run this as root')\n \n- # paths\n- path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'\n- path_torrc_backup = '/etc/tor/torrc.bak'\n- path_torrc = '/etc/tor/torrc'\n+# paths\n+path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'\n+path_torrc_backup = '/etc/tor/torrc.bak'\n+path_torrc = '/etc/tor/torrc'\n \n- # load torrc_additions\n- if os.path.isfile(path_torrc_additions):\n- torrc_additions = open(path_torrc_additions).read()\n- else:\n- sys.exit('Error opening {0} for reading'.format(path_torrc_additions))\n+# load torrc_additions\n+if os.path.isfile(path_torrc_additions):\n+ torrc_additions = open(path_torrc_additions).read()\n+else:\n+ sys.exit('Error opening {0} for reading'.format(path_torrc_additions))\n \n- # load torrc\n- if os.path.isfile(path_torrc_backup):\n- torrc = open(path_torrc_backup).read()\n+# load torrc\n+if os.path.isfile(path_torrc_backup):\n+ torrc = open(path_torrc_backup).read()\n+else:\n+ if os.path.isfile(path_torrc):\n+ torrc = open(path_torrc).read()\n else:\n- if os.path.isfile(path_torrc):\n- torrc = open(path_torrc).read()\n- else:\n- sys.exit('Error opening {0} for reading'.format(path_torrc))\n+ sys.exit('Error opening {0} for reading'.format(path_torrc))\n \n- # save a backup\n- open(path_torrc_backup, 'w').write(torrc)\n+ # save a backup\n+ open(path_torrc_backup, 'w').write(torrc)\n \n- # append the additions\n- open(path_torrc, 'w').write(torrc + torrc_additions)\n+# append the additions\n+open(path_torrc, 'w').write(torrc + torrc_additions)\n \n- # reload tor\n- subprocess.call(['/usr/sbin/service', 'tor', 'reload'])\n+# reload tor\n+try:\n+ subprocess.check_call(['systemctl', 'reload', '[email protected]'])\n+except subprocess.CalledProcessError:\n+ sys.exit('Error reloading Tor')\n \n- # success\n- subprocess.call(['/usr/bin/sudo', '-u', 'amnesia', '/usr/bin/notify-send', '-i', '/home/amnesia/Persistent/.securedrop/securedrop_icon.png',\n- 'Updated torrc!', 'You can now connect to your SecureDrop\\ndocument interface.'])\n+# notify the user\n+subprocess.call(['tails-notify-user',\n+ 'SecureDrop successfully auto-configured!',\n+ 'You can now access the Document Interface.\\nIf you are an admin, you can now SSH to the servers.'])\n", "issue": "NetworkManager hook notifications broken on Tails 2.x\nThe invocation of `notify-send` in `securedrop_init.py` does not show a notification in Tails 2.x like it did in Tails 1.x. 
This is due to dbus-related changes in Debian Jessie, and is a known issue as a quick [search](https://labs.riseup.net/code/projects/tails/search?utf8=%E2%9C%93&changesets=1&q=notify-send) of the Tails issue tracker demonstrates.\n\nFurthermore, it looks like Tails has a special wrapper script, `tails-notify-user`, specifically meant for the use case of displaying notifications to the user from background scripts running as different users, so we should just use that instead.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport subprocess\n\n\nif __name__ == '__main__':\n # check for root\n if os.geteuid() != 0:\n sys.exit('You need to run this as root')\n\n # paths\n path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'\n path_torrc_backup = '/etc/tor/torrc.bak'\n path_torrc = '/etc/tor/torrc'\n\n # load torrc_additions\n if os.path.isfile(path_torrc_additions):\n torrc_additions = open(path_torrc_additions).read()\n else:\n sys.exit('Error opening {0} for reading'.format(path_torrc_additions))\n\n # load torrc\n if os.path.isfile(path_torrc_backup):\n torrc = open(path_torrc_backup).read()\n else:\n if os.path.isfile(path_torrc):\n torrc = open(path_torrc).read()\n else:\n sys.exit('Error opening {0} for reading'.format(path_torrc))\n\n # save a backup\n open(path_torrc_backup, 'w').write(torrc)\n\n # append the additions\n open(path_torrc, 'w').write(torrc + torrc_additions)\n\n # reload tor\n subprocess.call(['/usr/sbin/service', 'tor', 'reload'])\n\n # success\n subprocess.call(['/usr/bin/sudo', '-u', 'amnesia', '/usr/bin/notify-send', '-i', '/home/amnesia/Persistent/.securedrop/securedrop_icon.png',\n 'Updated torrc!', 'You can now connect to your SecureDrop\\ndocument interface.'])\n", "path": "tails_files/securedrop_init.py"}], "after_files": [{"content": "#!/usr/bin/python\n\nimport os\nimport sys\nimport subprocess\n\n\n# check for root\nif os.geteuid() != 0:\n sys.exit('You need to run this as root')\n\n# paths\npath_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'\npath_torrc_backup = '/etc/tor/torrc.bak'\npath_torrc = '/etc/tor/torrc'\n\n# load torrc_additions\nif os.path.isfile(path_torrc_additions):\n torrc_additions = open(path_torrc_additions).read()\nelse:\n sys.exit('Error opening {0} for reading'.format(path_torrc_additions))\n\n# load torrc\nif os.path.isfile(path_torrc_backup):\n torrc = open(path_torrc_backup).read()\nelse:\n if os.path.isfile(path_torrc):\n torrc = open(path_torrc).read()\n else:\n sys.exit('Error opening {0} for reading'.format(path_torrc))\n\n # save a backup\n open(path_torrc_backup, 'w').write(torrc)\n\n# append the additions\nopen(path_torrc, 'w').write(torrc + torrc_additions)\n\n# reload tor\ntry:\n subprocess.check_call(['systemctl', 'reload', '[email protected]'])\nexcept subprocess.CalledProcessError:\n sys.exit('Error reloading Tor')\n\n# notify the user\nsubprocess.call(['tails-notify-user',\n 'SecureDrop successfully auto-configured!',\n 'You can now access the Document Interface.\\nIf you are an admin, you can now SSH to the servers.'])\n", "path": "tails_files/securedrop_init.py"}]} | 889 | 842 |
gh_patches_debug_22367 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-tf-515 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Subword tokenisation spacer can mark the beginning of word
Certain sequence noising operations need to retrieve a list of words from the raw list of subword tokens. For example:
* Decoding with word removal/reordering to produce noisy back-translations as in [Scaling BT paper](https://arxiv.org/abs/1808.09381)
* Word omission to support the new contrastive learning feature as in the [contrastive learning paper](https://www.aclweb.org/anthology/P19-1623.pdf)
* Presumably more features relying on word level noise might come up in the future
In these cases the user should specify some details for the sub-tokenisation process:
1. What subword tokens was used? (`decoding_subword_token`)
2. Was that token a joiner or a spacer? (`decoding_subword_token_is_spacer`)
When the user specifies (explicitly or implicitly) a spacer, the framework assumes that the spacer symbol appears at the beginning of each word, similar to what SentencePiece does. However this does not have to be the case, the spacer could also appear at the end of each word - for example [this one does](https://github.com/kovalevfm/SubTokenizer). If that extra sub-tokenisation flexibility is desired, we can add this configuration parameter. A sample implementation could look like [this](https://github.com/steremma/OpenNMT-tf/commit/d109af49911431e424b28def575fb94f07bfec47).
I realise that most users rely on standard tools that are covered by the current implementation. If there is a user base for which the extra flexibility is desired, I can submit a PR that reads this option from the YAML.
--- END ISSUE ---
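To make the spacer-placement distinction above concrete, here is a minimal illustrative sketch in plain Python (not OpenNMT code; the token strings and helper names are made up for the example):

```python
# Group subword tokens into words under the two spacer conventions.
SPACER = "▁"

def words_spacer_marks_start(tokens):
    # SentencePiece-style: a token starting with the spacer opens a new word.
    words, current = [], []
    for i, tok in enumerate(tokens):
        if i == 0 or tok.startswith(SPACER):
            if current:
                words.append(current)
            current = [tok]
        else:
            current.append(tok)
    if current:
        words.append(current)
    return words

def words_spacer_marks_end(tokens):
    # Alternative convention: a token ending with the spacer closes the current word.
    words, current = [], []
    for tok in tokens:
        current.append(tok)
        if tok.endswith(SPACER):
            words.append(current)
            current = []
    if current:
        words.append(current)
    return words

print(words_spacer_marks_start(["▁Hello", "▁W", "orld", "!"]))
# [['▁Hello'], ['▁W', 'orld', '!']]
print(words_spacer_marks_end(["Hello▁", "W", "orld", "!▁"]))
# [['Hello▁'], ['W', 'orld', '!▁']]
```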
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opennmt/data/text.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Text manipulation."""
4
5 import tensorflow as tf
6
7
8 def tokens_to_chars(tokens):
9 """Splits tokens into unicode characters.
10
11 Args:
12 tokens: A string ``tf.Tensor`` of shape :math:`[T]`.
13
14 Returns:
15 The characters as a 2D string ``tf.RaggedTensor``.
16 """
17 return tf.strings.unicode_split(tokens, "UTF-8")
18
19 def tokens_to_words(tokens, subword_token="■", is_spacer=None):
20 """Converts a sequence of tokens to a sequence of words.
21
22 For example, if a BPE tokenization produces this sequence:
23
24 ["He@@", "llo", "W@@", "orld", "@@!"]
25
26 this function will return the tensor:
27
28 [["He@@", "llo", ""], ["W@@", "orld", "@@!"]]
29
30 Args:
31 tokens: A 1D string ``tf.Tensor``.
32 subword_token: The special token used by the subword tokenizer.
33 is_spacer: Whether :obj:`subword_token` is used as a spacer (as in
34 SentencePiece) or a joiner (as in BPE). If ``None``, will infer
35 directly from :obj:`subword_token`.
36
37 Returns:
38 The words as a 2D string ``tf.RaggedTensor``.
39 """
40 if is_spacer is None:
41 is_spacer = subword_token == "▁"
42 if is_spacer:
43 subword = tf.strings.regex_full_match(tokens, "[^%s].*" % subword_token)
44 else:
45 right = tf.strings.regex_full_match(tokens, ".*%s" % subword_token)
46 left = tf.strings.regex_full_match(tokens, "%s.*" % subword_token)
47 subword = tf.logical_or(tf.roll(right, shift=1, axis=0), left)
48 start = tf.logical_not(subword)
49 start_indices = tf.squeeze(tf.where(start), -1)
50 return tf.RaggedTensor.from_row_starts(tokens, start_indices)
51
52 def alignment_matrix_from_pharaoh(alignment_line,
53 source_length,
54 target_length,
55 dtype=tf.float32):
56 """Parse Pharaoh alignments into an alignment matrix.
57
58 Args:
59 alignment_line: A string ``tf.Tensor`` in the Pharaoh format.
60 source_length: The length of the source sentence, without special symbols.
61 target_length: The length of the target sentence, without special symbols.
62 dtype: The output matrix dtype. Defaults to ``tf.float32`` for convenience
63 when computing the guided alignment loss.
64
65 Returns:
66 The alignment matrix as a 2-D ``tf.Tensor`` of type :obj:`dtype` and shape
67 ``[target_length, source_length]``, where ``[i, j] = 1`` if the ``i`` th
68 target word is aligned with the ``j`` th source word.
69 """
70 align_pairs_str = tf.strings.split([alignment_line]).values
71 align_pairs_flat_str = tf.strings.split(align_pairs_str, sep="-").values
72 align_pairs_flat = tf.strings.to_number(align_pairs_flat_str, out_type=tf.int64)
73 sparse_indices = tf.reshape(align_pairs_flat, [-1, 2])
74 sparse_values = tf.ones([tf.shape(sparse_indices)[0]], dtype=dtype)
75 source_length = tf.cast(source_length, tf.int64)
76 target_length = tf.cast(target_length, tf.int64)
77 alignment_matrix_sparse = tf.sparse.SparseTensor(
78 sparse_indices, sparse_values, [source_length, target_length])
79 alignment_matrix = tf.sparse.to_dense(alignment_matrix_sparse, validate_indices=False)
80 return tf.transpose(alignment_matrix)
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opennmt/data/text.py b/opennmt/data/text.py
--- a/opennmt/data/text.py
+++ b/opennmt/data/text.py
@@ -40,13 +40,18 @@
if is_spacer is None:
is_spacer = subword_token == "▁"
if is_spacer:
- subword = tf.strings.regex_full_match(tokens, "[^%s].*" % subword_token)
+ # First token implicitly starts with a spacer.
+ left_and_single = tf.logical_or(
+ tf.strings.regex_full_match(tokens, "%s.*" % subword_token),
+ tf.one_hot(0, tf.shape(tokens)[0], on_value=True, off_value=False))
+ right = tf.strings.regex_full_match(tokens, ".+%s" % subword_token)
+ word_start = tf.logical_or(tf.roll(right, shift=1, axis=0), left_and_single)
else:
right = tf.strings.regex_full_match(tokens, ".*%s" % subword_token)
left = tf.strings.regex_full_match(tokens, "%s.*" % subword_token)
subword = tf.logical_or(tf.roll(right, shift=1, axis=0), left)
- start = tf.logical_not(subword)
- start_indices = tf.squeeze(tf.where(start), -1)
+ word_start = tf.logical_not(subword)
+ start_indices = tf.squeeze(tf.where(word_start), -1)
return tf.RaggedTensor.from_row_starts(tokens, start_indices)
def alignment_matrix_from_pharaoh(alignment_line,
| {"golden_diff": "diff --git a/opennmt/data/text.py b/opennmt/data/text.py\n--- a/opennmt/data/text.py\n+++ b/opennmt/data/text.py\n@@ -40,13 +40,18 @@\n if is_spacer is None:\n is_spacer = subword_token == \"\u2581\"\n if is_spacer:\n- subword = tf.strings.regex_full_match(tokens, \"[^%s].*\" % subword_token)\n+ # First token implicitly starts with a spacer.\n+ left_and_single = tf.logical_or(\n+ tf.strings.regex_full_match(tokens, \"%s.*\" % subword_token),\n+ tf.one_hot(0, tf.shape(tokens)[0], on_value=True, off_value=False))\n+ right = tf.strings.regex_full_match(tokens, \".+%s\" % subword_token)\n+ word_start = tf.logical_or(tf.roll(right, shift=1, axis=0), left_and_single)\n else:\n right = tf.strings.regex_full_match(tokens, \".*%s\" % subword_token)\n left = tf.strings.regex_full_match(tokens, \"%s.*\" % subword_token)\n subword = tf.logical_or(tf.roll(right, shift=1, axis=0), left)\n- start = tf.logical_not(subword)\n- start_indices = tf.squeeze(tf.where(start), -1)\n+ word_start = tf.logical_not(subword)\n+ start_indices = tf.squeeze(tf.where(word_start), -1)\n return tf.RaggedTensor.from_row_starts(tokens, start_indices)\n \n def alignment_matrix_from_pharaoh(alignment_line,\n", "issue": "Subword tokenisation spacer can mark the beginning of word\nCertain sequence noising operations need to retrieve a list of words from the raw list of subword tokens. For example:\r\n\r\n* Decoding with word removal/reordering to produce noisy back-translations as in [Scaling BT paper](https://arxiv.org/abs/1808.09381)\r\n\r\n* Word omission to support the new contrastive learning feature as in the [contrastive learning paper](https://www.aclweb.org/anthology/P19-1623.pdf)\r\n\r\n* Presumably more features relying on word level noise might come up in the future\r\n\r\nIn these cases the user should specify some details for the sub-tokenisation process: \r\n1. What subword tokens was used? (`decoding_subword_token`)\r\n2. Was that token a joiner or a spacer? (`decoding_subword_token_is_spacer`)\r\n\r\nWhen the user specifies (explicitly or implicitly) a spacer, the framework assumes that the spacer symbol appears at the beginning of each word, similar to what SentencePiece does. However this does not have to be the case, the spacer could also appear at the end of each word - for example [this one does](https://github.com/kovalevfm/SubTokenizer). If that extra sub-tokenisation flexibility is desired, we can add this configuration parameter. A sample implementation could look like [this](https://github.com/steremma/OpenNMT-tf/commit/d109af49911431e424b28def575fb94f07bfec47).\r\n\r\nI realise that most user's rely on standard tools that are covered by the current implementation. 
If there is a user base for which the extra flexibility is desired, I can submit a PR that reads this option from the YAML.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Text manipulation.\"\"\"\n\nimport tensorflow as tf\n\n\ndef tokens_to_chars(tokens):\n \"\"\"Splits tokens into unicode characters.\n\n Args:\n tokens: A string ``tf.Tensor`` of shape :math:`[T]`.\n\n Returns:\n The characters as a 2D string ``tf.RaggedTensor``.\n \"\"\"\n return tf.strings.unicode_split(tokens, \"UTF-8\")\n\ndef tokens_to_words(tokens, subword_token=\"\uffed\", is_spacer=None):\n \"\"\"Converts a sequence of tokens to a sequence of words.\n\n For example, if a BPE tokenization produces this sequence:\n\n [\"He@@\", \"llo\", \"W@@\", \"orld\", \"@@!\"]\n\n this function will return the tensor:\n\n [[\"He@@\", \"llo\", \"\"], [\"W@@\", \"orld\", \"@@!\"]]\n\n Args:\n tokens: A 1D string ``tf.Tensor``.\n subword_token: The special token used by the subword tokenizer.\n is_spacer: Whether :obj:`subword_token` is used as a spacer (as in\n SentencePiece) or a joiner (as in BPE). If ``None``, will infer\n directly from :obj:`subword_token`.\n\n Returns:\n The words as a 2D string ``tf.RaggedTensor``.\n \"\"\"\n if is_spacer is None:\n is_spacer = subword_token == \"\u2581\"\n if is_spacer:\n subword = tf.strings.regex_full_match(tokens, \"[^%s].*\" % subword_token)\n else:\n right = tf.strings.regex_full_match(tokens, \".*%s\" % subword_token)\n left = tf.strings.regex_full_match(tokens, \"%s.*\" % subword_token)\n subword = tf.logical_or(tf.roll(right, shift=1, axis=0), left)\n start = tf.logical_not(subword)\n start_indices = tf.squeeze(tf.where(start), -1)\n return tf.RaggedTensor.from_row_starts(tokens, start_indices)\n\ndef alignment_matrix_from_pharaoh(alignment_line,\n source_length,\n target_length,\n dtype=tf.float32):\n \"\"\"Parse Pharaoh alignments into an alignment matrix.\n\n Args:\n alignment_line: A string ``tf.Tensor`` in the Pharaoh format.\n source_length: The length of the source sentence, without special symbols.\n target_length: The length of the target sentence, without special symbols.\n dtype: The output matrix dtype. 
Defaults to ``tf.float32`` for convenience\n when computing the guided alignment loss.\n\n Returns:\n The alignment matrix as a 2-D ``tf.Tensor`` of type :obj:`dtype` and shape\n ``[target_length, source_length]``, where ``[i, j] = 1`` if the ``i`` th\n target word is aligned with the ``j`` th source word.\n \"\"\"\n align_pairs_str = tf.strings.split([alignment_line]).values\n align_pairs_flat_str = tf.strings.split(align_pairs_str, sep=\"-\").values\n align_pairs_flat = tf.strings.to_number(align_pairs_flat_str, out_type=tf.int64)\n sparse_indices = tf.reshape(align_pairs_flat, [-1, 2])\n sparse_values = tf.ones([tf.shape(sparse_indices)[0]], dtype=dtype)\n source_length = tf.cast(source_length, tf.int64)\n target_length = tf.cast(target_length, tf.int64)\n alignment_matrix_sparse = tf.sparse.SparseTensor(\n sparse_indices, sparse_values, [source_length, target_length])\n alignment_matrix = tf.sparse.to_dense(alignment_matrix_sparse, validate_indices=False)\n return tf.transpose(alignment_matrix)\n", "path": "opennmt/data/text.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Text manipulation.\"\"\"\n\nimport tensorflow as tf\n\n\ndef tokens_to_chars(tokens):\n \"\"\"Splits tokens into unicode characters.\n\n Args:\n tokens: A string ``tf.Tensor`` of shape :math:`[T]`.\n\n Returns:\n The characters as a 2D string ``tf.RaggedTensor``.\n \"\"\"\n return tf.strings.unicode_split(tokens, \"UTF-8\")\n\ndef tokens_to_words(tokens, subword_token=\"\uffed\", is_spacer=None):\n \"\"\"Converts a sequence of tokens to a sequence of words.\n\n For example, if a BPE tokenization produces this sequence:\n\n [\"He@@\", \"llo\", \"W@@\", \"orld\", \"@@!\"]\n\n this function will return the tensor:\n\n [[\"He@@\", \"llo\", \"\"], [\"W@@\", \"orld\", \"@@!\"]]\n\n Args:\n tokens: A 1D string ``tf.Tensor``.\n subword_token: The special token used by the subword tokenizer.\n is_spacer: Whether :obj:`subword_token` is used as a spacer (as in\n SentencePiece) or a joiner (as in BPE). If ``None``, will infer\n directly from :obj:`subword_token`.\n\n Returns:\n The words as a 2D string ``tf.RaggedTensor``.\n \"\"\"\n if is_spacer is None:\n is_spacer = subword_token == \"\u2581\"\n if is_spacer:\n # First token implicitly starts with a spacer.\n left_and_single = tf.logical_or(\n tf.strings.regex_full_match(tokens, \"%s.*\" % subword_token),\n tf.one_hot(0, tf.shape(tokens)[0], on_value=True, off_value=False))\n right = tf.strings.regex_full_match(tokens, \".+%s\" % subword_token)\n word_start = tf.logical_or(tf.roll(right, shift=1, axis=0), left_and_single)\n else:\n right = tf.strings.regex_full_match(tokens, \".*%s\" % subword_token)\n left = tf.strings.regex_full_match(tokens, \"%s.*\" % subword_token)\n subword = tf.logical_or(tf.roll(right, shift=1, axis=0), left)\n word_start = tf.logical_not(subword)\n start_indices = tf.squeeze(tf.where(word_start), -1)\n return tf.RaggedTensor.from_row_starts(tokens, start_indices)\n\ndef alignment_matrix_from_pharaoh(alignment_line,\n source_length,\n target_length,\n dtype=tf.float32):\n \"\"\"Parse Pharaoh alignments into an alignment matrix.\n\n Args:\n alignment_line: A string ``tf.Tensor`` in the Pharaoh format.\n source_length: The length of the source sentence, without special symbols.\n target_length: The length of the target sentence, without special symbols.\n dtype: The output matrix dtype. 
Defaults to ``tf.float32`` for convenience\n when computing the guided alignment loss.\n\n Returns:\n The alignment matrix as a 2-D ``tf.Tensor`` of type :obj:`dtype` and shape\n ``[target_length, source_length]``, where ``[i, j] = 1`` if the ``i`` th\n target word is aligned with the ``j`` th source word.\n \"\"\"\n align_pairs_str = tf.strings.split([alignment_line]).values\n align_pairs_flat_str = tf.strings.split(align_pairs_str, sep=\"-\").values\n align_pairs_flat = tf.strings.to_number(align_pairs_flat_str, out_type=tf.int64)\n sparse_indices = tf.reshape(align_pairs_flat, [-1, 2])\n sparse_values = tf.ones([tf.shape(sparse_indices)[0]], dtype=dtype)\n source_length = tf.cast(source_length, tf.int64)\n target_length = tf.cast(target_length, tf.int64)\n alignment_matrix_sparse = tf.sparse.SparseTensor(\n sparse_indices, sparse_values, [source_length, target_length])\n alignment_matrix = tf.sparse.to_dense(alignment_matrix_sparse, validate_indices=False)\n return tf.transpose(alignment_matrix)\n", "path": "opennmt/data/text.py"}]} | 1,603 | 343 |
gh_patches_debug_29109 | rasdani/github-patches | git_diff | saleor__saleor-8874 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
management/commands errors
There are some queries that reference deleted fields in this module (examples below). I was wondering: does it matter if this module is updated, because it seems like this file hasn't been updated in a while, or are there other reasons that these queries still exist? Thanks.
https://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L45
https://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L51
https://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L52
https://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L53
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/core/management/commands/change_currency.py`
Content:
```
1 from babel.numbers import UnknownCurrencyError, validate_currency
2 from django.core.management.base import BaseCommand, CommandError
3
4 from ....checkout.models import Checkout
5 from ....discount.models import Voucher
6 from ....giftcard.models import GiftCard
7 from ....order.models import Order, OrderLine
8 from ....payment.models import Payment, Transaction
9 from ....product.models import Product, ProductVariant
10 from ....shipping.models import ShippingMethod
11
12
13 class Command(BaseCommand):
14 help = (
15 "Change currency in all models in the database. "
16 "Note, that this command only changes currency code "
17 "without doing any conversion. "
18 "Currency set by this command must match "
19 "with the value set in DEFAULT_CURRENCY environment variable."
20 )
21
22 def add_arguments(self, parser):
23 parser.add_argument("currency", type=str)
24
25 parser.add_argument(
26 "--force",
27 action="store_true",
28 help="Allows running command without validation.",
29 )
30
31 def handle(self, **options):
32 force = options.get("force", False)
33 currency = options["currency"]
34
35 if not force:
36 try:
37 validate_currency(currency)
38 except UnknownCurrencyError:
39 raise CommandError(
40 "Unknown currency. "
41 "Use `--force` flag to force migration currencies."
42 )
43
44 Checkout.objects.update(currency=currency)
45 Voucher.objects.update(currency=currency)
46 GiftCard.objects.update(currency=currency)
47 Order.objects.update(currency=currency)
48 OrderLine.objects.update(currency=currency)
49 Payment.objects.update(currency=currency)
50 Transaction.objects.update(currency=currency)
51 Product.objects.update(currency=currency)
52 ProductVariant.objects.update(currency=currency)
53 ShippingMethod.objects.update(currency=currency)
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/core/management/commands/change_currency.py b/saleor/core/management/commands/change_currency.py
deleted file mode 100644
--- a/saleor/core/management/commands/change_currency.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from babel.numbers import UnknownCurrencyError, validate_currency
-from django.core.management.base import BaseCommand, CommandError
-
-from ....checkout.models import Checkout
-from ....discount.models import Voucher
-from ....giftcard.models import GiftCard
-from ....order.models import Order, OrderLine
-from ....payment.models import Payment, Transaction
-from ....product.models import Product, ProductVariant
-from ....shipping.models import ShippingMethod
-
-
-class Command(BaseCommand):
- help = (
- "Change currency in all models in the database. "
- "Note, that this command only changes currency code "
- "without doing any conversion. "
- "Currency set by this command must match "
- "with the value set in DEFAULT_CURRENCY environment variable."
- )
-
- def add_arguments(self, parser):
- parser.add_argument("currency", type=str)
-
- parser.add_argument(
- "--force",
- action="store_true",
- help="Allows running command without validation.",
- )
-
- def handle(self, **options):
- force = options.get("force", False)
- currency = options["currency"]
-
- if not force:
- try:
- validate_currency(currency)
- except UnknownCurrencyError:
- raise CommandError(
- "Unknown currency. "
- "Use `--force` flag to force migration currencies."
- )
-
- Checkout.objects.update(currency=currency)
- Voucher.objects.update(currency=currency)
- GiftCard.objects.update(currency=currency)
- Order.objects.update(currency=currency)
- OrderLine.objects.update(currency=currency)
- Payment.objects.update(currency=currency)
- Transaction.objects.update(currency=currency)
- Product.objects.update(currency=currency)
- ProductVariant.objects.update(currency=currency)
- ShippingMethod.objects.update(currency=currency)
| {"golden_diff": "diff --git a/saleor/core/management/commands/change_currency.py b/saleor/core/management/commands/change_currency.py\ndeleted file mode 100644\n--- a/saleor/core/management/commands/change_currency.py\n+++ /dev/null\n@@ -1,53 +0,0 @@\n-from babel.numbers import UnknownCurrencyError, validate_currency\n-from django.core.management.base import BaseCommand, CommandError\n-\n-from ....checkout.models import Checkout\n-from ....discount.models import Voucher\n-from ....giftcard.models import GiftCard\n-from ....order.models import Order, OrderLine\n-from ....payment.models import Payment, Transaction\n-from ....product.models import Product, ProductVariant\n-from ....shipping.models import ShippingMethod\n-\n-\n-class Command(BaseCommand):\n- help = (\n- \"Change currency in all models in the database. \"\n- \"Note, that this command only changes currency code \"\n- \"without doing any conversion. \"\n- \"Currency set by this command must match \"\n- \"with the value set in DEFAULT_CURRENCY environment variable.\"\n- )\n-\n- def add_arguments(self, parser):\n- parser.add_argument(\"currency\", type=str)\n-\n- parser.add_argument(\n- \"--force\",\n- action=\"store_true\",\n- help=\"Allows running command without validation.\",\n- )\n-\n- def handle(self, **options):\n- force = options.get(\"force\", False)\n- currency = options[\"currency\"]\n-\n- if not force:\n- try:\n- validate_currency(currency)\n- except UnknownCurrencyError:\n- raise CommandError(\n- \"Unknown currency. \"\n- \"Use `--force` flag to force migration currencies.\"\n- )\n-\n- Checkout.objects.update(currency=currency)\n- Voucher.objects.update(currency=currency)\n- GiftCard.objects.update(currency=currency)\n- Order.objects.update(currency=currency)\n- OrderLine.objects.update(currency=currency)\n- Payment.objects.update(currency=currency)\n- Transaction.objects.update(currency=currency)\n- Product.objects.update(currency=currency)\n- ProductVariant.objects.update(currency=currency)\n- ShippingMethod.objects.update(currency=currency)\n", "issue": "management/commands errors\nThere are some queries that reference deleted fields in this module (examples below). I was wondering does it matter if this module is updated because it seems like this file hasn't been updated in a while, or are there other reasons that these queries still exist? Thanks.\r\n\r\nhttps://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L45\r\nhttps://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L51\r\nhttps://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L52\r\nhttps://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L53\n", "before_files": [{"content": "from babel.numbers import UnknownCurrencyError, validate_currency\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom ....checkout.models import Checkout\nfrom ....discount.models import Voucher\nfrom ....giftcard.models import GiftCard\nfrom ....order.models import Order, OrderLine\nfrom ....payment.models import Payment, Transaction\nfrom ....product.models import Product, ProductVariant\nfrom ....shipping.models import ShippingMethod\n\n\nclass Command(BaseCommand):\n help = (\n \"Change currency in all models in the database. \"\n \"Note, that this command only changes currency code \"\n \"without doing any conversion. 
\"\n \"Currency set by this command must match \"\n \"with the value set in DEFAULT_CURRENCY environment variable.\"\n )\n\n def add_arguments(self, parser):\n parser.add_argument(\"currency\", type=str)\n\n parser.add_argument(\n \"--force\",\n action=\"store_true\",\n help=\"Allows running command without validation.\",\n )\n\n def handle(self, **options):\n force = options.get(\"force\", False)\n currency = options[\"currency\"]\n\n if not force:\n try:\n validate_currency(currency)\n except UnknownCurrencyError:\n raise CommandError(\n \"Unknown currency. \"\n \"Use `--force` flag to force migration currencies.\"\n )\n\n Checkout.objects.update(currency=currency)\n Voucher.objects.update(currency=currency)\n GiftCard.objects.update(currency=currency)\n Order.objects.update(currency=currency)\n OrderLine.objects.update(currency=currency)\n Payment.objects.update(currency=currency)\n Transaction.objects.update(currency=currency)\n Product.objects.update(currency=currency)\n ProductVariant.objects.update(currency=currency)\n ShippingMethod.objects.update(currency=currency)\n", "path": "saleor/core/management/commands/change_currency.py"}], "after_files": [{"content": null, "path": "saleor/core/management/commands/change_currency.py"}]} | 895 | 462 |
gh_patches_debug_11801 | rasdani/github-patches | git_diff | getmoto__moto-399 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 2.6 issues with wheels and dependencies
My Travis tests failed on Python 2.6: piskvorky/smart_open#15 .
After some digging around it appears this is because of `moto`. Moto apparently depends on some `ordereddict` package, but that package is not installed (nor mentioned anywhere in the docs, AFAICS).
Do you think you could make `ordereddict` a dependency for moto, when installing on Python 2.6?
In other words, after I successfully run `pip install moto`, I'd expect moto to work, even on Python 2.6.
And thanks for the great package!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import unicode_literals
3 from setuptools import setup, find_packages
4
5 install_requires = [
6 "Jinja2",
7 "boto>=2.20.0",
8 "flask",
9 "httpretty>=0.6.1",
10 "requests",
11 "xmltodict",
12 "six",
13 "werkzeug",
14 ]
15
16 import sys
17
18 if sys.version_info < (2, 7):
19 # No buildint OrderedDict before 2.7
20 install_requires.append('ordereddict')
21
22 setup(
23 name='moto',
24 version='0.4.10',
25 description='A library that allows your python tests to easily'
26 ' mock out the boto library',
27 author='Steve Pulec',
28 author_email='spulec@gmail',
29 url='https://github.com/spulec/moto',
30 entry_points={
31 'console_scripts': [
32 'moto_server = moto.server:main',
33 ],
34 },
35 packages=find_packages(exclude=("tests", "tests.*")),
36 install_requires=install_requires,
37 license="Apache",
38 test_suite="tests",
39 classifiers=[
40 "Programming Language :: Python :: 2",
41 "Programming Language :: Python :: 2.6",
42 "Programming Language :: Python :: 2.7",
43 "Programming Language :: Python :: 3",
44 "Programming Language :: Python :: 3.3",
45 "License :: OSI Approved :: Apache Software License",
46 "Topic :: Software Development :: Testing",
47 ],
48 )
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -13,11 +13,10 @@
"werkzeug",
]
-import sys
-
-if sys.version_info < (2, 7):
- # No buildint OrderedDict before 2.7
- install_requires.append('ordereddict')
+extras_require = {
+ # No builtin OrderedDict before 2.7
+ ':python_version=="2.6"': ['ordereddict'],
+}
setup(
name='moto',
@@ -34,6 +33,7 @@
},
packages=find_packages(exclude=("tests", "tests.*")),
install_requires=install_requires,
+ extras_require=extras_require,
license="Apache",
test_suite="tests",
classifiers=[
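For context on the patch above: the conditional dependency is expressed with a setuptools environment marker, which is evaluated on the installing interpreter rather than on the machine that built the wheel. A minimal sketch of the same idea (the package name and version below are placeholders, not moto's real metadata):

```python
# setup.py sketch: pull in a backport only on Python 2.6 via an environment marker.
from setuptools import setup

setup(
    name="example-package",      # placeholder
    version="0.1.0",             # placeholder
    install_requires=["six"],
    extras_require={
        # Applied automatically when the marker matches the target interpreter.
        ':python_version=="2.6"': ["ordereddict"],
    },
)
```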
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -13,11 +13,10 @@\n \"werkzeug\",\n ]\n \n-import sys\n-\n-if sys.version_info < (2, 7):\n- # No buildint OrderedDict before 2.7\n- install_requires.append('ordereddict')\n+extras_require = {\n+ # No builtin OrderedDict before 2.7\n+ ':python_version==\"2.6\"': ['ordereddict'],\n+}\n \n setup(\n name='moto',\n@@ -34,6 +33,7 @@\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n+ extras_require=extras_require,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n", "issue": "Python 2.6 issues with wheels and dependencies\nMy Travis tests failed on Python 2.6: piskvorky/smart_open#15 .\n\nAfter some digging around it appears this is because of `moto`. Moto apparently depends on some `ordereddict` package, but that package is not installed (nor mentioned anywhere in the docs, AFAICS).\n\nDo you think you could make `ordereddict` a dependency for moto, when installing on Python 2.6?\n\nIn other words, after I successfully run `pip install moto`, I'd expect moto to work, even on Python 2.6.\n\nAnd thanks for the great package!\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nfrom setuptools import setup, find_packages\n\ninstall_requires = [\n \"Jinja2\",\n \"boto>=2.20.0\",\n \"flask\",\n \"httpretty>=0.6.1\",\n \"requests\",\n \"xmltodict\",\n \"six\",\n \"werkzeug\",\n]\n\nimport sys\n\nif sys.version_info < (2, 7):\n # No buildint OrderedDict before 2.7\n install_requires.append('ordereddict')\n\nsetup(\n name='moto',\n version='0.4.10',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='spulec@gmail',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nfrom setuptools import setup, find_packages\n\ninstall_requires = [\n \"Jinja2\",\n \"boto>=2.20.0\",\n \"flask\",\n \"httpretty>=0.6.1\",\n \"requests\",\n \"xmltodict\",\n \"six\",\n \"werkzeug\",\n]\n\nextras_require = {\n # No builtin OrderedDict before 2.7\n ':python_version==\"2.6\"': ['ordereddict'],\n}\n\nsetup(\n name='moto',\n version='0.4.10',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='spulec@gmail',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n extras_require=extras_require,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 
3\",\n \"Programming Language :: Python :: 3.3\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py"}]} | 807 | 177 |
gh_patches_debug_2230 | rasdani/github-patches | git_diff | getsentry__sentry-18644 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BufferError: Local: Queue full
I am receiving this error once every 2-4 days and I need to restart Sentry to fix it. This started after moving to the Docker version of Sentry.
I never noticed this being an issue on 9.1.2 also with Clickhouse and Snuba running, but without Kafka.
> https://observ.app/share/issue/4e4f208a500d48cc898770930706959a/
I am not sure where to look / poke / monitor to see this queue that is being spoken of and how I can flush it / enlarge it if needed.
`sentry queues list` showed all 0's so it's not looking like there is a massive backlog of events.
Any help is appreciated!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/utils/pubsub.py`
Content:
```
1 from __future__ import absolute_import
2
3 import redis
4 import logging
5
6 from threading import Thread
7 from six.moves.queue import Queue, Full
8
9
10 class QueuedPublisherService(object):
11 """
12 A publisher that queues items locally and publishes them to a
13 remote pubsub service on a background thread.
14
15 Maintains a lossy internal queue for posting, will discard the
16 value if the queue is full or not immediately available. Will also
17 drop items if the publish operation to the remote service fails.
18 """
19
20 def __init__(self, publisher):
21 self._started = False
22 self.publisher = publisher
23
24 def _start(self):
25 if self._started:
26 return True
27
28 self.q = q = Queue(maxsize=100)
29
30 def worker():
31 while True:
32 (channel, key, value) = q.get()
33 try:
34 self.publisher.publish(channel, key=key, value=value)
35 except Exception as e:
36 logger = logging.getLogger("sentry.errors")
37 logger.debug("could not submit event to pubsub: %s" % e)
38 finally:
39 q.task_done()
40
41 t = Thread(target=worker)
42 t.setDaemon(True)
43 t.start()
44
45 self._started = True
46 return True
47
48 def publish(self, channel, value, key=None):
49 if not self._start():
50 return
51
52 try:
53 self.q.put((channel, key, value), block=False)
54 except Full:
55 return
56
57
58 class RedisPublisher(object):
59 def __init__(self, connection):
60 self.rds = None if connection is None else redis.StrictRedis(**connection)
61
62 def publish(self, channel, value, key=None):
63 if self.rds is not None:
64 self.rds.publish(channel, value)
65
66
67 class KafkaPublisher(object):
68 def __init__(self, connection, asynchronous=True):
69 from confluent_kafka import Producer
70
71 self.producer = Producer(connection or {})
72 self.asynchronous = asynchronous
73
74 def publish(self, channel, value, key=None):
75 self.producer.produce(topic=channel, value=value, key=key)
76 if not self.asynchronous:
77 self.producer.flush()
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/utils/pubsub.py b/src/sentry/utils/pubsub.py
--- a/src/sentry/utils/pubsub.py
+++ b/src/sentry/utils/pubsub.py
@@ -73,5 +73,7 @@
def publish(self, channel, value, key=None):
self.producer.produce(topic=channel, value=value, key=key)
- if not self.asynchronous:
+ if self.asynchronous:
+ self.producer.poll(0)
+ else:
self.producer.flush()
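The patch above adds a `poll(0)` call after each asynchronous `produce()`. With confluent-kafka, `poll()` serves delivery callbacks and lets the client make progress on its internal queue; if it is never called, the local buffer eventually fills up and `produce()` raises `BufferError`. A minimal sketch of that pattern (broker address, topic and payload are placeholders, and this is not Sentry code):

```python
# Sketch of the produce/poll pattern with confluent-kafka.
from confluent_kafka import Producer

producer = Producer({"bootstrap.servers": "localhost:9092"})  # placeholder broker

def delivery_report(err, msg):
    if err is not None:
        print("delivery failed: %s" % err)

def publish(topic, value, key=None):
    try:
        producer.produce(topic=topic, value=value, key=key, callback=delivery_report)
    except BufferError:
        # Local queue is full: serve callbacks to drain it, then retry once.
        producer.poll(1)
        producer.produce(topic=topic, value=value, key=key, callback=delivery_report)
    # Serve delivery callbacks without blocking so the queue keeps draining.
    producer.poll(0)

publish("events", b"hello")   # placeholder topic and payload
producer.flush()              # block until all queued messages are delivered
```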
| {"golden_diff": "diff --git a/src/sentry/utils/pubsub.py b/src/sentry/utils/pubsub.py\n--- a/src/sentry/utils/pubsub.py\n+++ b/src/sentry/utils/pubsub.py\n@@ -73,5 +73,7 @@\n \n def publish(self, channel, value, key=None):\n self.producer.produce(topic=channel, value=value, key=key)\n- if not self.asynchronous:\n+ if self.asynchronous:\n+ self.producer.poll(0)\n+ else:\n self.producer.flush()\n", "issue": "BufferError: Local: Queue full\nI am receiving this error once every 2-4 days and I need to restart Sentry to fix it. This started after moving to the Docker version of Sentry.\r\n\r\nI never noticed this being an issue on 9.1.2 also with Clickhouse and Snuba running, but without Kafka.\r\n\r\n> https://observ.app/share/issue/4e4f208a500d48cc898770930706959a/\r\n\r\nI am not sure where to look / poke / monitor to see this queue that is being spoken of and how I can flush it / enlarge it if needed.\r\n\r\n`sentry queues list` showed all 0's so it's not looking like there is a massive backlog of events.\r\n\r\nAny help is appreciated!\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport redis\nimport logging\n\nfrom threading import Thread\nfrom six.moves.queue import Queue, Full\n\n\nclass QueuedPublisherService(object):\n \"\"\"\n A publisher that queues items locally and publishes them to a\n remote pubsub service on a background thread.\n\n Maintains a lossy internal queue for posting, will discard the\n value if the queue is full or not immediately available. Will also\n drop items if the publish operation to the remote service fails.\n \"\"\"\n\n def __init__(self, publisher):\n self._started = False\n self.publisher = publisher\n\n def _start(self):\n if self._started:\n return True\n\n self.q = q = Queue(maxsize=100)\n\n def worker():\n while True:\n (channel, key, value) = q.get()\n try:\n self.publisher.publish(channel, key=key, value=value)\n except Exception as e:\n logger = logging.getLogger(\"sentry.errors\")\n logger.debug(\"could not submit event to pubsub: %s\" % e)\n finally:\n q.task_done()\n\n t = Thread(target=worker)\n t.setDaemon(True)\n t.start()\n\n self._started = True\n return True\n\n def publish(self, channel, value, key=None):\n if not self._start():\n return\n\n try:\n self.q.put((channel, key, value), block=False)\n except Full:\n return\n\n\nclass RedisPublisher(object):\n def __init__(self, connection):\n self.rds = None if connection is None else redis.StrictRedis(**connection)\n\n def publish(self, channel, value, key=None):\n if self.rds is not None:\n self.rds.publish(channel, value)\n\n\nclass KafkaPublisher(object):\n def __init__(self, connection, asynchronous=True):\n from confluent_kafka import Producer\n\n self.producer = Producer(connection or {})\n self.asynchronous = asynchronous\n\n def publish(self, channel, value, key=None):\n self.producer.produce(topic=channel, value=value, key=key)\n if not self.asynchronous:\n self.producer.flush()\n", "path": "src/sentry/utils/pubsub.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport redis\nimport logging\n\nfrom threading import Thread\nfrom six.moves.queue import Queue, Full\n\n\nclass QueuedPublisherService(object):\n \"\"\"\n A publisher that queues items locally and publishes them to a\n remote pubsub service on a background thread.\n\n Maintains a lossy internal queue for posting, will discard the\n value if the queue is full or not immediately available. 
Will also\n drop items if the publish operation to the remote service fails.\n \"\"\"\n\n def __init__(self, publisher):\n self._started = False\n self.publisher = publisher\n\n def _start(self):\n if self._started:\n return True\n\n self.q = q = Queue(maxsize=100)\n\n def worker():\n while True:\n (channel, key, value) = q.get()\n try:\n self.publisher.publish(channel, key=key, value=value)\n except Exception as e:\n logger = logging.getLogger(\"sentry.errors\")\n logger.debug(\"could not submit event to pubsub: %s\" % e)\n finally:\n q.task_done()\n\n t = Thread(target=worker)\n t.setDaemon(True)\n t.start()\n\n self._started = True\n return True\n\n def publish(self, channel, value, key=None):\n if not self._start():\n return\n\n try:\n self.q.put((channel, key, value), block=False)\n except Full:\n return\n\n\nclass RedisPublisher(object):\n def __init__(self, connection):\n self.rds = None if connection is None else redis.StrictRedis(**connection)\n\n def publish(self, channel, value, key=None):\n if self.rds is not None:\n self.rds.publish(channel, value)\n\n\nclass KafkaPublisher(object):\n def __init__(self, connection, asynchronous=True):\n from confluent_kafka import Producer\n\n self.producer = Producer(connection or {})\n self.asynchronous = asynchronous\n\n def publish(self, channel, value, key=None):\n self.producer.produce(topic=channel, value=value, key=key)\n if self.asynchronous:\n self.producer.poll(0)\n else:\n self.producer.flush()\n", "path": "src/sentry/utils/pubsub.py"}]} | 1,051 | 115 |
gh_patches_debug_28661 | rasdani/github-patches | git_diff | Kinto__kinto-696 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inconsistency with 404 response on empty collections
- Set `read_only` to true
- Give readonly access to the postgresql user
- Give `read` permission to everyone on a bucket `foo`
- Going to `/buckets/foo/collections/unknown` gives 404
- Going to `/buckets/foo/collections/unknown/records` gives 503
Listing the records of an unknown collection should definitely give 404, except with the `default` bucket plugin.
Very related to https://github.com/Kinto/kinto/issues/558
--- END ISSUE ---
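To illustrate the behaviour the issue asks for, here is a small generic sketch (not Kinto code; the storage layout and helper names are invented) of resolving the parent collection first so that an unknown parent yields 404 instead of a later server error:

```python
# Generic sketch: resolve the parent before touching the child listing.
class HTTPNotFound(Exception):
    pass

STORAGE = {"/buckets/foo": {"collections": {"articles": {"records": []}}}}

def list_records(bucket_id, collection_id):
    bucket = STORAGE.get("/buckets/%s" % bucket_id)
    if bucket is None or collection_id not in bucket["collections"]:
        # Unknown parent: the records listing should also answer 404.
        raise HTTPNotFound("collection %r not found" % collection_id)
    return bucket["collections"][collection_id]["records"]

print(list_records("foo", "articles"))      # []
try:
    list_records("foo", "unknown")
except HTTPNotFound as exc:
    print(exc)                              # collection 'unknown' not found
```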
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/views/records.py`
Content:
```
1 import copy
2
3 import jsonschema
4 from kinto.core import resource
5 from kinto.core.errors import raise_invalid
6 from jsonschema import exceptions as jsonschema_exceptions
7 from pyramid.security import Authenticated
8 from pyramid.settings import asbool
9
10 from kinto.views import RelaxedUUID, object_exists_or_404
11
12
13 class RecordSchema(resource.ResourceSchema):
14 class Options:
15 preserve_unknown = True
16
17
18 _parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'
19
20
21 @resource.register(name='record',
22 collection_path=_parent_path + '/records',
23 record_path=_parent_path + '/records/{{id}}')
24 class Record(resource.ShareableResource):
25
26 mapping = RecordSchema()
27 schema_field = 'schema'
28
29 def __init__(self, *args, **kwargs):
30 super(Record, self).__init__(*args, **kwargs)
31
32 self.model.id_generator = RelaxedUUID()
33
34 # Check if already fetched before (in batch).
35 collections = self.request.bound_data.setdefault('collections', {})
36 collection_uri = self.get_parent_id(self.request)
37 if collection_uri not in collections:
38 # Unknown yet, fetch from storage.
39 collection_parent_id = '/buckets/%s' % self.bucket_id
40 collection = object_exists_or_404(self.request,
41 collection_id='collection',
42 parent_id=collection_parent_id,
43 object_id=self.collection_id)
44 collections[collection_uri] = collection
45
46 self._collection = collections[collection_uri]
47
48 def get_parent_id(self, request):
49 self.bucket_id = request.matchdict['bucket_id']
50 self.collection_id = request.matchdict['collection_id']
51 return '/buckets/%s/collections/%s' % (self.bucket_id,
52 self.collection_id)
53
54 def is_known_field(self, field_name):
55 """Without schema, any field is considered as known."""
56 return True
57
58 def process_record(self, new, old=None):
59 """Validate records against collection schema, if any."""
60 new = super(Record, self).process_record(new, old)
61
62 schema = self._collection.get('schema')
63 settings = self.request.registry.settings
64 schema_validation = 'experimental_collection_schema_validation'
65 if not schema or not asbool(settings.get(schema_validation)):
66 return new
67
68 collection_timestamp = self._collection[self.model.modified_field]
69
70 try:
71 stripped = copy.deepcopy(new)
72 stripped.pop(self.model.id_field, None)
73 stripped.pop(self.model.modified_field, None)
74 stripped.pop(self.model.permissions_field, None)
75 stripped.pop(self.schema_field, None)
76 jsonschema.validate(stripped, schema)
77 except jsonschema_exceptions.ValidationError as e:
78 try:
79 field = e.path.pop() if e.path else e.validator_value.pop()
80 except AttributeError:
81 field = None
82 raise_invalid(self.request, name=field, description=e.message)
83
84 new[self.schema_field] = collection_timestamp
85 return new
86
87 def collection_get(self):
88 result = super(Record, self).collection_get()
89 self._handle_cache_expires(self.request.response)
90 return result
91
92 def get(self):
93 result = super(Record, self).get()
94 self._handle_cache_expires(self.request.response)
95 return result
96
97 def _handle_cache_expires(self, response):
98 """If the parent collection defines a ``cache_expires`` attribute,
99 then cache-control response headers are sent.
100
101 .. note::
102
103 Those headers are also sent if the
104 ``kinto.record_cache_expires_seconds`` setting is defined.
105 """
106 is_anonymous = Authenticated not in self.request.effective_principals
107 if not is_anonymous:
108 return
109
110 cache_expires = self._collection.get('cache_expires')
111 if cache_expires is None:
112 by_bucket = '%s_record_cache_expires_seconds' % (self.bucket_id)
113 by_collection = '%s_%s_record_cache_expires_seconds' % (
114 self.bucket_id, self.collection_id)
115 settings = self.request.registry.settings
116 cache_expires = settings.get(by_collection,
117 settings.get(by_bucket))
118
119 if cache_expires is not None:
120 response.cache_expires(seconds=int(cache_expires))
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/views/records.py b/kinto/views/records.py
--- a/kinto/views/records.py
+++ b/kinto/views/records.py
@@ -26,23 +26,22 @@
mapping = RecordSchema()
schema_field = 'schema'
- def __init__(self, *args, **kwargs):
- super(Record, self).__init__(*args, **kwargs)
-
- self.model.id_generator = RelaxedUUID()
-
+ def __init__(self, request, **kwargs):
+ # Before all, first check that the parent collection exists.
# Check if already fetched before (in batch).
- collections = self.request.bound_data.setdefault('collections', {})
- collection_uri = self.get_parent_id(self.request)
+ collections = request.bound_data.setdefault('collections', {})
+ collection_uri = self.get_parent_id(request)
if collection_uri not in collections:
# Unknown yet, fetch from storage.
collection_parent_id = '/buckets/%s' % self.bucket_id
- collection = object_exists_or_404(self.request,
+ collection = object_exists_or_404(request,
collection_id='collection',
parent_id=collection_parent_id,
object_id=self.collection_id)
collections[collection_uri] = collection
+ super(Record, self).__init__(request, **kwargs)
+ self.model.id_generator = RelaxedUUID()
self._collection = collections[collection_uri]
def get_parent_id(self, request):
| {"golden_diff": "diff --git a/kinto/views/records.py b/kinto/views/records.py\n--- a/kinto/views/records.py\n+++ b/kinto/views/records.py\n@@ -26,23 +26,22 @@\n mapping = RecordSchema()\n schema_field = 'schema'\n \n- def __init__(self, *args, **kwargs):\n- super(Record, self).__init__(*args, **kwargs)\n-\n- self.model.id_generator = RelaxedUUID()\n-\n+ def __init__(self, request, **kwargs):\n+ # Before all, first check that the parent collection exists.\n # Check if already fetched before (in batch).\n- collections = self.request.bound_data.setdefault('collections', {})\n- collection_uri = self.get_parent_id(self.request)\n+ collections = request.bound_data.setdefault('collections', {})\n+ collection_uri = self.get_parent_id(request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = '/buckets/%s' % self.bucket_id\n- collection = object_exists_or_404(self.request,\n+ collection = object_exists_or_404(request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n \n+ super(Record, self).__init__(request, **kwargs)\n+ self.model.id_generator = RelaxedUUID()\n self._collection = collections[collection_uri]\n \n def get_parent_id(self, request):\n", "issue": "Inconsistency with 404 response on empty collections\n- Set `read_only` to true\n- Give readonly access to the postgresql user\n- Give `read` permission to everyone on a bucket `foo`\n- Going to `/buckets/foo/collections/unknown` gives 404\n- Going to `/buckets/foo/collections/unknown/records` gives 503\n\nListing the records of an unknown collection should definitely give 404, except with the `default` bucket plugin.\n\nVery related to https://github.com/Kinto/kinto/issues/558\n\n", "before_files": [{"content": "import copy\n\nimport jsonschema\nfrom kinto.core import resource\nfrom kinto.core.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\nfrom pyramid.security import Authenticated\nfrom pyramid.settings import asbool\n\nfrom kinto.views import RelaxedUUID, object_exists_or_404\n\n\nclass RecordSchema(resource.ResourceSchema):\n class Options:\n preserve_unknown = True\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ShareableResource):\n\n mapping = RecordSchema()\n schema_field = 'schema'\n\n def __init__(self, *args, **kwargs):\n super(Record, self).__init__(*args, **kwargs)\n\n self.model.id_generator = RelaxedUUID()\n\n # Check if already fetched before (in batch).\n collections = self.request.bound_data.setdefault('collections', {})\n collection_uri = self.get_parent_id(self.request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = '/buckets/%s' % self.bucket_id\n collection = object_exists_or_404(self.request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n\n self._collection = collections[collection_uri]\n\n def get_parent_id(self, request):\n self.bucket_id = request.matchdict['bucket_id']\n self.collection_id = request.matchdict['collection_id']\n return '/buckets/%s/collections/%s' % (self.bucket_id,\n self.collection_id)\n\n def is_known_field(self, field_name):\n \"\"\"Without schema, any field is considered as known.\"\"\"\n 
return True\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n new = super(Record, self).process_record(new, old)\n\n schema = self._collection.get('schema')\n settings = self.request.registry.settings\n schema_validation = 'experimental_collection_schema_validation'\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n\n collection_timestamp = self._collection[self.model.modified_field]\n\n try:\n stripped = copy.deepcopy(new)\n stripped.pop(self.model.id_field, None)\n stripped.pop(self.model.modified_field, None)\n stripped.pop(self.model.permissions_field, None)\n stripped.pop(self.schema_field, None)\n jsonschema.validate(stripped, schema)\n except jsonschema_exceptions.ValidationError as e:\n try:\n field = e.path.pop() if e.path else e.validator_value.pop()\n except AttributeError:\n field = None\n raise_invalid(self.request, name=field, description=e.message)\n\n new[self.schema_field] = collection_timestamp\n return new\n\n def collection_get(self):\n result = super(Record, self).collection_get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def get(self):\n result = super(Record, self).get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def _handle_cache_expires(self, response):\n \"\"\"If the parent collection defines a ``cache_expires`` attribute,\n then cache-control response headers are sent.\n\n .. note::\n\n Those headers are also sent if the\n ``kinto.record_cache_expires_seconds`` setting is defined.\n \"\"\"\n is_anonymous = Authenticated not in self.request.effective_principals\n if not is_anonymous:\n return\n\n cache_expires = self._collection.get('cache_expires')\n if cache_expires is None:\n by_bucket = '%s_record_cache_expires_seconds' % (self.bucket_id)\n by_collection = '%s_%s_record_cache_expires_seconds' % (\n self.bucket_id, self.collection_id)\n settings = self.request.registry.settings\n cache_expires = settings.get(by_collection,\n settings.get(by_bucket))\n\n if cache_expires is not None:\n response.cache_expires(seconds=int(cache_expires))\n", "path": "kinto/views/records.py"}], "after_files": [{"content": "import copy\n\nimport jsonschema\nfrom kinto.core import resource\nfrom kinto.core.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\nfrom pyramid.security import Authenticated\nfrom pyramid.settings import asbool\n\nfrom kinto.views import RelaxedUUID, object_exists_or_404\n\n\nclass RecordSchema(resource.ResourceSchema):\n class Options:\n preserve_unknown = True\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ShareableResource):\n\n mapping = RecordSchema()\n schema_field = 'schema'\n\n def __init__(self, request, **kwargs):\n # Before all, first check that the parent collection exists.\n # Check if already fetched before (in batch).\n collections = request.bound_data.setdefault('collections', {})\n collection_uri = self.get_parent_id(request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = '/buckets/%s' % self.bucket_id\n collection = object_exists_or_404(request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n\n super(Record, self).__init__(request, 
**kwargs)\n self.model.id_generator = RelaxedUUID()\n self._collection = collections[collection_uri]\n\n def get_parent_id(self, request):\n self.bucket_id = request.matchdict['bucket_id']\n self.collection_id = request.matchdict['collection_id']\n return '/buckets/%s/collections/%s' % (self.bucket_id,\n self.collection_id)\n\n def is_known_field(self, field_name):\n \"\"\"Without schema, any field is considered as known.\"\"\"\n return True\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n new = super(Record, self).process_record(new, old)\n\n schema = self._collection.get('schema')\n settings = self.request.registry.settings\n schema_validation = 'experimental_collection_schema_validation'\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n\n collection_timestamp = self._collection[self.model.modified_field]\n\n try:\n stripped = copy.deepcopy(new)\n stripped.pop(self.model.id_field, None)\n stripped.pop(self.model.modified_field, None)\n stripped.pop(self.model.permissions_field, None)\n stripped.pop(self.schema_field, None)\n jsonschema.validate(stripped, schema)\n except jsonschema_exceptions.ValidationError as e:\n try:\n field = e.path.pop() if e.path else e.validator_value.pop()\n except AttributeError:\n field = None\n raise_invalid(self.request, name=field, description=e.message)\n\n new[self.schema_field] = collection_timestamp\n return new\n\n def collection_get(self):\n result = super(Record, self).collection_get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def get(self):\n result = super(Record, self).get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def _handle_cache_expires(self, response):\n \"\"\"If the parent collection defines a ``cache_expires`` attribute,\n then cache-control response headers are sent.\n\n .. note::\n\n Those headers are also sent if the\n ``kinto.record_cache_expires_seconds`` setting is defined.\n \"\"\"\n is_anonymous = Authenticated not in self.request.effective_principals\n if not is_anonymous:\n return\n\n cache_expires = self._collection.get('cache_expires')\n if cache_expires is None:\n by_bucket = '%s_record_cache_expires_seconds' % (self.bucket_id)\n by_collection = '%s_%s_record_cache_expires_seconds' % (\n self.bucket_id, self.collection_id)\n settings = self.request.registry.settings\n cache_expires = settings.get(by_collection,\n settings.get(by_bucket))\n\n if cache_expires is not None:\n response.cache_expires(seconds=int(cache_expires))\n", "path": "kinto/views/records.py"}]} | 1,534 | 329 |
gh_patches_debug_50453 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-3837 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Maybe a bug about module checking
### Bug description
<!-- Use this section to clearly and concisely describe the bug. -->
If I use conda to install only jupyterhub and python (conda install -c conda-forge python=3.9 jupyterhub), the following message shows up when someone tries to log in:
```
Failed to set groups [Errno 1] Operation not permitted
Traceback (most recent call last):
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/bin/jupyterhub-singleuser", line 7, in <module>
from jupyterhub.singleuser import main
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/__init__.py", line 5, in <module>
from .app import main
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py", line 38, in <module>
raise _import_error
TypeError: exceptions must derive from BaseException
```
I think the problem is the lines from 32 to 36 in jupyterhub/singleuser/app.py
```
except ImportError as e:
continue
if _import_error is None:
_import_error = e
else:
break
```
I changed that with:
```
except ImportError as e:
if _import_error is None:
_import_error = e
else:
break
continue
```
then a more helpful message was shown:
```
Failed to set groups [Errno 1] Operation not permitted
Traceback (most recent call last):
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/bin/jupyterhub-singleuser", line 7, in <module>
from jupyterhub.singleuser import main
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/__init__.py", line 5, in <module>
from .app import main
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py", line 38, in <module>
raise _import_error
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py", line 30, in <module>
App = import_item(JUPYTERHUB_SINGLEUSER_APP)
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/traitlets/utils/importstring.py", line 30, in import_item
module = __import__(package, fromlist=[obj])
ModuleNotFoundError: No module named 'jupyter_server'
```
The above message let me know that I have to install jupyter_server.
This issue can be closed anytime.
Any suggestion is welcome.
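
For what it's worth, here is a minimal snippet (independent of JupyterHub) showing why the original flow ends in TypeError rather than the real ImportError: `continue` runs before the exception is stored, so `_import_error` is still `None` when it is finally raised.

```python
# Minimal reproduction on stock CPython: the loop's `continue` fires before the
# ImportError is recorded, so the variable is still None when it is finally raised,
# and raising None is rejected with the TypeError seen in the traceback above.
_import_error = None
try:
    raise _import_error
except TypeError as err:
    print(err)  # exceptions must derive from BaseException
```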
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jupyterhub/singleuser/app.py`
Content:
```
1 """Make a single-user app based on the environment:
2
3 - $JUPYTERHUB_SINGLEUSER_APP, the base Application class, to be wrapped in JupyterHub authentication.
4 default: jupyter_server.serverapp.ServerApp
5
6 .. versionchanged:: 2.0
7
8 Default app changed to launch `jupyter labhub`.
9 Use JUPYTERHUB_SINGLEUSER_APP=notebook.notebookapp.NotebookApp for the legacy 'classic' notebook server.
10 """
11 import os
12
13 from traitlets import import_item
14
15 from .mixins import make_singleuser_app
16
17 JUPYTERHUB_SINGLEUSER_APP = os.environ.get("JUPYTERHUB_SINGLEUSER_APP")
18
19
20 if JUPYTERHUB_SINGLEUSER_APP:
21 App = import_item(JUPYTERHUB_SINGLEUSER_APP)
22 else:
23 App = None
24 _import_error = None
25 for JUPYTERHUB_SINGLEUSER_APP in (
26 "jupyter_server.serverapp.ServerApp",
27 "notebook.notebookapp.NotebookApp",
28 ):
29 try:
30 App = import_item(JUPYTERHUB_SINGLEUSER_APP)
31 except ImportError as e:
32 continue
33 if _import_error is None:
34 _import_error = e
35 else:
36 break
37 if App is None:
38 raise _import_error
39
40
41 SingleUserNotebookApp = make_singleuser_app(App)
42
43
44 def main():
45 """Launch a jupyterhub single-user server"""
46 if not os.environ.get("JUPYTERHUB_SINGLEUSER_APP"):
47 # app not specified, launch jupyter-labhub by default,
48 # if jupyterlab is recent enough (3.1).
49 # This is a minimally extended ServerApp that does:
50 # 1. ensure lab extension is enabled, and
51 # 2. set default URL to `/lab`
52 import re
53
54 _version_pat = re.compile(r"(\d+)\.(\d+)")
55 try:
56 import jupyterlab
57 from jupyterlab.labhubapp import SingleUserLabApp
58
59 m = _version_pat.match(jupyterlab.__version__)
60 except Exception:
61 m = None
62
63 if m is not None:
64 version_tuple = tuple(int(v) for v in m.groups())
65 if version_tuple >= (3, 1):
66 return SingleUserLabApp.launch_instance()
67
68 return SingleUserNotebookApp.launch_instance()
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/jupyterhub/singleuser/app.py b/jupyterhub/singleuser/app.py
--- a/jupyterhub/singleuser/app.py
+++ b/jupyterhub/singleuser/app.py
@@ -29,9 +29,9 @@
try:
App = import_item(JUPYTERHUB_SINGLEUSER_APP)
except ImportError as e:
- continue
if _import_error is None:
_import_error = e
+ continue
else:
break
if App is None:
| {"golden_diff": "diff --git a/jupyterhub/singleuser/app.py b/jupyterhub/singleuser/app.py\n--- a/jupyterhub/singleuser/app.py\n+++ b/jupyterhub/singleuser/app.py\n@@ -29,9 +29,9 @@\n try:\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\n except ImportError as e:\n- continue\n if _import_error is None:\n _import_error = e\n+ continue\n else:\n break\n if App is None:\n", "issue": "Maybe a bug about module checking\n### Bug description\r\n<!-- Use this section to clearly and concisely describe the bug. -->\r\nIf I use conda to install only jupyterhub and python (conda install -c conda-forge python=3.9 jupyterhub), the following message showed as someone try to login:\r\n\r\n```\r\nFailed to set groups [Errno 1] Operation not permitted\r\nTraceback (most recent call last):\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/bin/jupyterhub-singleuser\", line 7, in <module>\r\n from jupyterhub.singleuser import main\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/__init__.py\", line 5, in <module>\r\n from .app import main\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py\", line 38, in <module>\r\n raise _import_error\r\nTypeError: exceptions must derive from BaseException\r\n```\r\nI think the problem is the lines from 32 to 36 in jupyterhub/singleuser/app.py\r\n```\r\n except ImportError as e:\r\n continue\r\n if _import_error is None:\r\n _import_error = e\r\n else:\r\n break\r\n```\r\n\r\nI changed that with:\r\n```\r\n except ImportError as e:\r\n if _import_error is None:\r\n _import_error = e\r\n else:\r\n break\r\n continue\r\n```\r\nthen the better message showed:\r\n```\r\nFailed to set groups [Errno 1] Operation not permitted\r\nTraceback (most recent call last):\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/bin/jupyterhub-singleuser\", line 7, in <module>\r\n from jupyterhub.singleuser import main\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/__init__.py\", line 5, in <module>\r\n from .app import main\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py\", line 38, in <module>\r\n raise _import_error\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py\", line 30, in <module>\r\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/traitlets/utils/importstring.py\", line 30, in import_item\r\n module = __import__(package, fromlist=[obj])\r\nModuleNotFoundError: No module named 'jupyter_server'\r\n```\r\nThe above message let me know that I have to install jupyter_server.\r\nThis issue can be closed anytime.\r\nAny suggestion is welcome.\r\n\n", "before_files": [{"content": "\"\"\"Make a single-user app based on the environment:\n\n- $JUPYTERHUB_SINGLEUSER_APP, the base Application class, to be wrapped in JupyterHub authentication.\n default: jupyter_server.serverapp.ServerApp\n\n.. 
versionchanged:: 2.0\n\n Default app changed to launch `jupyter labhub`.\n Use JUPYTERHUB_SINGLEUSER_APP=notebook.notebookapp.NotebookApp for the legacy 'classic' notebook server.\n\"\"\"\nimport os\n\nfrom traitlets import import_item\n\nfrom .mixins import make_singleuser_app\n\nJUPYTERHUB_SINGLEUSER_APP = os.environ.get(\"JUPYTERHUB_SINGLEUSER_APP\")\n\n\nif JUPYTERHUB_SINGLEUSER_APP:\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\nelse:\n App = None\n _import_error = None\n for JUPYTERHUB_SINGLEUSER_APP in (\n \"jupyter_server.serverapp.ServerApp\",\n \"notebook.notebookapp.NotebookApp\",\n ):\n try:\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\n except ImportError as e:\n continue\n if _import_error is None:\n _import_error = e\n else:\n break\n if App is None:\n raise _import_error\n\n\nSingleUserNotebookApp = make_singleuser_app(App)\n\n\ndef main():\n \"\"\"Launch a jupyterhub single-user server\"\"\"\n if not os.environ.get(\"JUPYTERHUB_SINGLEUSER_APP\"):\n # app not specified, launch jupyter-labhub by default,\n # if jupyterlab is recent enough (3.1).\n # This is a minimally extended ServerApp that does:\n # 1. ensure lab extension is enabled, and\n # 2. set default URL to `/lab`\n import re\n\n _version_pat = re.compile(r\"(\\d+)\\.(\\d+)\")\n try:\n import jupyterlab\n from jupyterlab.labhubapp import SingleUserLabApp\n\n m = _version_pat.match(jupyterlab.__version__)\n except Exception:\n m = None\n\n if m is not None:\n version_tuple = tuple(int(v) for v in m.groups())\n if version_tuple >= (3, 1):\n return SingleUserLabApp.launch_instance()\n\n return SingleUserNotebookApp.launch_instance()\n", "path": "jupyterhub/singleuser/app.py"}], "after_files": [{"content": "\"\"\"Make a single-user app based on the environment:\n\n- $JUPYTERHUB_SINGLEUSER_APP, the base Application class, to be wrapped in JupyterHub authentication.\n default: jupyter_server.serverapp.ServerApp\n\n.. versionchanged:: 2.0\n\n Default app changed to launch `jupyter labhub`.\n Use JUPYTERHUB_SINGLEUSER_APP=notebook.notebookapp.NotebookApp for the legacy 'classic' notebook server.\n\"\"\"\nimport os\n\nfrom traitlets import import_item\n\nfrom .mixins import make_singleuser_app\n\nJUPYTERHUB_SINGLEUSER_APP = os.environ.get(\"JUPYTERHUB_SINGLEUSER_APP\")\n\n\nif JUPYTERHUB_SINGLEUSER_APP:\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\nelse:\n App = None\n _import_error = None\n for JUPYTERHUB_SINGLEUSER_APP in (\n \"jupyter_server.serverapp.ServerApp\",\n \"notebook.notebookapp.NotebookApp\",\n ):\n try:\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\n except ImportError as e:\n if _import_error is None:\n _import_error = e\n continue\n else:\n break\n if App is None:\n raise _import_error\n\n\nSingleUserNotebookApp = make_singleuser_app(App)\n\n\ndef main():\n \"\"\"Launch a jupyterhub single-user server\"\"\"\n if not os.environ.get(\"JUPYTERHUB_SINGLEUSER_APP\"):\n # app not specified, launch jupyter-labhub by default,\n # if jupyterlab is recent enough (3.1).\n # This is a minimally extended ServerApp that does:\n # 1. ensure lab extension is enabled, and\n # 2. 
set default URL to `/lab`\n import re\n\n _version_pat = re.compile(r\"(\\d+)\\.(\\d+)\")\n try:\n import jupyterlab\n from jupyterlab.labhubapp import SingleUserLabApp\n\n m = _version_pat.match(jupyterlab.__version__)\n except Exception:\n m = None\n\n if m is not None:\n version_tuple = tuple(int(v) for v in m.groups())\n if version_tuple >= (3, 1):\n return SingleUserLabApp.launch_instance()\n\n return SingleUserNotebookApp.launch_instance()\n", "path": "jupyterhub/singleuser/app.py"}]} | 1,619 | 111 |
gh_patches_debug_39273 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3126 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider jimmy-johns is broken
During the global build at 2021-09-29-14-42-48, spider **jimmy-johns** failed with **0 features** and **1544 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/logs/jimmy-johns.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/output/jimmy-johns.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/output/jimmy-johns.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/jimmy_johns.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4
5 from locations.items import GeojsonPointItem
6
7 STATES = ["AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA",
8 "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
9 "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
10 "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
11 "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"]
12 HEADERS = { 'Content-Type': 'application/json' }
13 JJBASE = 'https://www.jimmyjohns.com/webservices/Location/LocationServiceHandler.asmx/{}'
14 CITIES = JJBASE.format('GetCitiesByStateNameAbbreviation')
15 STORES = JJBASE.format('GetStoreAddressesByCityAndState')
16
17 class JimmyJohnsSpider(scrapy.Spider):
18 name = "jimmy-johns"
19 item_attributes = { 'brand': "Jimmy John's", 'brand_wikidata': "Q1689380" }
20 allowed_domains = ["www.jimmyjohns.com"]
21 download_delay = 0.2
22
23 def start_requests(self):
24 for state in STATES:
25 current_state = json.dumps({ 'state': state })
26 request = scrapy.Request(
27 CITIES,
28 method='POST',
29 body=current_state,
30 headers=HEADERS,
31 callback=self.parse_cities
32 )
33 request.meta['state'] = state
34 yield request
35
36 def parse_cities(self, response):
37 cities = json.loads(response.body)
38 for city in cities['d']:
39 current_city = json.dumps({ 'state': response.meta['state'], 'city': city })
40 request = scrapy.Request(
41 STORES,
42 method='POST',
43 body=current_city,
44 headers=HEADERS,
45 callback=self.parse
46 )
47 yield request
48
49 def parse(self, response):
50 stores = json.loads(response.body)
51 for store in stores['d']:
52 full = '{}, {}, {} {}'.format(store['address'], store['city'], store['state'], store['postalcode'])
53 yield GeojsonPointItem(
54 name=store['storename'],
55 addr_full=full,
56 opening_hours=store['hours'],
57 phone=store['telephone'],
58 ref=store['storeid'],
59 lon=float(store['lng']),
60 lat=float(store['lat']),
61 )
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/jimmy_johns.py b/locations/spiders/jimmy_johns.py
--- a/locations/spiders/jimmy_johns.py
+++ b/locations/spiders/jimmy_johns.py
@@ -1,61 +1,36 @@
# -*- coding: utf-8 -*-
+from os import stat
import scrapy
+from urllib import parse
import json
-
from locations.items import GeojsonPointItem
-STATES = ["AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA",
- "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
- "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
- "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
- "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"]
-HEADERS = { 'Content-Type': 'application/json' }
-JJBASE = 'https://www.jimmyjohns.com/webservices/Location/LocationServiceHandler.asmx/{}'
-CITIES = JJBASE.format('GetCitiesByStateNameAbbreviation')
-STORES = JJBASE.format('GetStoreAddressesByCityAndState')
+class TemplateSpider(scrapy.Spider):
+ name = "jimmy_johns"
+ allowed_domains = ["locations.jimmyjohns.com"]
+ start_urls = (
+ 'https://locations.jimmyjohns.com/sitemap.xml',
+ )
-class JimmyJohnsSpider(scrapy.Spider):
- name = "jimmy-johns"
- item_attributes = { 'brand': "Jimmy John's", 'brand_wikidata': "Q1689380" }
- allowed_domains = ["www.jimmyjohns.com"]
- download_delay = 0.2
+ def parse(self, response):
+ stores = response.xpath('//url/loc[contains(text(),"sandwiches")]/text()').extract()
+ for store in stores:
+ yield scrapy.Request(response.urljoin(store), callback=self.parse_store)
- def start_requests(self):
- for state in STATES:
- current_state = json.dumps({ 'state': state })
- request = scrapy.Request(
- CITIES,
- method='POST',
- body=current_state,
- headers=HEADERS,
- callback=self.parse_cities
- )
- request.meta['state'] = state
- yield request
+ def parse_store(self, response):
+ data = json.loads(response.xpath('//script[@type="application/ld+json"]//text()').extract_first())
- def parse_cities(self, response):
- cities = json.loads(response.body)
- for city in cities['d']:
- current_city = json.dumps({ 'state': response.meta['state'], 'city': city })
- request = scrapy.Request(
- STORES,
- method='POST',
- body=current_city,
- headers=HEADERS,
- callback=self.parse
- )
- yield request
+ properties = {
+ 'ref': data[0]['url'],
+ 'addr_full': data[0]['address']['streetAddress'],
+ 'city': data[0]['address']['addressLocality'],
+ 'state': data[0]['address']['addressRegion'],
+ 'postcode': data[0]['address']['postalCode'],
+ 'website': response.url,
+ 'lat': data[0]['geo']['latitude'],
+ 'lon': data[0]['geo']['longitude'],
+ }
+ if data[0]['address']['telephone']:
+ properties['phone'] = data[0]['address']['telephone']
- def parse(self, response):
- stores = json.loads(response.body)
- for store in stores['d']:
- full = '{}, {}, {} {}'.format(store['address'], store['city'], store['state'], store['postalcode'])
- yield GeojsonPointItem(
- name=store['storename'],
- addr_full=full,
- opening_hours=store['hours'],
- phone=store['telephone'],
- ref=store['storeid'],
- lon=float(store['lng']),
- lat=float(store['lat']),
- )
+ yield GeojsonPointItem(**properties)
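
A small self-contained sketch of the ld+json extraction step the rewritten spider relies on (the HTML below is a made-up fixture, not a real store page):

```python
import json
from scrapy import Selector

# Made-up fixture, not a real locations.jimmyjohns.com page.
html = (
    '<script type="application/ld+json">'
    '[{"url": "store-1", "address": {"streetAddress": "1 Main St"},'
    ' "geo": {"latitude": 41.88, "longitude": -87.63}}]'
    '</script>'
)
data = json.loads(Selector(text=html).xpath('//script[@type="application/ld+json"]//text()').get())
print(data[0]["geo"]["latitude"], data[0]["geo"]["longitude"])
```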
| {"golden_diff": "diff --git a/locations/spiders/jimmy_johns.py b/locations/spiders/jimmy_johns.py\n--- a/locations/spiders/jimmy_johns.py\n+++ b/locations/spiders/jimmy_johns.py\n@@ -1,61 +1,36 @@\n # -*- coding: utf-8 -*-\n+from os import stat\n import scrapy\n+from urllib import parse\n import json\n-\n from locations.items import GeojsonPointItem\n \n-STATES = [\"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DC\", \"DE\", \"FL\", \"GA\",\n- \"HI\", \"ID\", \"IL\", \"IN\", \"IA\", \"KS\", \"KY\", \"LA\", \"ME\", \"MD\",\n- \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\", \"NJ\",\n- \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\",\n- \"SD\", \"TN\", \"TX\", \"UT\", \"VT\", \"VA\", \"WA\", \"WV\", \"WI\", \"WY\"]\n-HEADERS = { 'Content-Type': 'application/json' }\n-JJBASE = 'https://www.jimmyjohns.com/webservices/Location/LocationServiceHandler.asmx/{}'\n-CITIES = JJBASE.format('GetCitiesByStateNameAbbreviation')\n-STORES = JJBASE.format('GetStoreAddressesByCityAndState')\n+class TemplateSpider(scrapy.Spider):\n+ name = \"jimmy_johns\"\n+ allowed_domains = [\"locations.jimmyjohns.com\"]\n+ start_urls = (\n+ 'https://locations.jimmyjohns.com/sitemap.xml',\n+ )\n \n-class JimmyJohnsSpider(scrapy.Spider):\n- name = \"jimmy-johns\"\n- item_attributes = { 'brand': \"Jimmy John's\", 'brand_wikidata': \"Q1689380\" }\n- allowed_domains = [\"www.jimmyjohns.com\"]\n- download_delay = 0.2\n+ def parse(self, response):\n+ stores = response.xpath('//url/loc[contains(text(),\"sandwiches\")]/text()').extract()\n+ for store in stores:\n+ yield scrapy.Request(response.urljoin(store), callback=self.parse_store)\n \n- def start_requests(self):\n- for state in STATES:\n- current_state = json.dumps({ 'state': state })\n- request = scrapy.Request(\n- CITIES,\n- method='POST',\n- body=current_state,\n- headers=HEADERS,\n- callback=self.parse_cities\n- )\n- request.meta['state'] = state\n- yield request\n+ def parse_store(self, response):\n+ data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]//text()').extract_first())\n \n- def parse_cities(self, response):\n- cities = json.loads(response.body)\n- for city in cities['d']:\n- current_city = json.dumps({ 'state': response.meta['state'], 'city': city })\n- request = scrapy.Request(\n- STORES,\n- method='POST',\n- body=current_city,\n- headers=HEADERS,\n- callback=self.parse\n- )\n- yield request\n+ properties = {\n+ 'ref': data[0]['url'],\n+ 'addr_full': data[0]['address']['streetAddress'],\n+ 'city': data[0]['address']['addressLocality'],\n+ 'state': data[0]['address']['addressRegion'],\n+ 'postcode': data[0]['address']['postalCode'],\n+ 'website': response.url,\n+ 'lat': data[0]['geo']['latitude'],\n+ 'lon': data[0]['geo']['longitude'],\n+ }\n+ if data[0]['address']['telephone']:\n+ properties['phone'] = data[0]['address']['telephone']\n \n- def parse(self, response):\n- stores = json.loads(response.body)\n- for store in stores['d']:\n- full = '{}, {}, {} {}'.format(store['address'], store['city'], store['state'], store['postalcode'])\n- yield GeojsonPointItem(\n- name=store['storename'],\n- addr_full=full,\n- opening_hours=store['hours'],\n- phone=store['telephone'],\n- ref=store['storeid'],\n- lon=float(store['lng']),\n- lat=float(store['lat']),\n- )\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider jimmy-johns is broken\nDuring the global build at 2021-09-29-14-42-48, spider **jimmy-johns** failed with **0 features** and **1544 errors**.\n\nHere's [the 
log](https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/logs/jimmy-johns.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/output/jimmy-johns.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/output/jimmy-johns.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\n\nSTATES = [\"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DC\", \"DE\", \"FL\", \"GA\",\n \"HI\", \"ID\", \"IL\", \"IN\", \"IA\", \"KS\", \"KY\", \"LA\", \"ME\", \"MD\",\n \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\", \"NJ\",\n \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\",\n \"SD\", \"TN\", \"TX\", \"UT\", \"VT\", \"VA\", \"WA\", \"WV\", \"WI\", \"WY\"]\nHEADERS = { 'Content-Type': 'application/json' }\nJJBASE = 'https://www.jimmyjohns.com/webservices/Location/LocationServiceHandler.asmx/{}'\nCITIES = JJBASE.format('GetCitiesByStateNameAbbreviation')\nSTORES = JJBASE.format('GetStoreAddressesByCityAndState')\n\nclass JimmyJohnsSpider(scrapy.Spider):\n name = \"jimmy-johns\"\n item_attributes = { 'brand': \"Jimmy John's\", 'brand_wikidata': \"Q1689380\" }\n allowed_domains = [\"www.jimmyjohns.com\"]\n download_delay = 0.2\n\n def start_requests(self):\n for state in STATES:\n current_state = json.dumps({ 'state': state })\n request = scrapy.Request(\n CITIES,\n method='POST',\n body=current_state,\n headers=HEADERS,\n callback=self.parse_cities\n )\n request.meta['state'] = state\n yield request\n\n def parse_cities(self, response):\n cities = json.loads(response.body)\n for city in cities['d']:\n current_city = json.dumps({ 'state': response.meta['state'], 'city': city })\n request = scrapy.Request(\n STORES,\n method='POST',\n body=current_city,\n headers=HEADERS,\n callback=self.parse\n )\n yield request\n\n def parse(self, response):\n stores = json.loads(response.body)\n for store in stores['d']:\n full = '{}, {}, {} {}'.format(store['address'], store['city'], store['state'], store['postalcode'])\n yield GeojsonPointItem(\n name=store['storename'],\n addr_full=full,\n opening_hours=store['hours'],\n phone=store['telephone'],\n ref=store['storeid'],\n lon=float(store['lng']),\n lat=float(store['lat']),\n )\n", "path": "locations/spiders/jimmy_johns.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom os import stat\nimport scrapy\nfrom urllib import parse\nimport json\nfrom locations.items import GeojsonPointItem\n\nclass TemplateSpider(scrapy.Spider):\n name = \"jimmy_johns\"\n allowed_domains = [\"locations.jimmyjohns.com\"]\n start_urls = (\n 'https://locations.jimmyjohns.com/sitemap.xml',\n )\n\n def parse(self, response):\n stores = response.xpath('//url/loc[contains(text(),\"sandwiches\")]/text()').extract()\n for store in stores:\n yield scrapy.Request(response.urljoin(store), callback=self.parse_store)\n\n def parse_store(self, response):\n data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]//text()').extract_first())\n\n properties = {\n 'ref': data[0]['url'],\n 'addr_full': data[0]['address']['streetAddress'],\n 'city': data[0]['address']['addressLocality'],\n 'state': data[0]['address']['addressRegion'],\n 'postcode': data[0]['address']['postalCode'],\n 'website': response.url,\n 'lat': data[0]['geo']['latitude'],\n 'lon': data[0]['geo']['longitude'],\n }\n if data[0]['address']['telephone']:\n properties['phone'] = 
data[0]['address']['telephone']\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/jimmy_johns.py"}]} | 1,177 | 1,020 |
gh_patches_debug_4071 | rasdani/github-patches | git_diff | mlcommons__GaNDLF-173 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug during saving probabilities
**Describe the bug**
A small bug occurs when saving probabilities in classification tasks. It is caused by the file existence check: the code should check whether the file exists instead of checking whether the directory exists.
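
For illustration only (the path below is hypothetical), the difference between the two checks on a per-fold logits file:

```python
import os

logits_csv = os.path.join("output_dir", "0", "logits.csv")  # hypothetical fold output path
print(os.path.isdir(logits_csv))   # False even when the CSV file exists, so averaging is skipped
print(os.path.isfile(logits_csv))  # the check that should be used instead
```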
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `GANDLF/inference_manager.py`
Content:
```
1 from GANDLF.inference_loop import inference_loop
2 import os
3 import numpy as np
4 import torch
5 import torch.nn.functional as F
6
7
8 def InferenceManager(dataframe, outputDir, parameters, device):
9 """
10 This function takes in a dataframe, with some other parameters and performs the inference
11 """
12 # get the indeces for kfold splitting
13 inferenceData_full = dataframe
14
15 # # initialize parameters for inference
16 if not ("weights" in parameters):
17 parameters["weights"] = None # no need for loss weights for inference
18 if not ("class_weights" in parameters):
19 parameters["class_weights"] = None # no need for class weights for inference
20
21 n_folds = parameters["nested_training"]["validation"]
22
23 fold_dirs = []
24 if n_folds > 1:
25 directories = sorted(os.listdir(outputDir))
26 for d in directories:
27 if d.isdigit():
28 fold_dirs.append(os.path.join(outputDir, d, ""))
29 else:
30 fold_dirs = [outputDir]
31
32 probs_list = []
33
34 is_classification = parameters["problem_type"] == "classification"
35
36 for fold_dir in fold_dirs:
37 parameters["current_fold_dir"] = fold_dir
38 inference_loop(
39 inferenceDataFromPickle=inferenceData_full,
40 outputDir=fold_dir,
41 device=device,
42 parameters=parameters,
43 )
44
45 logits_dir = os.path.join(fold_dir, "logits.csv")
46 is_logits_dir_exist = os.path.isdir(logits_dir)
47
48 if is_classification and is_logits_dir_exist:
49 fold_logits = np.genfromtxt(logits_dir, delimiter=",")
50 fold_logits = torch.from_numpy(fold_logits)
51 fold_probs = F.softmax(fold_logits, dim=1)
52 probs_list.append(fold_probs)
53
54 if probs_list and is_classification:
55 probs_list = torch.stack(probs_list)
56 averaged_probs = torch.mean(probs_list, 0).numpy()
57 np.savetxt(
58 os.path.join(outputDir, "averaged_probabilities.csv"),
59 averaged_probs,
60 delimiter=",",
61 )
62
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/GANDLF/inference_manager.py b/GANDLF/inference_manager.py
--- a/GANDLF/inference_manager.py
+++ b/GANDLF/inference_manager.py
@@ -43,7 +43,7 @@
)
logits_dir = os.path.join(fold_dir, "logits.csv")
- is_logits_dir_exist = os.path.isdir(logits_dir)
+ is_logits_dir_exist = os.path.isfile(logits_dir)
if is_classification and is_logits_dir_exist:
fold_logits = np.genfromtxt(logits_dir, delimiter=",")
| {"golden_diff": "diff --git a/GANDLF/inference_manager.py b/GANDLF/inference_manager.py\n--- a/GANDLF/inference_manager.py\n+++ b/GANDLF/inference_manager.py\n@@ -43,7 +43,7 @@\n )\n \n logits_dir = os.path.join(fold_dir, \"logits.csv\")\n- is_logits_dir_exist = os.path.isdir(logits_dir)\n+ is_logits_dir_exist = os.path.isfile(logits_dir)\n \n if is_classification and is_logits_dir_exist:\n fold_logits = np.genfromtxt(logits_dir, delimiter=\",\")\n", "issue": "Bug during saving probabilities\n**Describe the bug**\r\nSmall bug is occurring during saving probabilities in classification tasks. This is due to file existence check. It should check if the file exists instead of checking if the directory exists.\r\n\n", "before_files": [{"content": "from GANDLF.inference_loop import inference_loop\nimport os\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\n\ndef InferenceManager(dataframe, outputDir, parameters, device):\n \"\"\"\n This function takes in a dataframe, with some other parameters and performs the inference\n \"\"\"\n # get the indeces for kfold splitting\n inferenceData_full = dataframe\n\n # # initialize parameters for inference\n if not (\"weights\" in parameters):\n parameters[\"weights\"] = None # no need for loss weights for inference\n if not (\"class_weights\" in parameters):\n parameters[\"class_weights\"] = None # no need for class weights for inference\n\n n_folds = parameters[\"nested_training\"][\"validation\"]\n\n fold_dirs = []\n if n_folds > 1:\n directories = sorted(os.listdir(outputDir))\n for d in directories:\n if d.isdigit():\n fold_dirs.append(os.path.join(outputDir, d, \"\"))\n else:\n fold_dirs = [outputDir]\n\n probs_list = []\n\n is_classification = parameters[\"problem_type\"] == \"classification\"\n\n for fold_dir in fold_dirs:\n parameters[\"current_fold_dir\"] = fold_dir\n inference_loop(\n inferenceDataFromPickle=inferenceData_full,\n outputDir=fold_dir,\n device=device,\n parameters=parameters,\n )\n\n logits_dir = os.path.join(fold_dir, \"logits.csv\")\n is_logits_dir_exist = os.path.isdir(logits_dir)\n\n if is_classification and is_logits_dir_exist:\n fold_logits = np.genfromtxt(logits_dir, delimiter=\",\")\n fold_logits = torch.from_numpy(fold_logits)\n fold_probs = F.softmax(fold_logits, dim=1)\n probs_list.append(fold_probs)\n\n if probs_list and is_classification:\n probs_list = torch.stack(probs_list)\n averaged_probs = torch.mean(probs_list, 0).numpy()\n np.savetxt(\n os.path.join(outputDir, \"averaged_probabilities.csv\"),\n averaged_probs,\n delimiter=\",\",\n )\n\n", "path": "GANDLF/inference_manager.py"}], "after_files": [{"content": "from GANDLF.inference_loop import inference_loop\nimport os\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\n\ndef InferenceManager(dataframe, outputDir, parameters, device):\n \"\"\"\n This function takes in a dataframe, with some other parameters and performs the inference\n \"\"\"\n # get the indeces for kfold splitting\n inferenceData_full = dataframe\n\n # # initialize parameters for inference\n if not (\"weights\" in parameters):\n parameters[\"weights\"] = None # no need for loss weights for inference\n if not (\"class_weights\" in parameters):\n parameters[\"class_weights\"] = None # no need for class weights for inference\n\n n_folds = parameters[\"nested_training\"][\"validation\"]\n\n fold_dirs = []\n if n_folds > 1:\n directories = sorted(os.listdir(outputDir))\n for d in directories:\n if d.isdigit():\n fold_dirs.append(os.path.join(outputDir, d, 
\"\"))\n else:\n fold_dirs = [outputDir]\n\n probs_list = []\n\n is_classification = parameters[\"problem_type\"] == \"classification\"\n\n for fold_dir in fold_dirs:\n parameters[\"current_fold_dir\"] = fold_dir\n inference_loop(\n inferenceDataFromPickle=inferenceData_full,\n outputDir=fold_dir,\n device=device,\n parameters=parameters,\n )\n\n logits_dir = os.path.join(fold_dir, \"logits.csv\")\n is_logits_dir_exist = os.path.isfile(logits_dir)\n\n if is_classification and is_logits_dir_exist:\n fold_logits = np.genfromtxt(logits_dir, delimiter=\",\")\n fold_logits = torch.from_numpy(fold_logits)\n fold_probs = F.softmax(fold_logits, dim=1)\n probs_list.append(fold_probs)\n\n if probs_list and is_classification:\n probs_list = torch.stack(probs_list)\n averaged_probs = torch.mean(probs_list, 0).numpy()\n np.savetxt(\n os.path.join(outputDir, \"averaged_probabilities.csv\"),\n averaged_probs,\n delimiter=\",\",\n )\n\n", "path": "GANDLF/inference_manager.py"}]} | 866 | 122 |
gh_patches_debug_3037 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-4360 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Windows: Cannot bundle with debug if pkg_resources is a dependency
This issue happens when I try to bundle my project, during the Analysis.assemble phase, and only when debug is enabled. PyInstaller tries to compile a module that is part of an executable (pyinstaller.exe in this case), which fails because it cannot read the module.
This is with Windows 10, Python 3.6.6 (official from python.org) and PyInstaller 3.5.dev0+51429f8fc (which should be the latest develop version as of today).
Here is the traceback:
```
Traceback (most recent call last):
File "c:\python36-32\Lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\python36-32\Lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Users\RMYROY~1\VIRTUA~1\CDDA-G~3\Scripts\pyinstaller.exe\__main__.py", line 9, in <module>
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\__main__.py", line 111, in run
run_build(pyi_config, spec_file, **vars(args))
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\__main__.py", line 63, in run_build
PyInstaller.building.build_main.main(pyi_config, spec_file, **kwargs)
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\building\build_main.py", line 846, in main
build(specfile, kw.get('distpath'), kw.get('workpath'), kw.get('clean_build'))
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\building\build_main.py", line 793, in build
exec(code, spec_namespace)
File "launcher.spec", line 17, in <module>
noarchive=True)
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\building\build_main.py", line 243, in __init__
self.__postinit__()
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\building\datastruct.py", line 158, in __postinit__
self.assemble()
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\building\build_main.py", line 599, in assemble
for name, path, typecode in compile_py_files(new_toc, CONF['workpath']):
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\utils\misc.py", line 150, in compile_py_files
with open(obj_fnm, 'rb') as fh:
FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\RMYROY~1\\VIRTUA~1\\CDDA-G~3\\Scripts\\pyinstaller.exe\\__main__.pyo'
```
For some reason, the following entry is added in Analysis.pure
```python
('__main__.pyc', 'C:\\Users\\RMYROY~1\\VIRTUA~1\\CDDA-G~3\\Scripts\\pyinstaller.exe\\__main__.py', 'PYMODULE')
```
**That entry is incorrect: either it shouldn't have been added to pure, or it shouldn't be compiled in assemble; this is the source of the issue.**
Here is my spec file:
```python
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['cddagl\\launcher.py'],
pathex=['C:\\Program Files (x86)\\Windows Kits\\10\\Redist\\ucrt\\DLLs\\x86\\', 'C:\\Users\\Rémy Roy\\Projects\\CDDA-Game-Launcher'],
binaries=[],
datas=[('alembic', 'alembic'), ('data', 'data'), ('cddagl/resources', 'cddagl/resources'), ('cddagl/VERSION', 'cddagl'), ('C:\\Users\\Rémy Roy\\VirtualEnvs\\CDDA-Game-Launcher\\Scripts\\UnRAR.exe', '.'), ('cddagl/locale/en/LC_MESSAGES/cddagl.mo', 'cddagl/locale/en/LC_MESSAGES'), ('cddagl/locale/fr/LC_MESSAGES/cddagl.mo', 'cddagl/locale/fr/LC_MESSAGES'), ('cddagl/locale/it/LC_MESSAGES/cddagl.mo', 'cddagl/locale/it/LC_MESSAGES'), ('cddagl/locale/ja/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ja/LC_MESSAGES'), ('cddagl/locale/ru/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ru/LC_MESSAGES')],
hiddenimports=['lxml.cssselect', 'babel.numbers'],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=True)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
[('v', None, 'OPTION')],
exclude_binaries=True,
name='launcher',
debug=True,
bootloader_ignore_signals=False,
strip=False,
upx=False,
console=True , icon='cddagl\\resources\\launcher.ico')
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=False,
upx_exclude=[],
name='launcher')
```
You can probably reproduce this issue easily by cloning [my project](https://github.com/remyroy/CDDA-Game-Launcher) and issuing the following command:
```
python setup.py freeze --debug=1
```
Here is the full pyinstaller log output: https://gist.github.com/remyroy/37f7f0a912d5d714a947cddfb78769d4
I'll investigate how that entry is added in Analysis to give more context to this issue.
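
One way to avoid this would be to have the pkg_resources hook exclude `__main__`, so the bogus entry never reaches `Analysis.pure`; a sketch of that idea (untested here):

```python
# Sketch of a hook-level workaround: tell the pkg_resources hook to exclude __main__
# so the pyinstaller.exe\__main__.py stub is never collected into Analysis.pure.
# File: PyInstaller/hooks/hook-pkg_resources.py
from PyInstaller.utils.hooks import collect_submodules

hiddenimports = collect_submodules('pkg_resources._vendor')
excludedimports = ['__main__']
```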
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-pkg_resources.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2019, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9 from PyInstaller.utils.hooks import collect_submodules
10
11 # pkg_resources keeps vendored modules in its _vendor subpackage, and does
12 # sys.meta_path based import magic to expose them as pkg_resources.extern.*
13 hiddenimports = collect_submodules('pkg_resources._vendor')
14
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py
--- a/PyInstaller/hooks/hook-pkg_resources.py
+++ b/PyInstaller/hooks/hook-pkg_resources.py
@@ -11,3 +11,5 @@
# pkg_resources keeps vendored modules in its _vendor subpackage, and does
# sys.meta_path based import magic to expose them as pkg_resources.extern.*
hiddenimports = collect_submodules('pkg_resources._vendor')
+
+excludedimports = ['__main__']
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py\n--- a/PyInstaller/hooks/hook-pkg_resources.py\n+++ b/PyInstaller/hooks/hook-pkg_resources.py\n@@ -11,3 +11,5 @@\n # pkg_resources keeps vendored modules in its _vendor subpackage, and does\n # sys.meta_path based import magic to expose them as pkg_resources.extern.*\n hiddenimports = collect_submodules('pkg_resources._vendor')\n+\n+excludedimports = ['__main__']\n", "issue": "Windows: Cannot bundle with debug if pkg_resources is a dependency\nThis issue happens when I try to bundle my project, in the Analysis.assemble phase and only when I try to do it with debug enabled. PyInstaller tries to compile a module that is part of an executable (pyinstaller.exe in this case) which fails because it cannot read the module.\r\n\r\nThis is with Windows 10, Python 3.6.6 (official from python.org) and PyInstaller 3.5.dev0+51429f8fc (which should be the latest develop version as of today).\r\n\r\nHere is the traceback:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"c:\\python36-32\\Lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"c:\\python36-32\\Lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\RMYROY~1\\VIRTUA~1\\CDDA-G~3\\Scripts\\pyinstaller.exe\\__main__.py\", line 9, in <module>\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\__main__.py\", line 111, in run\r\n run_build(pyi_config, spec_file, **vars(args))\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\__main__.py\", line 63, in run_build\r\n PyInstaller.building.build_main.main(pyi_config, spec_file, **kwargs)\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\building\\build_main.py\", line 846, in main\r\n build(specfile, kw.get('distpath'), kw.get('workpath'), kw.get('clean_build'))\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\building\\build_main.py\", line 793, in build\r\n exec(code, spec_namespace)\r\n File \"launcher.spec\", line 17, in <module>\r\n noarchive=True)\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\building\\build_main.py\", line 243, in __init__\r\n self.__postinit__()\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\building\\datastruct.py\", line 158, in __postinit__\r\n self.assemble()\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\building\\build_main.py\", line 599, in assemble\r\n for name, path, typecode in compile_py_files(new_toc, CONF['workpath']):\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\utils\\misc.py\", line 150, in compile_py_files\r\n with open(obj_fnm, 'rb') as fh:\r\nFileNotFoundError: [Errno 2] No such file or directory: 'C:\\\\Users\\\\RMYROY~1\\\\VIRTUA~1\\\\CDDA-G~3\\\\Scripts\\\\pyinstaller.exe\\\\__main__.pyo'\r\n```\r\n\r\nFor some reason, the following entry is added in Analysis.pure\r\n\r\n```python\r\n('__main__.pyc', 'C:\\\\Users\\\\RMYROY~1\\\\VIRTUA~1\\\\CDDA-G~3\\\\Scripts\\\\pyinstaller.exe\\\\__main__.py', 'PYMODULE')\r\n```\r\n\r\n**That entry is incorrect in that it shouldn't have been added in pure or it shouldn't be compiled in assemble which is the source of this issue.**\r\n\r\nHere is my spec file:\r\n\r\n```python\r\n# -*- mode: python ; coding: utf-8 -*-\r\n\r\nblock_cipher = 
None\r\n\r\n\r\na = Analysis(['cddagl\\\\launcher.py'],\r\n pathex=['C:\\\\Program Files (x86)\\\\Windows Kits\\\\10\\\\Redist\\\\ucrt\\\\DLLs\\\\x86\\\\', 'C:\\\\Users\\\\R\u00e9my Roy\\\\Projects\\\\CDDA-Game-Launcher'],\r\n binaries=[],\r\n datas=[('alembic', 'alembic'), ('data', 'data'), ('cddagl/resources', 'cddagl/resources'), ('cddagl/VERSION', 'cddagl'), ('C:\\\\Users\\\\R\u00e9my Roy\\\\VirtualEnvs\\\\CDDA-Game-Launcher\\\\Scripts\\\\UnRAR.exe', '.'), ('cddagl/locale/en/LC_MESSAGES/cddagl.mo', 'cddagl/locale/en/LC_MESSAGES'), ('cddagl/locale/fr/LC_MESSAGES/cddagl.mo', 'cddagl/locale/fr/LC_MESSAGES'), ('cddagl/locale/it/LC_MESSAGES/cddagl.mo', 'cddagl/locale/it/LC_MESSAGES'), ('cddagl/locale/ja/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ja/LC_MESSAGES'), ('cddagl/locale/ru/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ru/LC_MESSAGES')],\r\n hiddenimports=['lxml.cssselect', 'babel.numbers'],\r\n hookspath=[],\r\n runtime_hooks=[],\r\n excludes=[],\r\n win_no_prefer_redirects=False,\r\n win_private_assemblies=False,\r\n cipher=block_cipher,\r\n noarchive=True)\r\npyz = PYZ(a.pure, a.zipped_data,\r\n cipher=block_cipher)\r\nexe = EXE(pyz,\r\n a.scripts,\r\n [('v', None, 'OPTION')],\r\n exclude_binaries=True,\r\n name='launcher',\r\n debug=True,\r\n bootloader_ignore_signals=False,\r\n strip=False,\r\n upx=False,\r\n console=True , icon='cddagl\\\\resources\\\\launcher.ico')\r\ncoll = COLLECT(exe,\r\n a.binaries,\r\n a.zipfiles,\r\n a.datas,\r\n strip=False,\r\n upx=False,\r\n upx_exclude=[],\r\n name='launcher')\r\n```\r\n\r\nYou can probably reproduce this issue easily by cloning [my project](https://github.com/remyroy/CDDA-Game-Launcher) and issuing the following command:\r\n\r\n```\r\npython setup.py freeze --debug=1\r\n```\r\n\r\nHere is the full pyinstaller log output: https://gist.github.com/remyroy/37f7f0a912d5d714a947cddfb78769d4\r\n\r\nI'll investigate how that entry is added in Analysis to give more context to this issue.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2019, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nfrom PyInstaller.utils.hooks import collect_submodules\n\n# pkg_resources keeps vendored modules in its _vendor subpackage, and does\n# sys.meta_path based import magic to expose them as pkg_resources.extern.*\nhiddenimports = collect_submodules('pkg_resources._vendor')\n", "path": "PyInstaller/hooks/hook-pkg_resources.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2019, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nfrom PyInstaller.utils.hooks import collect_submodules\n\n# pkg_resources keeps vendored modules in its _vendor subpackage, and does\n# sys.meta_path based import magic to expose them as pkg_resources.extern.*\nhiddenimports = collect_submodules('pkg_resources._vendor')\n\nexcludedimports = ['__main__']\n", "path": "PyInstaller/hooks/hook-pkg_resources.py"}]} | 1,920 | 
119 |
gh_patches_debug_878 | rasdani/github-patches | git_diff | privacyidea__privacyidea-1746 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix typo in registration token
The example of the registration token contains a typo.
The tokentype is of course "registration", not "register".
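
For reference, the corrected example corresponds to an enrollment call along these lines (host and authorization header are placeholders, not from the source):

```python
import requests

# Placeholders: host and admin token are made up; the point is type=registration.
requests.post(
    "https://privacyidea.example.com/token/init",
    data={"type": "registration", "user": "cornelius", "realm": "realm1"},
    headers={"Authorization": "<admin-auth-token>"},
)
```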
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `privacyidea/lib/tokens/registrationtoken.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # privacyIDEA
4 # Aug 12, 2014 Cornelius Kölbel
5 # License: AGPLv3
6 # contact: http://www.privacyidea.org
7 #
8 # 2015-01-29 Adapt during migration to flask
9 # Cornelius Kölbel <[email protected]>
10 #
11 # This code is free software; you can redistribute it and/or
12 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
13 # License as published by the Free Software Foundation; either
14 # version 3 of the License, or any later version.
15 #
16 # This code is distributed in the hope that it will be useful,
17 # but WITHOUT ANY WARRANTY; without even the implied warranty of
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
20 #
21 # You should have received a copy of the GNU Affero General Public
22 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
23 #
24 """
25 This file contains the definition of the RegisterToken class.
26
27 The code is tested in test_lib_tokens_registration.py.
28 """
29
30 import logging
31
32 from privacyidea.lib.utils import to_unicode
33 from privacyidea.lib.tokens.passwordtoken import PasswordTokenClass
34 from privacyidea.lib.log import log_with
35 from privacyidea.lib.crypto import generate_password
36 from privacyidea.lib.decorators import check_token_locked
37 from privacyidea.lib import _
38
39 optional = True
40 required = False
41
42 log = logging.getLogger(__name__)
43
44
45 class RegistrationTokenClass(PasswordTokenClass):
46 """
47 Token to implement a registration code.
48 It can be used to create a registration code or a "TAN" which can be used
49 once by a user to authenticate somewhere. After this registration code is
50 used, the token is automatically deleted.
51
52 The idea is to provide a workflow, where the user can get a registration code
53 by e.g. postal mail and then use this code as the initial first factor to
54 authenticate to the UI to enroll real tokens.
55
56 A registration code can be created by an administrative task with the
57 token/init api like this:
58
59 **Example Authentication Request**:
60
61 .. sourcecode:: http
62
63 POST /token/init HTTP/1.1
64 Host: example.com
65 Accept: application/json
66
67 type=register
68 user=cornelius
69 realm=realm1
70
71 **Example response**:
72
73 .. sourcecode:: http
74
75 HTTP/1.1 200 OK
76 Content-Type: application/json
77
78 {
79 "detail": {
80 "registrationcode": "12345808124095097608"
81 },
82 "id": 1,
83 "jsonrpc": "2.0",
84 "result": {
85 "status": true,
86 "value": true
87 },
88 "version": "privacyIDEA unknown"
89 }
90
91 """
92
93 def __init__(self, aToken):
94 PasswordTokenClass.__init__(self, aToken)
95 self.hKeyRequired = False
96 self.set_type(u"registration")
97 self.otp_len = 24
98
99 @staticmethod
100 def get_class_type():
101 return "registration"
102
103 @staticmethod
104 def get_class_prefix():
105 return "REG"
106
107 @staticmethod
108 @log_with(log)
109 def get_class_info(key=None, ret='all'):
110 """
111 returns a subtree of the token definition
112
113 :param key: subsection identifier
114 :type key: string
115 :param ret: default return value, if nothing is found
116 :type ret: user defined
117 :return: subsection if key exists or user defined
118 :rtype: dict or scalar
119 """
120 res = {'type': 'registration',
121 'title': 'Registration Code Token',
122 'description': _('Registration: A token that creates a '
123 'registration code that '
124 'can be used as a second factor once.'),
125 'init': {},
126 'config': {},
127 'user': [],
128 # This tokentype is enrollable in the UI for...
129 'ui_enroll': ["admin"],
130 'policy': {},
131 }
132
133 if key:
134 ret = res.get(key)
135 else:
136 if ret == 'all':
137 ret = res
138 return ret
139
140 def update(self, param):
141 """
142 This method is called during the initialization process.
143 :param param: parameters from the token init
144 :type param: dict
145 :return: None
146 """
147 if "genkey" in param:
148 # We do not need the genkey! We generate anyway.
149 # Otherwise genkey and otpkey will raise an exception in
150 # PasswordTokenClass
151 del param["genkey"]
152 param["otpkey"] = generate_password(size=self.otp_len)
153 PasswordTokenClass.update(self, param)
154
155 @log_with(log, log_entry=False)
156 @check_token_locked
157 def inc_count_auth_success(self):
158 """
159 Increase the counter, that counts successful authentications
160 In case of successful authentication the token does needs to be deleted.
161 """
162 self.delete_token()
163 return 1
164
165 @log_with(log)
166 def get_init_detail(self, params=None, user=None):
167 """
168 At the end of the initialization we return the registration code.
169 """
170 response_detail = PasswordTokenClass.get_init_detail(self, params, user)
171 params = params or {}
172 secretHOtp = self.token.get_otpkey()
173 registrationcode = secretHOtp.getKey()
174 response_detail["registrationcode"] = to_unicode(registrationcode)
175 return response_detail
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/privacyidea/lib/tokens/registrationtoken.py b/privacyidea/lib/tokens/registrationtoken.py
--- a/privacyidea/lib/tokens/registrationtoken.py
+++ b/privacyidea/lib/tokens/registrationtoken.py
@@ -64,7 +64,7 @@
Host: example.com
Accept: application/json
- type=register
+ type=registration
user=cornelius
realm=realm1
| {"golden_diff": "diff --git a/privacyidea/lib/tokens/registrationtoken.py b/privacyidea/lib/tokens/registrationtoken.py\n--- a/privacyidea/lib/tokens/registrationtoken.py\n+++ b/privacyidea/lib/tokens/registrationtoken.py\n@@ -64,7 +64,7 @@\n Host: example.com\n Accept: application/json\n \n- type=register\n+ type=registration\n user=cornelius\n realm=realm1\n", "issue": "Fix typo in registration token\nThe example of the registration token contains a typo.\r\nThe toketype of course is a \"registration\" token, not a \"register\".\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# privacyIDEA\n# Aug 12, 2014 Cornelius K\u00f6lbel\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# 2015-01-29 Adapt during migration to flask\n# Cornelius K\u00f6lbel <[email protected]>\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nThis file contains the definition of the RegisterToken class.\n\nThe code is tested in test_lib_tokens_registration.py.\n\"\"\"\n\nimport logging\n\nfrom privacyidea.lib.utils import to_unicode\nfrom privacyidea.lib.tokens.passwordtoken import PasswordTokenClass\nfrom privacyidea.lib.log import log_with\nfrom privacyidea.lib.crypto import generate_password\nfrom privacyidea.lib.decorators import check_token_locked\nfrom privacyidea.lib import _\n\noptional = True\nrequired = False\n\nlog = logging.getLogger(__name__)\n\n\nclass RegistrationTokenClass(PasswordTokenClass):\n \"\"\"\n Token to implement a registration code.\n It can be used to create a registration code or a \"TAN\" which can be used\n once by a user to authenticate somewhere. After this registration code is\n used, the token is automatically deleted.\n\n The idea is to provide a workflow, where the user can get a registration code\n by e.g. postal mail and then use this code as the initial first factor to\n authenticate to the UI to enroll real tokens.\n\n A registration code can be created by an administrative task with the\n token/init api like this:\n\n **Example Authentication Request**:\n\n .. sourcecode:: http\n\n POST /token/init HTTP/1.1\n Host: example.com\n Accept: application/json\n\n type=register\n user=cornelius\n realm=realm1\n\n **Example response**:\n\n .. 
sourcecode:: http\n\n HTTP/1.1 200 OK\n Content-Type: application/json\n\n {\n \"detail\": {\n \"registrationcode\": \"12345808124095097608\"\n },\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"result\": {\n \"status\": true,\n \"value\": true\n },\n \"version\": \"privacyIDEA unknown\"\n }\n\n \"\"\"\n\n def __init__(self, aToken):\n PasswordTokenClass.__init__(self, aToken)\n self.hKeyRequired = False\n self.set_type(u\"registration\")\n self.otp_len = 24\n\n @staticmethod\n def get_class_type():\n return \"registration\"\n\n @staticmethod\n def get_class_prefix():\n return \"REG\"\n\n @staticmethod\n @log_with(log)\n def get_class_info(key=None, ret='all'):\n \"\"\"\n returns a subtree of the token definition\n\n :param key: subsection identifier\n :type key: string\n :param ret: default return value, if nothing is found\n :type ret: user defined\n :return: subsection if key exists or user defined\n :rtype: dict or scalar\n \"\"\"\n res = {'type': 'registration',\n 'title': 'Registration Code Token',\n 'description': _('Registration: A token that creates a '\n 'registration code that '\n 'can be used as a second factor once.'),\n 'init': {},\n 'config': {},\n 'user': [],\n # This tokentype is enrollable in the UI for...\n 'ui_enroll': [\"admin\"],\n 'policy': {},\n }\n\n if key:\n ret = res.get(key)\n else:\n if ret == 'all':\n ret = res\n return ret\n\n def update(self, param):\n \"\"\"\n This method is called during the initialization process.\n :param param: parameters from the token init\n :type param: dict\n :return: None\n \"\"\"\n if \"genkey\" in param:\n # We do not need the genkey! We generate anyway.\n # Otherwise genkey and otpkey will raise an exception in\n # PasswordTokenClass\n del param[\"genkey\"]\n param[\"otpkey\"] = generate_password(size=self.otp_len)\n PasswordTokenClass.update(self, param)\n\n @log_with(log, log_entry=False)\n @check_token_locked\n def inc_count_auth_success(self):\n \"\"\"\n Increase the counter, that counts successful authentications\n In case of successful authentication the token does needs to be deleted.\n \"\"\"\n self.delete_token()\n return 1\n\n @log_with(log)\n def get_init_detail(self, params=None, user=None):\n \"\"\"\n At the end of the initialization we return the registration code.\n \"\"\"\n response_detail = PasswordTokenClass.get_init_detail(self, params, user)\n params = params or {}\n secretHOtp = self.token.get_otpkey()\n registrationcode = secretHOtp.getKey()\n response_detail[\"registrationcode\"] = to_unicode(registrationcode)\n return response_detail\n", "path": "privacyidea/lib/tokens/registrationtoken.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# privacyIDEA\n# Aug 12, 2014 Cornelius K\u00f6lbel\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# 2015-01-29 Adapt during migration to flask\n# Cornelius K\u00f6lbel <[email protected]>\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nThis file contains the definition of the RegisterToken class.\n\nThe code is tested in test_lib_tokens_registration.py.\n\"\"\"\n\nimport logging\n\nfrom privacyidea.lib.utils import to_unicode\nfrom privacyidea.lib.tokens.passwordtoken import PasswordTokenClass\nfrom privacyidea.lib.log import log_with\nfrom privacyidea.lib.crypto import generate_password\nfrom privacyidea.lib.decorators import check_token_locked\nfrom privacyidea.lib import _\n\noptional = True\nrequired = False\n\nlog = logging.getLogger(__name__)\n\n\nclass RegistrationTokenClass(PasswordTokenClass):\n \"\"\"\n Token to implement a registration code.\n It can be used to create a registration code or a \"TAN\" which can be used\n once by a user to authenticate somewhere. After this registration code is\n used, the token is automatically deleted.\n\n The idea is to provide a workflow, where the user can get a registration code\n by e.g. postal mail and then use this code as the initial first factor to\n authenticate to the UI to enroll real tokens.\n\n A registration code can be created by an administrative task with the\n token/init api like this:\n\n **Example Authentication Request**:\n\n .. sourcecode:: http\n\n POST /token/init HTTP/1.1\n Host: example.com\n Accept: application/json\n\n type=registration\n user=cornelius\n realm=realm1\n\n **Example response**:\n\n .. sourcecode:: http\n\n HTTP/1.1 200 OK\n Content-Type: application/json\n\n {\n \"detail\": {\n \"registrationcode\": \"12345808124095097608\"\n },\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"result\": {\n \"status\": true,\n \"value\": true\n },\n \"version\": \"privacyIDEA unknown\"\n }\n\n \"\"\"\n\n def __init__(self, aToken):\n PasswordTokenClass.__init__(self, aToken)\n self.hKeyRequired = False\n self.set_type(u\"registration\")\n self.otp_len = 24\n\n @staticmethod\n def get_class_type():\n return \"registration\"\n\n @staticmethod\n def get_class_prefix():\n return \"REG\"\n\n @staticmethod\n @log_with(log)\n def get_class_info(key=None, ret='all'):\n \"\"\"\n returns a subtree of the token definition\n\n :param key: subsection identifier\n :type key: string\n :param ret: default return value, if nothing is found\n :type ret: user defined\n :return: subsection if key exists or user defined\n :rtype: dict or scalar\n \"\"\"\n res = {'type': 'registration',\n 'title': 'Registration Code Token',\n 'description': _('Registration: A token that creates a '\n 'registration code that '\n 'can be used as a second factor once.'),\n 'init': {},\n 'config': {},\n 'user': [],\n # This tokentype is enrollable in the UI for...\n 'ui_enroll': [\"admin\"],\n 'policy': {},\n }\n\n if key:\n ret = res.get(key)\n else:\n if ret == 'all':\n ret = res\n return ret\n\n def update(self, param):\n \"\"\"\n This method is called during the initialization process.\n :param param: parameters from the token init\n :type param: dict\n :return: None\n \"\"\"\n if \"genkey\" in param:\n # We do not need the genkey! 
We generate anyway.\n # Otherwise genkey and otpkey will raise an exception in\n # PasswordTokenClass\n del param[\"genkey\"]\n param[\"otpkey\"] = generate_password(size=self.otp_len)\n PasswordTokenClass.update(self, param)\n\n @log_with(log, log_entry=False)\n @check_token_locked\n def inc_count_auth_success(self):\n \"\"\"\n Increase the counter, that counts successful authentications\n In case of successful authentication the token does needs to be deleted.\n \"\"\"\n self.delete_token()\n return 1\n\n @log_with(log)\n def get_init_detail(self, params=None, user=None):\n \"\"\"\n At the end of the initialization we return the registration code.\n \"\"\"\n response_detail = PasswordTokenClass.get_init_detail(self, params, user)\n params = params or {}\n secretHOtp = self.token.get_otpkey()\n registrationcode = secretHOtp.getKey()\n response_detail[\"registrationcode\"] = to_unicode(registrationcode)\n return response_detail\n", "path": "privacyidea/lib/tokens/registrationtoken.py"}]} | 1,954 | 101 |
gh_patches_debug_31575 | rasdani/github-patches | git_diff | python-discord__bot-475 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tags can't be edited due to using the POST method with the API
Currently, the `!tag edit` subcommand is just an alias of `!tag set`. This means that if we try to edit an existing tag, the bot will use the POST HTTP method to communicate with the API. Since we're not posting a new tag, but editing an existing entry, the API will reject this request.
Instead of using POST, we should be using PATCH, since we're only partially updating the entry in the database.
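As a rough sketch of the intended behaviour (assuming the bot's internal `api_client` exposes an async `patch` method mirroring its existing `post` and `delete`, and that tags are addressable by name under `bot/tags/<name>`):
```python
# Minimal sketch, not the bot's actual command implementation.
# `api_client` is assumed to expose an async `patch` like its existing `post`.
async def edit_tag(api_client, tag_name: str, tag_content: str) -> None:
    body = {"embed": {"title": tag_name, "description": tag_content}}
    # POSTing to bot/tags would try to create a new entry and be rejected for
    # an existing tag; PATCHing the named resource performs a partial update.
    await api_client.patch(f"bot/tags/{tag_name}", json=body)
```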
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/cogs/tags.py`
Content:
```
1 import logging
2 import time
3
4 from discord import Colour, Embed
5 from discord.ext.commands import Bot, Cog, Context, group
6
7 from bot.constants import Channels, Cooldowns, MODERATION_ROLES, Roles
8 from bot.converters import TagContentConverter, TagNameConverter
9 from bot.decorators import with_role
10 from bot.pagination import LinePaginator
11
12
13 log = logging.getLogger(__name__)
14
15 TEST_CHANNELS = (
16 Channels.devtest,
17 Channels.bot,
18 Channels.helpers
19 )
20
21
22 class Tags(Cog):
23 """Save new tags and fetch existing tags."""
24
25 def __init__(self, bot: Bot):
26 self.bot = bot
27 self.tag_cooldowns = {}
28
29 @group(name='tags', aliases=('tag', 't'), invoke_without_command=True)
30 async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:
31 """Show all known tags, a single tag, or run a subcommand."""
32 await ctx.invoke(self.get_command, tag_name=tag_name)
33
34 @tags_group.command(name='get', aliases=('show', 'g'))
35 async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:
36 """Get a specified tag, or a list of all tags if no tag is specified."""
37 def _command_on_cooldown(tag_name: str) -> bool:
38 """
39 Check if the command is currently on cooldown, on a per-tag, per-channel basis.
40
41 The cooldown duration is set in constants.py.
42 """
43 now = time.time()
44
45 cooldown_conditions = (
46 tag_name
47 and tag_name in self.tag_cooldowns
48 and (now - self.tag_cooldowns[tag_name]["time"]) < Cooldowns.tags
49 and self.tag_cooldowns[tag_name]["channel"] == ctx.channel.id
50 )
51
52 if cooldown_conditions:
53 return True
54 return False
55
56 if _command_on_cooldown(tag_name):
57 time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name]["time"])
58 log.warning(f"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. "
59 f"Cooldown ends in {time_left:.1f} seconds.")
60 return
61
62 if tag_name is not None:
63 tag = await self.bot.api_client.get(f'bot/tags/{tag_name}')
64 if ctx.channel.id not in TEST_CHANNELS:
65 self.tag_cooldowns[tag_name] = {
66 "time": time.time(),
67 "channel": ctx.channel.id
68 }
69 await ctx.send(embed=Embed.from_dict(tag['embed']))
70
71 else:
72 tags = await self.bot.api_client.get('bot/tags')
73 if not tags:
74 await ctx.send(embed=Embed(
75 description="**There are no tags in the database!**",
76 colour=Colour.red()
77 ))
78 else:
79 embed: Embed = Embed(title="**Current tags**")
80 await LinePaginator.paginate(
81 sorted(f"**»** {tag['title']}" for tag in tags),
82 ctx,
83 embed,
84 footer_text="To show a tag, type !tags <tagname>.",
85 empty=False,
86 max_lines=15
87 )
88
89 @tags_group.command(name='set', aliases=('add', 'edit', 's'))
90 @with_role(*MODERATION_ROLES)
91 async def set_command(
92 self,
93 ctx: Context,
94 tag_name: TagNameConverter,
95 *,
96 tag_content: TagContentConverter,
97 ) -> None:
98 """Create a new tag or update an existing one."""
99 body = {
100 'title': tag_name.lower().strip(),
101 'embed': {
102 'title': tag_name,
103 'description': tag_content
104 }
105 }
106
107 await self.bot.api_client.post('bot/tags', json=body)
108
109 log.debug(f"{ctx.author} successfully added the following tag to our database: \n"
110 f"tag_name: {tag_name}\n"
111 f"tag_content: '{tag_content}'\n")
112
113 await ctx.send(embed=Embed(
114 title="Tag successfully added",
115 description=f"**{tag_name}** added to tag database.",
116 colour=Colour.blurple()
117 ))
118
119 @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))
120 @with_role(Roles.admin, Roles.owner)
121 async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:
122 """Remove a tag from the database."""
123 await self.bot.api_client.delete(f'bot/tags/{tag_name}')
124
125 log.debug(f"{ctx.author} successfully deleted the tag called '{tag_name}'")
126 await ctx.send(embed=Embed(
127 title=tag_name,
128 description=f"Tag successfully removed: {tag_name}.",
129 colour=Colour.blurple()
130 ))
131
132
133 def setup(bot: Bot) -> None:
134 """Tags cog load."""
135 bot.add_cog(Tags(bot))
136 log.info("Cog loaded: Tags")
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/cogs/tags.py b/bot/cogs/tags.py
--- a/bot/cogs/tags.py
+++ b/bot/cogs/tags.py
@@ -86,7 +86,7 @@
max_lines=15
)
- @tags_group.command(name='set', aliases=('add', 'edit', 's'))
+ @tags_group.command(name='set', aliases=('add', 's'))
@with_role(*MODERATION_ROLES)
async def set_command(
self,
@@ -95,7 +95,7 @@
*,
tag_content: TagContentConverter,
) -> None:
- """Create a new tag or update an existing one."""
+ """Create a new tag."""
body = {
'title': tag_name.lower().strip(),
'embed': {
@@ -116,6 +116,35 @@
colour=Colour.blurple()
))
+ @tags_group.command(name='edit', aliases=('e', ))
+ @with_role(*MODERATION_ROLES)
+ async def edit_command(
+ self,
+ ctx: Context,
+ tag_name: TagNameConverter,
+ *,
+ tag_content: TagContentConverter,
+ ) -> None:
+ """Edit an existing tag."""
+ body = {
+ 'embed': {
+ 'title': tag_name,
+ 'description': tag_content
+ }
+ }
+
+ await self.bot.api_client.patch(f'bot/tags/{tag_name}', json=body)
+
+ log.debug(f"{ctx.author} successfully edited the following tag in our database: \n"
+ f"tag_name: {tag_name}\n"
+ f"tag_content: '{tag_content}'\n")
+
+ await ctx.send(embed=Embed(
+ title="Tag successfully edited",
+ description=f"**{tag_name}** edited in the database.",
+ colour=Colour.blurple()
+ ))
+
@tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))
@with_role(Roles.admin, Roles.owner)
async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:
| {"golden_diff": "diff --git a/bot/cogs/tags.py b/bot/cogs/tags.py\n--- a/bot/cogs/tags.py\n+++ b/bot/cogs/tags.py\n@@ -86,7 +86,7 @@\n max_lines=15\n )\n \n- @tags_group.command(name='set', aliases=('add', 'edit', 's'))\n+ @tags_group.command(name='set', aliases=('add', 's'))\n @with_role(*MODERATION_ROLES)\n async def set_command(\n self,\n@@ -95,7 +95,7 @@\n *,\n tag_content: TagContentConverter,\n ) -> None:\n- \"\"\"Create a new tag or update an existing one.\"\"\"\n+ \"\"\"Create a new tag.\"\"\"\n body = {\n 'title': tag_name.lower().strip(),\n 'embed': {\n@@ -116,6 +116,35 @@\n colour=Colour.blurple()\n ))\n \n+ @tags_group.command(name='edit', aliases=('e', ))\n+ @with_role(*MODERATION_ROLES)\n+ async def edit_command(\n+ self,\n+ ctx: Context,\n+ tag_name: TagNameConverter,\n+ *,\n+ tag_content: TagContentConverter,\n+ ) -> None:\n+ \"\"\"Edit an existing tag.\"\"\"\n+ body = {\n+ 'embed': {\n+ 'title': tag_name,\n+ 'description': tag_content\n+ }\n+ }\n+\n+ await self.bot.api_client.patch(f'bot/tags/{tag_name}', json=body)\n+\n+ log.debug(f\"{ctx.author} successfully edited the following tag in our database: \\n\"\n+ f\"tag_name: {tag_name}\\n\"\n+ f\"tag_content: '{tag_content}'\\n\")\n+\n+ await ctx.send(embed=Embed(\n+ title=\"Tag successfully edited\",\n+ description=f\"**{tag_name}** edited in the database.\",\n+ colour=Colour.blurple()\n+ ))\n+\n @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))\n @with_role(Roles.admin, Roles.owner)\n async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:\n", "issue": "Tags can't be edited due to using the POST method with the API\nCurrently, the `!tag edit` subcommand is just an alias of `!tag set`. This means that if we try to edit an existing tag, the bot will use the POST http method to communicate with the API. Since we're not posting a new tag, but editing an existing entry, the API will reject this request. 
\r\n\r\nInstead of using POST, we should be using PATCH, since we're only partially updating the entry in the database.\n", "before_files": [{"content": "import logging\nimport time\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import Bot, Cog, Context, group\n\nfrom bot.constants import Channels, Cooldowns, MODERATION_ROLES, Roles\nfrom bot.converters import TagContentConverter, TagNameConverter\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\n\n\nlog = logging.getLogger(__name__)\n\nTEST_CHANNELS = (\n Channels.devtest,\n Channels.bot,\n Channels.helpers\n)\n\n\nclass Tags(Cog):\n \"\"\"Save new tags and fetch existing tags.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.tag_cooldowns = {}\n\n @group(name='tags', aliases=('tag', 't'), invoke_without_command=True)\n async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Show all known tags, a single tag, or run a subcommand.\"\"\"\n await ctx.invoke(self.get_command, tag_name=tag_name)\n\n @tags_group.command(name='get', aliases=('show', 'g'))\n async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Get a specified tag, or a list of all tags if no tag is specified.\"\"\"\n def _command_on_cooldown(tag_name: str) -> bool:\n \"\"\"\n Check if the command is currently on cooldown, on a per-tag, per-channel basis.\n\n The cooldown duration is set in constants.py.\n \"\"\"\n now = time.time()\n\n cooldown_conditions = (\n tag_name\n and tag_name in self.tag_cooldowns\n and (now - self.tag_cooldowns[tag_name][\"time\"]) < Cooldowns.tags\n and self.tag_cooldowns[tag_name][\"channel\"] == ctx.channel.id\n )\n\n if cooldown_conditions:\n return True\n return False\n\n if _command_on_cooldown(tag_name):\n time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name][\"time\"])\n log.warning(f\"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. 
\"\n f\"Cooldown ends in {time_left:.1f} seconds.\")\n return\n\n if tag_name is not None:\n tag = await self.bot.api_client.get(f'bot/tags/{tag_name}')\n if ctx.channel.id not in TEST_CHANNELS:\n self.tag_cooldowns[tag_name] = {\n \"time\": time.time(),\n \"channel\": ctx.channel.id\n }\n await ctx.send(embed=Embed.from_dict(tag['embed']))\n\n else:\n tags = await self.bot.api_client.get('bot/tags')\n if not tags:\n await ctx.send(embed=Embed(\n description=\"**There are no tags in the database!**\",\n colour=Colour.red()\n ))\n else:\n embed: Embed = Embed(title=\"**Current tags**\")\n await LinePaginator.paginate(\n sorted(f\"**\u00bb** {tag['title']}\" for tag in tags),\n ctx,\n embed,\n footer_text=\"To show a tag, type !tags <tagname>.\",\n empty=False,\n max_lines=15\n )\n\n @tags_group.command(name='set', aliases=('add', 'edit', 's'))\n @with_role(*MODERATION_ROLES)\n async def set_command(\n self,\n ctx: Context,\n tag_name: TagNameConverter,\n *,\n tag_content: TagContentConverter,\n ) -> None:\n \"\"\"Create a new tag or update an existing one.\"\"\"\n body = {\n 'title': tag_name.lower().strip(),\n 'embed': {\n 'title': tag_name,\n 'description': tag_content\n }\n }\n\n await self.bot.api_client.post('bot/tags', json=body)\n\n log.debug(f\"{ctx.author} successfully added the following tag to our database: \\n\"\n f\"tag_name: {tag_name}\\n\"\n f\"tag_content: '{tag_content}'\\n\")\n\n await ctx.send(embed=Embed(\n title=\"Tag successfully added\",\n description=f\"**{tag_name}** added to tag database.\",\n colour=Colour.blurple()\n ))\n\n @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))\n @with_role(Roles.admin, Roles.owner)\n async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:\n \"\"\"Remove a tag from the database.\"\"\"\n await self.bot.api_client.delete(f'bot/tags/{tag_name}')\n\n log.debug(f\"{ctx.author} successfully deleted the tag called '{tag_name}'\")\n await ctx.send(embed=Embed(\n title=tag_name,\n description=f\"Tag successfully removed: {tag_name}.\",\n colour=Colour.blurple()\n ))\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Tags cog load.\"\"\"\n bot.add_cog(Tags(bot))\n log.info(\"Cog loaded: Tags\")\n", "path": "bot/cogs/tags.py"}], "after_files": [{"content": "import logging\nimport time\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import Bot, Cog, Context, group\n\nfrom bot.constants import Channels, Cooldowns, MODERATION_ROLES, Roles\nfrom bot.converters import TagContentConverter, TagNameConverter\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\n\n\nlog = logging.getLogger(__name__)\n\nTEST_CHANNELS = (\n Channels.devtest,\n Channels.bot,\n Channels.helpers\n)\n\n\nclass Tags(Cog):\n \"\"\"Save new tags and fetch existing tags.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.tag_cooldowns = {}\n\n @group(name='tags', aliases=('tag', 't'), invoke_without_command=True)\n async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Show all known tags, a single tag, or run a subcommand.\"\"\"\n await ctx.invoke(self.get_command, tag_name=tag_name)\n\n @tags_group.command(name='get', aliases=('show', 'g'))\n async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Get a specified tag, or a list of all tags if no tag is specified.\"\"\"\n def _command_on_cooldown(tag_name: str) -> bool:\n \"\"\"\n Check if the command is currently on cooldown, on a per-tag, per-channel 
basis.\n\n The cooldown duration is set in constants.py.\n \"\"\"\n now = time.time()\n\n cooldown_conditions = (\n tag_name\n and tag_name in self.tag_cooldowns\n and (now - self.tag_cooldowns[tag_name][\"time\"]) < Cooldowns.tags\n and self.tag_cooldowns[tag_name][\"channel\"] == ctx.channel.id\n )\n\n if cooldown_conditions:\n return True\n return False\n\n if _command_on_cooldown(tag_name):\n time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name][\"time\"])\n log.warning(f\"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. \"\n f\"Cooldown ends in {time_left:.1f} seconds.\")\n return\n\n if tag_name is not None:\n tag = await self.bot.api_client.get(f'bot/tags/{tag_name}')\n if ctx.channel.id not in TEST_CHANNELS:\n self.tag_cooldowns[tag_name] = {\n \"time\": time.time(),\n \"channel\": ctx.channel.id\n }\n await ctx.send(embed=Embed.from_dict(tag['embed']))\n\n else:\n tags = await self.bot.api_client.get('bot/tags')\n if not tags:\n await ctx.send(embed=Embed(\n description=\"**There are no tags in the database!**\",\n colour=Colour.red()\n ))\n else:\n embed: Embed = Embed(title=\"**Current tags**\")\n await LinePaginator.paginate(\n sorted(f\"**\u00bb** {tag['title']}\" for tag in tags),\n ctx,\n embed,\n footer_text=\"To show a tag, type !tags <tagname>.\",\n empty=False,\n max_lines=15\n )\n\n @tags_group.command(name='set', aliases=('add', 's'))\n @with_role(*MODERATION_ROLES)\n async def set_command(\n self,\n ctx: Context,\n tag_name: TagNameConverter,\n *,\n tag_content: TagContentConverter,\n ) -> None:\n \"\"\"Create a new tag.\"\"\"\n body = {\n 'title': tag_name.lower().strip(),\n 'embed': {\n 'title': tag_name,\n 'description': tag_content\n }\n }\n\n await self.bot.api_client.post('bot/tags', json=body)\n\n log.debug(f\"{ctx.author} successfully added the following tag to our database: \\n\"\n f\"tag_name: {tag_name}\\n\"\n f\"tag_content: '{tag_content}'\\n\")\n\n await ctx.send(embed=Embed(\n title=\"Tag successfully added\",\n description=f\"**{tag_name}** added to tag database.\",\n colour=Colour.blurple()\n ))\n\n @tags_group.command(name='edit', aliases=('e', ))\n @with_role(*MODERATION_ROLES)\n async def edit_command(\n self,\n ctx: Context,\n tag_name: TagNameConverter,\n *,\n tag_content: TagContentConverter,\n ) -> None:\n \"\"\"Edit an existing tag.\"\"\"\n body = {\n 'embed': {\n 'title': tag_name,\n 'description': tag_content\n }\n }\n\n await self.bot.api_client.patch(f'bot/tags/{tag_name}', json=body)\n\n log.debug(f\"{ctx.author} successfully edited the following tag in our database: \\n\"\n f\"tag_name: {tag_name}\\n\"\n f\"tag_content: '{tag_content}'\\n\")\n\n await ctx.send(embed=Embed(\n title=\"Tag successfully edited\",\n description=f\"**{tag_name}** edited in the database.\",\n colour=Colour.blurple()\n ))\n\n @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))\n @with_role(Roles.admin, Roles.owner)\n async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:\n \"\"\"Remove a tag from the database.\"\"\"\n await self.bot.api_client.delete(f'bot/tags/{tag_name}')\n\n log.debug(f\"{ctx.author} successfully deleted the tag called '{tag_name}'\")\n await ctx.send(embed=Embed(\n title=tag_name,\n description=f\"Tag successfully removed: {tag_name}.\",\n colour=Colour.blurple()\n ))\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Tags cog load.\"\"\"\n bot.add_cog(Tags(bot))\n log.info(\"Cog loaded: Tags\")\n", "path": "bot/cogs/tags.py"}]} | 1,750 | 486 |
gh_patches_debug_9387 | rasdani/github-patches | git_diff | flairNLP__flair-1679 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Is the corpus object reusable across ModelTrainer instances?
I have three checkpoint files generated from a training run that uses PooledFlair embedding. Say chk10.pt, chk20.pt, chk30.pt.
I finalize using the following code in a for loop to get the F1 predictions out:
trainer: ModelTrainer = ModelTrainer.load_checkpoint(chkfile, corpus)
trainer.train('.', checkpoint = False, train_with_dev=True, max_epochs=epochs)
I set the epochs to the value at which each checkpoint was generated (10, 20, 30, etc.), so it typically goes straight to creating the final model and emitting the predictions.
This works perfectly fine the first time through the loop, after which the predictions are quite wrong. If, instead of doing it in the loop, I run it just once by restarting the process, I get the values I expect. This behavior happens only with PooledFlairEmbedding; the same program runs just fine with ElmoEmbedding and BertEmbedding.
So my question is: why is this the case? Is it because I create the corpus object outside the for loop and keep reusing it across different ModelTrainer instances?
It happens quite regularly for me. If needed, I can make a small program and share it.
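To make the setup concrete, the loop being described is roughly the following (a sketch assembled from the snippets above; the corpus construction is not shown in the issue, so `build_corpus_once` is a hypothetical placeholder):
```python
from flair.trainers import ModelTrainer

corpus = build_corpus_once()  # hypothetical helper; the corpus is built once and then reused

for epochs, chkfile in [(10, "chk10.pt"), (20, "chk20.pt"), (30, "chk30.pt")]:
    # A fresh ModelTrainer per checkpoint, all sharing the same corpus object.
    trainer: ModelTrainer = ModelTrainer.load_checkpoint(chkfile, corpus)
    trainer.train(".", checkpoint=False, train_with_dev=True, max_epochs=epochs)
```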
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flair/nn.py`
Content:
```
1 import warnings
2 from pathlib import Path
3
4 import torch.nn
5
6 from abc import abstractmethod
7
8 from typing import Union, List
9
10 from torch.utils.data.dataset import Dataset
11
12 import flair
13 from flair import file_utils
14 from flair.data import DataPoint, Sentence
15 from flair.datasets import DataLoader
16 from flair.training_utils import Result
17
18
19 class Model(torch.nn.Module):
20 """Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.
21 Every new type of model must implement these methods."""
22
23 @abstractmethod
24 def forward_loss(
25 self, data_points: Union[List[DataPoint], DataPoint]
26 ) -> torch.tensor:
27 """Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training."""
28 pass
29
30 @abstractmethod
31 def evaluate(
32 self,
33 sentences: Union[List[DataPoint], Dataset],
34 out_path: Path = None,
35 embedding_storage_mode: str = "none",
36 ) -> (Result, float):
37 """Evaluates the model. Returns a Result object containing evaluation
38 results and a loss value. Implement this to enable evaluation.
39 :param data_loader: DataLoader that iterates over dataset to be evaluated
40 :param out_path: Optional output path to store predictions
41 :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and
42 freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU
43 :return: Returns a Tuple consisting of a Result object and a loss float value
44 """
45 pass
46
47 @abstractmethod
48 def _get_state_dict(self):
49 """Returns the state dictionary for this model. Implementing this enables the save() and save_checkpoint()
50 functionality."""
51 pass
52
53 @staticmethod
54 @abstractmethod
55 def _init_model_with_state_dict(state):
56 """Initialize the model from a state dictionary. Implementing this enables the load() and load_checkpoint()
57 functionality."""
58 pass
59
60 @staticmethod
61 @abstractmethod
62 def _fetch_model(model_name) -> str:
63 return model_name
64
65 def save(self, model_file: Union[str, Path]):
66 """
67 Saves the current model to the provided file.
68 :param model_file: the model file
69 """
70 model_state = self._get_state_dict()
71
72 torch.save(model_state, str(model_file), pickle_protocol=4)
73
74 @classmethod
75 def load(cls, model: Union[str, Path]):
76 """
77 Loads the model from the given file.
78 :param model: the model file
79 :return: the loaded text classifier model
80 """
81 model_file = cls._fetch_model(str(model))
82
83 with warnings.catch_warnings():
84 warnings.filterwarnings("ignore")
85 # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups
86 # see https://github.com/zalandoresearch/flair/issues/351
87 f = file_utils.load_big_file(str(model_file))
88 state = torch.load(f, map_location=flair.device)
89
90 model = cls._init_model_with_state_dict(state)
91
92 model.eval()
93 model.to(flair.device)
94
95 return model
96
97
98 class LockedDropout(torch.nn.Module):
99 """
100 Implementation of locked (or variational) dropout. Randomly drops out entire parameters in embedding space.
101 """
102
103 def __init__(self, dropout_rate=0.5, batch_first=True, inplace=False):
104 super(LockedDropout, self).__init__()
105 self.dropout_rate = dropout_rate
106 self.batch_first = batch_first
107 self.inplace = inplace
108
109 def forward(self, x):
110 if not self.training or not self.dropout_rate:
111 return x
112
113 if not self.batch_first:
114 m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - self.dropout_rate)
115 else:
116 m = x.data.new(x.size(0), 1, x.size(2)).bernoulli_(1 - self.dropout_rate)
117
118 mask = torch.autograd.Variable(m, requires_grad=False) / (1 - self.dropout_rate)
119 mask = mask.expand_as(x)
120 return mask * x
121
122 def extra_repr(self):
123 inplace_str = ", inplace" if self.inplace else ""
124 return "p={}{}".format(self.dropout_rate, inplace_str)
125
126
127 class WordDropout(torch.nn.Module):
128 """
129 Implementation of word dropout. Randomly drops out entire words (or characters) in embedding space.
130 """
131
132 def __init__(self, dropout_rate=0.05, inplace=False):
133 super(WordDropout, self).__init__()
134 self.dropout_rate = dropout_rate
135 self.inplace = inplace
136
137 def forward(self, x):
138 if not self.training or not self.dropout_rate:
139 return x
140
141 m = x.data.new(x.size(0), x.size(1), 1).bernoulli_(1 - self.dropout_rate)
142
143 mask = torch.autograd.Variable(m, requires_grad=False)
144 return mask * x
145
146 def extra_repr(self):
147 inplace_str = ", inplace" if self.inplace else ""
148 return "p={}{}".format(self.dropout_rate, inplace_str)
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flair/nn.py b/flair/nn.py
--- a/flair/nn.py
+++ b/flair/nn.py
@@ -85,7 +85,7 @@
# load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups
# see https://github.com/zalandoresearch/flair/issues/351
f = file_utils.load_big_file(str(model_file))
- state = torch.load(f, map_location=flair.device)
+ state = torch.load(f, map_location='cpu')
model = cls._init_model_with_state_dict(state)
| {"golden_diff": "diff --git a/flair/nn.py b/flair/nn.py\n--- a/flair/nn.py\n+++ b/flair/nn.py\n@@ -85,7 +85,7 @@\n # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups\n # see https://github.com/zalandoresearch/flair/issues/351\n f = file_utils.load_big_file(str(model_file))\n- state = torch.load(f, map_location=flair.device)\n+ state = torch.load(f, map_location='cpu')\n \n model = cls._init_model_with_state_dict(state)\n", "issue": "is corpus object reusable across ModelTrainer instances ?\nI have three checkpoint files generated from a training run that uses PooledFlair embedding. Say chk10.pt, chk20.pt, chk30.pt.\r\n\r\nI finalize using the following code in a for loop to get the F1 predictions out:\r\n\r\ntrainer: ModelTrainer = ModelTrainer.load_checkpoint(chkfile, corpus)\r\ntrainer.train('.', checkpoint = False, train_with_dev=True, max_epochs=epochs)\r\n\r\nI set the epochs to the value at which this checkpoint got generated. So 10, 20, 30 etc. So typically it goes straight to creating the final model and emitting the predictions.\r\n\r\nThis works perfectly fine for the first time in the loop, after which the predictions are quite wrong. Now instead of doing it in the loop, if i simply do just once by restarting the process i get the values i expect. This behavior happens only with PooledFlairEmbedding. Same program runs just fine with ElmoEmbedding, BertEmbedding.\r\n\r\nSo my question is why is this the case ? Is it because i create the corpus object outside the for loop and keep reusing it across different ModelTrainer instances ? \r\n\r\nIt happens quite regularly for me. If needed i can make a small program and share.\r\n\n", "before_files": [{"content": "import warnings\nfrom pathlib import Path\n\nimport torch.nn\n\nfrom abc import abstractmethod\n\nfrom typing import Union, List\n\nfrom torch.utils.data.dataset import Dataset\n\nimport flair\nfrom flair import file_utils\nfrom flair.data import DataPoint, Sentence\nfrom flair.datasets import DataLoader\nfrom flair.training_utils import Result\n\n\nclass Model(torch.nn.Module):\n \"\"\"Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.\n Every new type of model must implement these methods.\"\"\"\n\n @abstractmethod\n def forward_loss(\n self, data_points: Union[List[DataPoint], DataPoint]\n ) -> torch.tensor:\n \"\"\"Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training.\"\"\"\n pass\n\n @abstractmethod\n def evaluate(\n self,\n sentences: Union[List[DataPoint], Dataset],\n out_path: Path = None,\n embedding_storage_mode: str = \"none\",\n ) -> (Result, float):\n \"\"\"Evaluates the model. Returns a Result object containing evaluation\n results and a loss value. Implement this to enable evaluation.\n :param data_loader: DataLoader that iterates over dataset to be evaluated\n :param out_path: Optional output path to store predictions\n :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and\n freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU\n :return: Returns a Tuple consisting of a Result object and a loss float value\n \"\"\"\n pass\n\n @abstractmethod\n def _get_state_dict(self):\n \"\"\"Returns the state dictionary for this model. 
Implementing this enables the save() and save_checkpoint()\n functionality.\"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def _init_model_with_state_dict(state):\n \"\"\"Initialize the model from a state dictionary. Implementing this enables the load() and load_checkpoint()\n functionality.\"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def _fetch_model(model_name) -> str:\n return model_name\n\n def save(self, model_file: Union[str, Path]):\n \"\"\"\n Saves the current model to the provided file.\n :param model_file: the model file\n \"\"\"\n model_state = self._get_state_dict()\n\n torch.save(model_state, str(model_file), pickle_protocol=4)\n\n @classmethod\n def load(cls, model: Union[str, Path]):\n \"\"\"\n Loads the model from the given file.\n :param model: the model file\n :return: the loaded text classifier model\n \"\"\"\n model_file = cls._fetch_model(str(model))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups\n # see https://github.com/zalandoresearch/flair/issues/351\n f = file_utils.load_big_file(str(model_file))\n state = torch.load(f, map_location=flair.device)\n\n model = cls._init_model_with_state_dict(state)\n\n model.eval()\n model.to(flair.device)\n\n return model\n\n\nclass LockedDropout(torch.nn.Module):\n \"\"\"\n Implementation of locked (or variational) dropout. Randomly drops out entire parameters in embedding space.\n \"\"\"\n\n def __init__(self, dropout_rate=0.5, batch_first=True, inplace=False):\n super(LockedDropout, self).__init__()\n self.dropout_rate = dropout_rate\n self.batch_first = batch_first\n self.inplace = inplace\n\n def forward(self, x):\n if not self.training or not self.dropout_rate:\n return x\n\n if not self.batch_first:\n m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - self.dropout_rate)\n else:\n m = x.data.new(x.size(0), 1, x.size(2)).bernoulli_(1 - self.dropout_rate)\n\n mask = torch.autograd.Variable(m, requires_grad=False) / (1 - self.dropout_rate)\n mask = mask.expand_as(x)\n return mask * x\n\n def extra_repr(self):\n inplace_str = \", inplace\" if self.inplace else \"\"\n return \"p={}{}\".format(self.dropout_rate, inplace_str)\n\n\nclass WordDropout(torch.nn.Module):\n \"\"\"\n Implementation of word dropout. 
Randomly drops out entire words (or characters) in embedding space.\n \"\"\"\n\n def __init__(self, dropout_rate=0.05, inplace=False):\n super(WordDropout, self).__init__()\n self.dropout_rate = dropout_rate\n self.inplace = inplace\n\n def forward(self, x):\n if not self.training or not self.dropout_rate:\n return x\n\n m = x.data.new(x.size(0), x.size(1), 1).bernoulli_(1 - self.dropout_rate)\n\n mask = torch.autograd.Variable(m, requires_grad=False)\n return mask * x\n\n def extra_repr(self):\n inplace_str = \", inplace\" if self.inplace else \"\"\n return \"p={}{}\".format(self.dropout_rate, inplace_str)\n", "path": "flair/nn.py"}], "after_files": [{"content": "import warnings\nfrom pathlib import Path\n\nimport torch.nn\n\nfrom abc import abstractmethod\n\nfrom typing import Union, List\n\nfrom torch.utils.data.dataset import Dataset\n\nimport flair\nfrom flair import file_utils\nfrom flair.data import DataPoint, Sentence\nfrom flair.datasets import DataLoader\nfrom flair.training_utils import Result\n\n\nclass Model(torch.nn.Module):\n \"\"\"Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.\n Every new type of model must implement these methods.\"\"\"\n\n @abstractmethod\n def forward_loss(\n self, data_points: Union[List[DataPoint], DataPoint]\n ) -> torch.tensor:\n \"\"\"Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training.\"\"\"\n pass\n\n @abstractmethod\n def evaluate(\n self,\n sentences: Union[List[DataPoint], Dataset],\n out_path: Path = None,\n embedding_storage_mode: str = \"none\",\n ) -> (Result, float):\n \"\"\"Evaluates the model. Returns a Result object containing evaluation\n results and a loss value. Implement this to enable evaluation.\n :param data_loader: DataLoader that iterates over dataset to be evaluated\n :param out_path: Optional output path to store predictions\n :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and\n freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU\n :return: Returns a Tuple consisting of a Result object and a loss float value\n \"\"\"\n pass\n\n @abstractmethod\n def _get_state_dict(self):\n \"\"\"Returns the state dictionary for this model. Implementing this enables the save() and save_checkpoint()\n functionality.\"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def _init_model_with_state_dict(state):\n \"\"\"Initialize the model from a state dictionary. 
Implementing this enables the load() and load_checkpoint()\n functionality.\"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def _fetch_model(model_name) -> str:\n return model_name\n\n def save(self, model_file: Union[str, Path]):\n \"\"\"\n Saves the current model to the provided file.\n :param model_file: the model file\n \"\"\"\n model_state = self._get_state_dict()\n\n torch.save(model_state, str(model_file), pickle_protocol=4)\n\n @classmethod\n def load(cls, model: Union[str, Path]):\n \"\"\"\n Loads the model from the given file.\n :param model: the model file\n :return: the loaded text classifier model\n \"\"\"\n model_file = cls._fetch_model(str(model))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups\n # see https://github.com/zalandoresearch/flair/issues/351\n f = file_utils.load_big_file(str(model_file))\n state = torch.load(f, map_location='cpu')\n\n model = cls._init_model_with_state_dict(state)\n\n model.eval()\n model.to(flair.device)\n\n return model\n\n\nclass LockedDropout(torch.nn.Module):\n \"\"\"\n Implementation of locked (or variational) dropout. Randomly drops out entire parameters in embedding space.\n \"\"\"\n\n def __init__(self, dropout_rate=0.5, batch_first=True, inplace=False):\n super(LockedDropout, self).__init__()\n self.dropout_rate = dropout_rate\n self.batch_first = batch_first\n self.inplace = inplace\n\n def forward(self, x):\n if not self.training or not self.dropout_rate:\n return x\n\n if not self.batch_first:\n m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - self.dropout_rate)\n else:\n m = x.data.new(x.size(0), 1, x.size(2)).bernoulli_(1 - self.dropout_rate)\n\n mask = torch.autograd.Variable(m, requires_grad=False) / (1 - self.dropout_rate)\n mask = mask.expand_as(x)\n return mask * x\n\n def extra_repr(self):\n inplace_str = \", inplace\" if self.inplace else \"\"\n return \"p={}{}\".format(self.dropout_rate, inplace_str)\n\n\nclass WordDropout(torch.nn.Module):\n \"\"\"\n Implementation of word dropout. Randomly drops out entire words (or characters) in embedding space.\n \"\"\"\n\n def __init__(self, dropout_rate=0.05, inplace=False):\n super(WordDropout, self).__init__()\n self.dropout_rate = dropout_rate\n self.inplace = inplace\n\n def forward(self, x):\n if not self.training or not self.dropout_rate:\n return x\n\n m = x.data.new(x.size(0), x.size(1), 1).bernoulli_(1 - self.dropout_rate)\n\n mask = torch.autograd.Variable(m, requires_grad=False)\n return mask * x\n\n def extra_repr(self):\n inplace_str = \", inplace\" if self.inplace else \"\"\n return \"p={}{}\".format(self.dropout_rate, inplace_str)\n", "path": "flair/nn.py"}]} | 2,011 | 144 |
gh_patches_debug_1588 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1804 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Django Component Governance vulnerability
Django 1.11 before 1.11.28, 2.2 before 2.2.10, and 3.0 before 3.0.3 allows SQL Injection if untrusted data is used as a StringAgg delimiter (e.g., in Django applications that offer downloads of data as a series of rows with a user-specified column delimiter). By passing a suitably crafted delimiter to a contrib.postgres.aggregates.StringAgg instance, it was possible to break escaping and inject malicious SQL.
https://dev.azure.com/FuseLabs/SDK_v4/_componentGovernance/112465/alert/2370216?typeId=4354877
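For context, the vulnerable pattern is roughly the following (a schematic sketch, not code from this repository; the queryset and field name are invented for illustration):
```python
from django.contrib.postgres.aggregates import StringAgg

def export_emails(queryset, user_delimiter: str):
    # Passing untrusted input as the StringAgg delimiter is the injection
    # vector fixed in Django 1.11.28 / 2.2.10 / 3.0.3 (CVE-2020-7471);
    # patched releases escape the delimiter correctly.
    return queryset.aggregate(emails=StringAgg("email", delimiter=user_delimiter))
```
In this repository Django is only a sample/test dependency, so the remediation is to pin a patched release, which is what the fix below does.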
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-applicationinsights/setup.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 REQUIRES = [
8 "applicationinsights==0.11.9",
9 "botbuilder-schema==4.15.0",
10 "botframework-connector==4.15.0",
11 "botbuilder-core==4.15.0",
12 ]
13 TESTS_REQUIRES = [
14 "aiounittest==1.3.0",
15 "django==2.2.6", # For samples
16 "djangorestframework==3.10.3", # For samples
17 "flask==1.1.1", # For samples
18 ]
19
20 root = os.path.abspath(os.path.dirname(__file__))
21
22 with open(os.path.join(root, "botbuilder", "applicationinsights", "about.py")) as f:
23 package_info = {}
24 info = f.read()
25 exec(info, package_info)
26
27 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
28 long_description = f.read()
29
30 setup(
31 name=package_info["__title__"],
32 version=package_info["__version__"],
33 url=package_info["__uri__"],
34 author=package_info["__author__"],
35 description=package_info["__description__"],
36 keywords=[
37 "BotBuilderApplicationInsights",
38 "bots",
39 "ai",
40 "botframework",
41 "botbuilder",
42 ],
43 long_description=long_description,
44 long_description_content_type="text/x-rst",
45 license=package_info["__license__"],
46 packages=[
47 "botbuilder.applicationinsights",
48 "botbuilder.applicationinsights.django",
49 "botbuilder.applicationinsights.flask",
50 "botbuilder.applicationinsights.processor",
51 ],
52 install_requires=REQUIRES + TESTS_REQUIRES,
53 tests_require=TESTS_REQUIRES,
54 include_package_data=True,
55 classifiers=[
56 "Programming Language :: Python :: 3.7",
57 "Intended Audience :: Developers",
58 "License :: OSI Approved :: MIT License",
59 "Operating System :: OS Independent",
60 "Development Status :: 5 - Production/Stable",
61 "Topic :: Scientific/Engineering :: Artificial Intelligence",
62 ],
63 )
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botbuilder-applicationinsights/setup.py b/libraries/botbuilder-applicationinsights/setup.py
--- a/libraries/botbuilder-applicationinsights/setup.py
+++ b/libraries/botbuilder-applicationinsights/setup.py
@@ -12,7 +12,7 @@
]
TESTS_REQUIRES = [
"aiounittest==1.3.0",
- "django==2.2.6", # For samples
+ "django==2.2.10", # For samples
"djangorestframework==3.10.3", # For samples
"flask==1.1.1", # For samples
]
| {"golden_diff": "diff --git a/libraries/botbuilder-applicationinsights/setup.py b/libraries/botbuilder-applicationinsights/setup.py\n--- a/libraries/botbuilder-applicationinsights/setup.py\n+++ b/libraries/botbuilder-applicationinsights/setup.py\n@@ -12,7 +12,7 @@\n ]\n TESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n- \"django==2.2.6\", # For samples\n+ \"django==2.2.10\", # For samples\n \"djangorestframework==3.10.3\", # For samples\n \"flask==1.1.1\", # For samples\n ]\n", "issue": "Django Component Governance vulnerability\nDjango 1.11 before 1.11.28, 2.2 before 2.2.10, and 3.0 before 3.0.3 allows SQL Injection if untrusted data is used as a StringAgg delimiter (e.g., in Django applications that offer downloads of data as a series of rows with a user-specified column delimiter). By passing a suitably crafted delimiter to a contrib.postgres.aggregates.StringAgg instance, it was possible to break escaping and inject malicious SQL.\r\n\r\nhttps://dev.azure.com/FuseLabs/SDK_v4/_componentGovernance/112465/alert/2370216?typeId=4354877\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"applicationinsights==0.11.9\",\n \"botbuilder-schema==4.15.0\",\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n]\nTESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n \"django==2.2.6\", # For samples\n \"djangorestframework==3.10.3\", # For samples\n \"flask==1.1.1\", # For samples\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"applicationinsights\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderApplicationInsights\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.applicationinsights\",\n \"botbuilder.applicationinsights.django\",\n \"botbuilder.applicationinsights.flask\",\n \"botbuilder.applicationinsights.processor\",\n ],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-applicationinsights/setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"applicationinsights==0.11.9\",\n \"botbuilder-schema==4.15.0\",\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n]\nTESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n \"django==2.2.10\", # For samples\n \"djangorestframework==3.10.3\", # For samples\n \"flask==1.1.1\", # For samples\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"applicationinsights\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderApplicationInsights\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.applicationinsights\",\n \"botbuilder.applicationinsights.django\",\n \"botbuilder.applicationinsights.flask\",\n \"botbuilder.applicationinsights.processor\",\n ],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-applicationinsights/setup.py"}]} | 1,032 | 152 |
gh_patches_debug_36006 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1366 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't configure source without arguments
Hello Team,
I'm trying to configure the source that I added (Mamirolle info) in Home Assistant, but I'm running into argument issues.
`args` is marked `required`, so if none is passed, the configuration is invalid.
```
Invalid config for [waste_collection_schedule]: required key not provided @ data['waste_collection_schedule']['sources'][0]['args']. Got None. (See /config/configuration.yaml, line 27).
```
If a dummy argument is passed, the configuration is valid but the source setup fails.
```
Error during setup of component waste_collection_schedule
Traceback (most recent call last):
File "/usr/src/homeassistant/homeassistant/setup.py", line 288, in _async_setup_component
result = await task
^^^^^^^^^^
File "/config/custom_components/waste_collection_schedule/__init__.py", line 109, in async_setup
api.add_source_shell(
File "/config/custom_components/waste_collection_schedule/__init__.py", line 202, in add_source_shell
SourceShell.create(
File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 196, in create
source = source_module.Source(**source_args) # type: ignore
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
TypeError: Source() takes no arguments
```
I understand that we want the configuration to fail early, but the real error will still be seen only when the source is actually instantiated. Because of that, I think the arguments shouldn't be required.
What do you think about this?
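One way to satisfy both the schema and the source, which is the direction the fix below takes, is to let the source accept and ignore a placeholder argument; a minimal sketch:
```python
class Source:
    # Accepting (and ignoring) an optional placeholder argument lets a
    # configuration that passes a dummy value under `args` still instantiate
    # the source; the parameter name `_` is only a convention.
    def __init__(self, _=None):
        pass
```
With that in place, a config entry can pass any throwaway value under `args` (or omit it entirely, if the schema is relaxed).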
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py`
Content:
```
1 import datetime
2
3 import requests
4 from bs4 import BeautifulSoup
5 from waste_collection_schedule import Collection
6
7 TITLE = "Mairie de Mamirolle"
8 DESCRIPTION = "Source script for mamirolle.info"
9 COUNTRY = "fr"
10 URL = "http://mamirolle.info/"
11
12 TEST_CASES = {"TestSource": {}}
13
14 ICON_MAP = {
15 "Poubelle grise": "mdi:trash-can",
16 "Poubelle jaune": "mdi:recycle",
17 }
18
19 MONTH_NAMES = [
20 "janvier",
21 "février",
22 "mars",
23 "avril",
24 "mai",
25 "juin",
26 "juillet",
27 "août",
28 "septembre",
29 "octobre",
30 "novembre",
31 "décembre",
32 ]
33
34
35 class Source:
36 def fetch(self):
37 now = datetime.datetime.now()
38 # get list of regions and weblinks
39 page = requests.get(URL)
40 # A lenient HTML parser is need
41 soup = BeautifulSoup(page.text.replace("<![endif]", ""), "html.parser")
42 trash_domestic = soup.find("i", class_="poubelle-grise")
43 _, day, month = trash_domestic.next_sibling.string.split()
44 date_domestic = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()
45 if date_domestic < now.date():
46 date_domestic = date_domestic.replace(year=date_domestic.year + 1)
47
48 trash_recycle = soup.find("i", class_="poubelle-jaune")
49 _, day, month = trash_recycle.next_sibling.string.split()
50 date_recycle = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()
51 if date_recycle < now.date():
52 date_recycle = date_recycle.replace(year=date_recycle.year + 1)
53
54 entries = [
55 Collection(
56 date=date_domestic,
57 t="Poubelle grise",
58 icon=ICON_MAP.get("Poubelle grise"),
59 ),
60 Collection(
61 date=date_recycle,
62 t="Poubelle jaune",
63 icon=ICON_MAP.get("Poubelle jaune"),
64 ),
65 ] # List that holds collection schedule
66
67 return entries
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py
@@ -9,7 +9,12 @@
COUNTRY = "fr"
URL = "http://mamirolle.info/"
-TEST_CASES = {"TestSource": {}}
+TEST_CASES = {
+ "TestSource": {},
+ "IgnoredArgument": {
+ "_": ""
+ }
+}
ICON_MAP = {
"Poubelle grise": "mdi:trash-can",
@@ -33,6 +38,9 @@
class Source:
+ def __init__(self, _=None):
+ pass
+
def fetch(self):
now = datetime.datetime.now()
# get list of regions and weblinks
@@ -40,28 +48,19 @@
# A lenient HTML parser is need
soup = BeautifulSoup(page.text.replace("<![endif]", ""), "html.parser")
trash_domestic = soup.find("i", class_="poubelle-grise")
- _, day, month = trash_domestic.next_sibling.string.split()
- date_domestic = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()
- if date_domestic < now.date():
- date_domestic = date_domestic.replace(year=date_domestic.year + 1)
-
trash_recycle = soup.find("i", class_="poubelle-jaune")
- _, day, month = trash_recycle.next_sibling.string.split()
- date_recycle = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()
- if date_recycle < now.date():
- date_recycle = date_recycle.replace(year=date_recycle.year + 1)
- entries = [
- Collection(
- date=date_domestic,
- t="Poubelle grise",
- icon=ICON_MAP.get("Poubelle grise"),
- ),
- Collection(
- date=date_recycle,
- t="Poubelle jaune",
- icon=ICON_MAP.get("Poubelle jaune"),
- ),
- ] # List that holds collection schedule
+ entries = [] # List that holds collection schedule
+ for trash, label in [(trash_domestic, "Poubelle grise"), (trash_recycle, "Poubelle jaune")]:
+ _, day, month = trash.next_sibling.string.split()
+ date = now.replace(month=MONTH_NAMES.index(month) + 1, day=int(day)).date()
+ if date < now.date():
+ date = date.replace(year=date.year + 1)
+
+ entries.append(Collection(
+ date=date,
+ t=label,
+ icon=ICON_MAP.get(label),
+ ))
return entries
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py\n@@ -9,7 +9,12 @@\n COUNTRY = \"fr\"\n URL = \"http://mamirolle.info/\"\n \n-TEST_CASES = {\"TestSource\": {}}\n+TEST_CASES = {\n+ \"TestSource\": {},\n+ \"IgnoredArgument\": {\n+ \"_\": \"\"\n+ }\n+}\n \n ICON_MAP = {\n \"Poubelle grise\": \"mdi:trash-can\",\n@@ -33,6 +38,9 @@\n \n \n class Source:\n+ def __init__(self, _=None):\n+ pass\n+\n def fetch(self):\n now = datetime.datetime.now()\n # get list of regions and weblinks\n@@ -40,28 +48,19 @@\n # A lenient HTML parser is need\n soup = BeautifulSoup(page.text.replace(\"<![endif]\", \"\"), \"html.parser\")\n trash_domestic = soup.find(\"i\", class_=\"poubelle-grise\")\n- _, day, month = trash_domestic.next_sibling.string.split()\n- date_domestic = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()\n- if date_domestic < now.date():\n- date_domestic = date_domestic.replace(year=date_domestic.year + 1)\n-\n trash_recycle = soup.find(\"i\", class_=\"poubelle-jaune\")\n- _, day, month = trash_recycle.next_sibling.string.split()\n- date_recycle = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()\n- if date_recycle < now.date():\n- date_recycle = date_recycle.replace(year=date_recycle.year + 1)\n \n- entries = [\n- Collection(\n- date=date_domestic,\n- t=\"Poubelle grise\",\n- icon=ICON_MAP.get(\"Poubelle grise\"),\n- ),\n- Collection(\n- date=date_recycle,\n- t=\"Poubelle jaune\",\n- icon=ICON_MAP.get(\"Poubelle jaune\"),\n- ),\n- ] # List that holds collection schedule\n+ entries = [] # List that holds collection schedule\n+ for trash, label in [(trash_domestic, \"Poubelle grise\"), (trash_recycle, \"Poubelle jaune\")]:\n+ _, day, month = trash.next_sibling.string.split()\n+ date = now.replace(month=MONTH_NAMES.index(month) + 1, day=int(day)).date()\n+ if date < now.date():\n+ date = date.replace(year=date.year + 1)\n+\n+ entries.append(Collection(\n+ date=date,\n+ t=label,\n+ icon=ICON_MAP.get(label),\n+ ))\n \n return entries\n", "issue": "Can't configure source without arguments\nHello Team,\r\nI'm trying to configure into HomeAssistant the source that I added Mamirolle info, but I have argument issues.\r\n\r\n`args` is marked `required`, so if none is passed, the configuration is invalid.\r\n\r\n```\r\nInvalid config for [waste_collection_schedule]: required key not provided @ data['waste_collection_schedule']['sources'][0]['args']. Got None. (See /config/configuration.yaml, line 27). \r\n```\r\n\r\nIf a dummy argument is passed. 
The configuration is valid but the source setup fails.\r\n```\r\nError during setup of component waste_collection_schedule\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/src/homeassistant/homeassistant/setup.py\", line 288, in _async_setup_component\r\n result = await task\r\n ^^^^^^^^^^\r\n File \"/config/custom_components/waste_collection_schedule/__init__.py\", line 109, in async_setup\r\n api.add_source_shell(\r\n File \"/config/custom_components/waste_collection_schedule/__init__.py\", line 202, in add_source_shell\r\n SourceShell.create(\r\n File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 196, in create\r\n source = source_module.Source(**source_args) # type: ignore\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nTypeError: Source() takes no arguments\r\n```\r\nI understand that we want the configuration to fail early but the real error will still be seen only when the source is actually instantiated. Because of that I think the arguments shouldn't be required.\r\n\r\nWhat do you think about this?\n", "before_files": [{"content": "import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Mairie de Mamirolle\"\nDESCRIPTION = \"Source script for mamirolle.info\"\nCOUNTRY = \"fr\"\nURL = \"http://mamirolle.info/\"\n\nTEST_CASES = {\"TestSource\": {}}\n\nICON_MAP = {\n \"Poubelle grise\": \"mdi:trash-can\",\n \"Poubelle jaune\": \"mdi:recycle\",\n}\n\nMONTH_NAMES = [\n \"janvier\",\n \"f\u00e9vrier\",\n \"mars\",\n \"avril\",\n \"mai\",\n \"juin\",\n \"juillet\",\n \"ao\u00fbt\",\n \"septembre\",\n \"octobre\",\n \"novembre\",\n \"d\u00e9cembre\",\n]\n\n\nclass Source:\n def fetch(self):\n now = datetime.datetime.now()\n # get list of regions and weblinks\n page = requests.get(URL)\n # A lenient HTML parser is need\n soup = BeautifulSoup(page.text.replace(\"<![endif]\", \"\"), \"html.parser\")\n trash_domestic = soup.find(\"i\", class_=\"poubelle-grise\")\n _, day, month = trash_domestic.next_sibling.string.split()\n date_domestic = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()\n if date_domestic < now.date():\n date_domestic = date_domestic.replace(year=date_domestic.year + 1)\n\n trash_recycle = soup.find(\"i\", class_=\"poubelle-jaune\")\n _, day, month = trash_recycle.next_sibling.string.split()\n date_recycle = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()\n if date_recycle < now.date():\n date_recycle = date_recycle.replace(year=date_recycle.year + 1)\n\n entries = [\n Collection(\n date=date_domestic,\n t=\"Poubelle grise\",\n icon=ICON_MAP.get(\"Poubelle grise\"),\n ),\n Collection(\n date=date_recycle,\n t=\"Poubelle jaune\",\n icon=ICON_MAP.get(\"Poubelle jaune\"),\n ),\n ] # List that holds collection schedule\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py"}], "after_files": [{"content": "import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Mairie de Mamirolle\"\nDESCRIPTION = \"Source script for mamirolle.info\"\nCOUNTRY = \"fr\"\nURL = \"http://mamirolle.info/\"\n\nTEST_CASES = {\n \"TestSource\": {},\n \"IgnoredArgument\": {\n \"_\": \"\"\n }\n}\n\nICON_MAP = {\n \"Poubelle grise\": \"mdi:trash-can\",\n \"Poubelle jaune\": \"mdi:recycle\",\n}\n\nMONTH_NAMES = [\n \"janvier\",\n \"f\u00e9vrier\",\n \"mars\",\n \"avril\",\n \"mai\",\n \"juin\",\n 
\"juillet\",\n \"ao\u00fbt\",\n \"septembre\",\n \"octobre\",\n \"novembre\",\n \"d\u00e9cembre\",\n]\n\n\nclass Source:\n def __init__(self, _=None):\n pass\n\n def fetch(self):\n now = datetime.datetime.now()\n # get list of regions and weblinks\n page = requests.get(URL)\n # A lenient HTML parser is need\n soup = BeautifulSoup(page.text.replace(\"<![endif]\", \"\"), \"html.parser\")\n trash_domestic = soup.find(\"i\", class_=\"poubelle-grise\")\n trash_recycle = soup.find(\"i\", class_=\"poubelle-jaune\")\n\n entries = [] # List that holds collection schedule\n for trash, label in [(trash_domestic, \"Poubelle grise\"), (trash_recycle, \"Poubelle jaune\")]:\n _, day, month = trash.next_sibling.string.split()\n date = now.replace(month=MONTH_NAMES.index(month) + 1, day=int(day)).date()\n if date < now.date():\n date = date.replace(year=date.year + 1)\n\n entries.append(Collection(\n date=date,\n t=label,\n icon=ICON_MAP.get(label),\n ))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py"}]} | 1,224 | 665 |
gh_patches_debug_5170 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2781 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Traceback appears in Status Bar when trying to replay a live flow
##### Steps to reproduce the problem:
1. Run **pathod** : `pathod -a "/=200:p0,10"`
2. Run mitmproxy.
3. Send _get request_ to pathod through mitmproxy using **pathoc**:
`pathoc -c localhost:9999 localhost:8080 'get:/'`
4. Try to replay the corresponding live flow in mitmproxy by pressing `r`.
I am seeing:

##### Any other comments? What have you tried so far?
This issue is relevant in situations where the server hasn't had time to send a response yet, but a user tries to replay the corresponding flow.
I also faced this issue when trying to replay the `mitm.it` flow from the onboarding app.
##### System information
Mitmproxy: 3.0.0.dev1101 (commit d9d4d15) binary
Python: 3.5.2
OpenSSL: OpenSSL 1.1.0g 2 Nov 2017
Platform: Linux-4.4.0-104-generic-x86_64-with-debian-stretch-sid
--- END ISSUE ---
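As a rough sketch of the guard this traceback points towards (it mirrors the patch recorded later in this entry, but is written as a standalone function rather than the actual add-on method): client replay should reject flows that are still live before queueing them.

```python
from mitmproxy import exceptions


def start_replay(flows):
    """Queue flows for replay, refusing any that are still in flight."""
    for f in flows:
        # A live flow has no finished response yet, so replaying it is
        # meaningless and previously crashed with the traceback shown above.
        if f.live:
            raise exceptions.CommandError("Can't replay live flow.")
    return list(flows)
```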
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/addons/clientplayback.py`
Content:
```
1 from mitmproxy import exceptions
2 from mitmproxy import ctx
3 from mitmproxy import io
4 from mitmproxy import flow
5 from mitmproxy import command
6 import mitmproxy.types
7
8 import typing
9
10
11 class ClientPlayback:
12 def __init__(self):
13 self.flows = [] # type: typing.List[flow.Flow]
14 self.current_thread = None
15 self.configured = False
16
17 def count(self) -> int:
18 if self.current_thread:
19 current = 1
20 else:
21 current = 0
22 return current + len(self.flows)
23
24 @command.command("replay.client.stop")
25 def stop_replay(self) -> None:
26 """
27 Stop client replay.
28 """
29 self.flows = []
30 ctx.log.alert("Client replay stopped.")
31 ctx.master.addons.trigger("update", [])
32
33 @command.command("replay.client")
34 def start_replay(self, flows: typing.Sequence[flow.Flow]) -> None:
35 """
36 Replay requests from flows.
37 """
38 self.flows = list(flows)
39 ctx.log.alert("Replaying %s flows." % len(self.flows))
40 ctx.master.addons.trigger("update", [])
41
42 @command.command("replay.client.file")
43 def load_file(self, path: mitmproxy.types.Path) -> None:
44 try:
45 flows = io.read_flows_from_paths([path])
46 except exceptions.FlowReadException as e:
47 raise exceptions.CommandError(str(e))
48 ctx.log.alert("Replaying %s flows." % len(self.flows))
49 self.flows = flows
50 ctx.master.addons.trigger("update", [])
51
52 def configure(self, updated):
53 if not self.configured and ctx.options.client_replay:
54 self.configured = True
55 ctx.log.info("Client Replay: {}".format(ctx.options.client_replay))
56 try:
57 flows = io.read_flows_from_paths(ctx.options.client_replay)
58 except exceptions.FlowReadException as e:
59 raise exceptions.OptionsError(str(e))
60 self.start_replay(flows)
61
62 def tick(self):
63 current_is_done = self.current_thread and not self.current_thread.is_alive()
64 can_start_new = not self.current_thread or current_is_done
65 will_start_new = can_start_new and self.flows
66
67 if current_is_done:
68 self.current_thread = None
69 ctx.master.addons.trigger("update", [])
70 if will_start_new:
71 f = self.flows.pop(0)
72 self.current_thread = ctx.master.replay_request(f)
73 ctx.master.addons.trigger("update", [f])
74 if current_is_done and not will_start_new:
75 ctx.master.addons.trigger("processing_complete")
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/addons/clientplayback.py b/mitmproxy/addons/clientplayback.py
--- a/mitmproxy/addons/clientplayback.py
+++ b/mitmproxy/addons/clientplayback.py
@@ -35,6 +35,9 @@
"""
Replay requests from flows.
"""
+ for f in flows:
+ if f.live:
+ raise exceptions.CommandError("Can't replay live flow.")
self.flows = list(flows)
ctx.log.alert("Replaying %s flows." % len(self.flows))
ctx.master.addons.trigger("update", [])
| {"golden_diff": "diff --git a/mitmproxy/addons/clientplayback.py b/mitmproxy/addons/clientplayback.py\n--- a/mitmproxy/addons/clientplayback.py\n+++ b/mitmproxy/addons/clientplayback.py\n@@ -35,6 +35,9 @@\n \"\"\"\n Replay requests from flows.\n \"\"\"\n+ for f in flows:\n+ if f.live:\n+ raise exceptions.CommandError(\"Can't replay live flow.\")\n self.flows = list(flows)\n ctx.log.alert(\"Replaying %s flows.\" % len(self.flows))\n ctx.master.addons.trigger(\"update\", [])\n", "issue": "Traceback appears in Status Bar, when trying to replay live flow\n##### Steps to reproduce the problem:\r\n\r\n1. Run **pathod** : `pathod -a \"/=200:p0,10\"`\r\n2. Run mitmproxy.\r\n3. Send _get request_ to pathod through mitmproxy using **pathoc**: \r\n`pathoc -c localhost:9999 localhost:8080 'get:/'`\r\n4. Try to replay the corresponding live flow in mitmproxy by pressing `r`.\r\n\r\nI am seeing:\r\n\r\n\r\n\r\n##### Any other comments? What have you tried so far?\r\nThis issue is relevant for the situations, when server didn't have time to send a response yet, but a user tries to replay the corresponding flow.\r\nI also faced this issue, when trying to replay `mitm.it` flow from onboardingapp.\r\n\r\n\r\n##### System information\r\n\r\nMitmproxy: 3.0.0.dev1101 (commit d9d4d15) binary\r\nPython: 3.5.2\r\nOpenSSL: OpenSSL 1.1.0g 2 Nov 2017\r\nPlatform: Linux-4.4.0-104-generic-x86_64-with-debian-stretch-sid\r\n\r\n \n", "before_files": [{"content": "from mitmproxy import exceptions\nfrom mitmproxy import ctx\nfrom mitmproxy import io\nfrom mitmproxy import flow\nfrom mitmproxy import command\nimport mitmproxy.types\n\nimport typing\n\n\nclass ClientPlayback:\n def __init__(self):\n self.flows = [] # type: typing.List[flow.Flow]\n self.current_thread = None\n self.configured = False\n\n def count(self) -> int:\n if self.current_thread:\n current = 1\n else:\n current = 0\n return current + len(self.flows)\n\n @command.command(\"replay.client.stop\")\n def stop_replay(self) -> None:\n \"\"\"\n Stop client replay.\n \"\"\"\n self.flows = []\n ctx.log.alert(\"Client replay stopped.\")\n ctx.master.addons.trigger(\"update\", [])\n\n @command.command(\"replay.client\")\n def start_replay(self, flows: typing.Sequence[flow.Flow]) -> None:\n \"\"\"\n Replay requests from flows.\n \"\"\"\n self.flows = list(flows)\n ctx.log.alert(\"Replaying %s flows.\" % len(self.flows))\n ctx.master.addons.trigger(\"update\", [])\n\n @command.command(\"replay.client.file\")\n def load_file(self, path: mitmproxy.types.Path) -> None:\n try:\n flows = io.read_flows_from_paths([path])\n except exceptions.FlowReadException as e:\n raise exceptions.CommandError(str(e))\n ctx.log.alert(\"Replaying %s flows.\" % len(self.flows))\n self.flows = flows\n ctx.master.addons.trigger(\"update\", [])\n\n def configure(self, updated):\n if not self.configured and ctx.options.client_replay:\n self.configured = True\n ctx.log.info(\"Client Replay: {}\".format(ctx.options.client_replay))\n try:\n flows = io.read_flows_from_paths(ctx.options.client_replay)\n except exceptions.FlowReadException as e:\n raise exceptions.OptionsError(str(e))\n self.start_replay(flows)\n\n def tick(self):\n current_is_done = self.current_thread and not self.current_thread.is_alive()\n can_start_new = not self.current_thread or current_is_done\n will_start_new = can_start_new and self.flows\n\n if current_is_done:\n self.current_thread = None\n ctx.master.addons.trigger(\"update\", [])\n if will_start_new:\n f = self.flows.pop(0)\n self.current_thread = 
ctx.master.replay_request(f)\n ctx.master.addons.trigger(\"update\", [f])\n if current_is_done and not will_start_new:\n ctx.master.addons.trigger(\"processing_complete\")\n", "path": "mitmproxy/addons/clientplayback.py"}], "after_files": [{"content": "from mitmproxy import exceptions\nfrom mitmproxy import ctx\nfrom mitmproxy import io\nfrom mitmproxy import flow\nfrom mitmproxy import command\nimport mitmproxy.types\n\nimport typing\n\n\nclass ClientPlayback:\n def __init__(self):\n self.flows = [] # type: typing.List[flow.Flow]\n self.current_thread = None\n self.configured = False\n\n def count(self) -> int:\n if self.current_thread:\n current = 1\n else:\n current = 0\n return current + len(self.flows)\n\n @command.command(\"replay.client.stop\")\n def stop_replay(self) -> None:\n \"\"\"\n Stop client replay.\n \"\"\"\n self.flows = []\n ctx.log.alert(\"Client replay stopped.\")\n ctx.master.addons.trigger(\"update\", [])\n\n @command.command(\"replay.client\")\n def start_replay(self, flows: typing.Sequence[flow.Flow]) -> None:\n \"\"\"\n Replay requests from flows.\n \"\"\"\n for f in flows:\n if f.live:\n raise exceptions.CommandError(\"Can't replay live flow.\")\n self.flows = list(flows)\n ctx.log.alert(\"Replaying %s flows.\" % len(self.flows))\n ctx.master.addons.trigger(\"update\", [])\n\n @command.command(\"replay.client.file\")\n def load_file(self, path: mitmproxy.types.Path) -> None:\n try:\n flows = io.read_flows_from_paths([path])\n except exceptions.FlowReadException as e:\n raise exceptions.CommandError(str(e))\n ctx.log.alert(\"Replaying %s flows.\" % len(self.flows))\n self.flows = flows\n ctx.master.addons.trigger(\"update\", [])\n\n def configure(self, updated):\n if not self.configured and ctx.options.client_replay:\n self.configured = True\n ctx.log.info(\"Client Replay: {}\".format(ctx.options.client_replay))\n try:\n flows = io.read_flows_from_paths(ctx.options.client_replay)\n except exceptions.FlowReadException as e:\n raise exceptions.OptionsError(str(e))\n self.start_replay(flows)\n\n def tick(self):\n current_is_done = self.current_thread and not self.current_thread.is_alive()\n can_start_new = not self.current_thread or current_is_done\n will_start_new = can_start_new and self.flows\n\n if current_is_done:\n self.current_thread = None\n ctx.master.addons.trigger(\"update\", [])\n if will_start_new:\n f = self.flows.pop(0)\n self.current_thread = ctx.master.replay_request(f)\n ctx.master.addons.trigger(\"update\", [f])\n if current_is_done and not will_start_new:\n ctx.master.addons.trigger(\"processing_complete\")\n", "path": "mitmproxy/addons/clientplayback.py"}]} | 1,333 | 132 |
gh_patches_debug_9395 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-741 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix versioning on dependencies
Fix dependency package versions to be consistent with the rest of the libraries
--- END ISSUE ---
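A sketch of what "consistent with the rest of the libraries" looks like for this package's `setup.py`: runtime dependencies pinned to the same 4.7.x line, with `aiohttp` moved out of the test-only list. The exact pins below follow the patch recorded later in this entry.

```python
# Runtime requirements aligned with the sibling botbuilder libraries.
REQUIRES = [
    "applicationinsights>=0.11.9",
    "aiohttp==3.6.2",
    "botbuilder-schema>=4.7.1",
    "botframework-connector>=4.7.1",
    "botbuilder-core>=4.7.1",
    "botbuilder-applicationinsights>=4.7.1",
]

# Test-only dependencies stay separate.
TESTS_REQUIRES = [
    "aiounittest==1.3.0",
]
```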
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 REQUIRES = [
8 "applicationinsights>=0.11.9",
9 "botbuilder-schema>=4.4.0b1",
10 "botframework-connector>=4.4.0b1",
11 "botbuilder-core>=4.4.0b1",
12 "botbuilder-applicationinsights>=4.4.0b1",
13 ]
14 TESTS_REQUIRES = [
15 "aiounittest==1.3.0",
16 "aiohttp==3.5.4",
17 ]
18
19 root = os.path.abspath(os.path.dirname(__file__))
20
21 with open(
22 os.path.join(
23 root, "botbuilder", "integration", "applicationinsights", "aiohttp", "about.py"
24 )
25 ) as f:
26 package_info = {}
27 info = f.read()
28 exec(info, package_info)
29
30 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
31 long_description = f.read()
32
33 setup(
34 name=package_info["__title__"],
35 version=package_info["__version__"],
36 url=package_info["__uri__"],
37 author=package_info["__author__"],
38 description=package_info["__description__"],
39 keywords=[
40 "BotBuilderApplicationInsights",
41 "bots",
42 "ai",
43 "botframework",
44 "botbuilder",
45 "aiohttp",
46 ],
47 long_description=long_description,
48 long_description_content_type="text/x-rst",
49 license=package_info["__license__"],
50 packages=["botbuilder.integration.applicationinsights.aiohttp"],
51 install_requires=REQUIRES + TESTS_REQUIRES,
52 tests_require=TESTS_REQUIRES,
53 include_package_data=True,
54 classifiers=[
55 "Programming Language :: Python :: 3.7",
56 "Intended Audience :: Developers",
57 "License :: OSI Approved :: MIT License",
58 "Operating System :: OS Independent",
59 "Development Status :: 5 - Production/Stable",
60 "Topic :: Scientific/Engineering :: Artificial Intelligence",
61 ],
62 )
63
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py
--- a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py
+++ b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py
@@ -6,14 +6,14 @@
REQUIRES = [
"applicationinsights>=0.11.9",
- "botbuilder-schema>=4.4.0b1",
- "botframework-connector>=4.4.0b1",
- "botbuilder-core>=4.4.0b1",
- "botbuilder-applicationinsights>=4.4.0b1",
+ "aiohttp==3.6.2",
+ "botbuilder-schema>=4.7.1",
+ "botframework-connector>=4.7.1",
+ "botbuilder-core>=4.7.1",
+ "botbuilder-applicationinsights>=4.7.1",
]
TESTS_REQUIRES = [
"aiounittest==1.3.0",
- "aiohttp==3.5.4",
]
root = os.path.abspath(os.path.dirname(__file__))
| {"golden_diff": "diff --git a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py\n--- a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py\n+++ b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py\n@@ -6,14 +6,14 @@\n \n REQUIRES = [\n \"applicationinsights>=0.11.9\",\n- \"botbuilder-schema>=4.4.0b1\",\n- \"botframework-connector>=4.4.0b1\",\n- \"botbuilder-core>=4.4.0b1\",\n- \"botbuilder-applicationinsights>=4.4.0b1\",\n+ \"aiohttp==3.6.2\",\n+ \"botbuilder-schema>=4.7.1\",\n+ \"botframework-connector>=4.7.1\",\n+ \"botbuilder-core>=4.7.1\",\n+ \"botbuilder-applicationinsights>=4.7.1\",\n ]\n TESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n- \"aiohttp==3.5.4\",\n ]\n \n root = os.path.abspath(os.path.dirname(__file__))\n", "issue": "Fix versioning on dependencies\nFix dependency package versions to be consistent with the rest of the libraries\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"applicationinsights>=0.11.9\",\n \"botbuilder-schema>=4.4.0b1\",\n \"botframework-connector>=4.4.0b1\",\n \"botbuilder-core>=4.4.0b1\",\n \"botbuilder-applicationinsights>=4.4.0b1\",\n]\nTESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n \"aiohttp==3.5.4\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(\n os.path.join(\n root, \"botbuilder\", \"integration\", \"applicationinsights\", \"aiohttp\", \"about.py\"\n )\n) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderApplicationInsights\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n \"aiohttp\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\"botbuilder.integration.applicationinsights.aiohttp\"],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"applicationinsights>=0.11.9\",\n \"aiohttp==3.6.2\",\n \"botbuilder-schema>=4.7.1\",\n \"botframework-connector>=4.7.1\",\n \"botbuilder-core>=4.7.1\",\n \"botbuilder-applicationinsights>=4.7.1\",\n]\nTESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(\n os.path.join(\n root, \"botbuilder\", \"integration\", \"applicationinsights\", \"aiohttp\", \"about.py\"\n )\n) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderApplicationInsights\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n \"aiohttp\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\"botbuilder.integration.applicationinsights.aiohttp\"],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py"}]} | 865 | 278 |
gh_patches_debug_726 | rasdani/github-patches | git_diff | dotkom__onlineweb4-425 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"Startet studie" in Profile -> Medlemskap requires defined format without specifying it
"Started studie" is a datefield. The problem is that most browsers (like FF, Chrome) don't render these fields with any additional tools which makes filling them out a pain in the ass (Safari@iOS has that fancy datepicker-shit).
The field requires the format 'yyyy-mm-dd', but does not specify this anywhere. This should be fixed somehow.
--- END ISSUE ---
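One lightweight remedy, and the one the patch recorded later in this entry takes, is to surface the expected format as a placeholder on the date input. A simplified Django-forms sketch follows; the real code uses a `ModelForm`, so the plain `forms.Form` and the field declaration here are illustrative only.

```python
from django import forms


class MembershipSettingsForm(forms.Form):
    # Show the expected format directly in the input so browsers without a
    # native date picker still tell the user what to type.
    started_date = forms.DateField(
        required=False,
        widget=forms.TextInput(attrs={"placeholder": "YYYY-MM-DD"}),
    )
```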
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/profiles/forms.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from django import forms
4 from django.utils.translation import ugettext as _
5
6 from apps.profiles.models import Privacy
7 from apps.authentication.models import OnlineUser, FIELD_OF_STUDY_CHOICES
8
9 class ProfileForm(forms.ModelForm):
10
11 class Meta:
12 model = OnlineUser
13
14 fields = ['nickname', 'website', 'phone_number', 'address', 'zip_code', 'allergies', 'mark_rules', ]
15 widgets = {
16 'allergies' : forms.Textarea(attrs={'id' : 'allergies'}),
17 }
18
19 def clean(self):
20 super(ProfileForm, self).clean()
21
22 cleaned_data = self.cleaned_data
23
24 # ZIP code digits only
25 zip_code = cleaned_data['zip_code']
26 if len(zip_code) != 0 and (len(zip_code) != 4 or not zip_code.isdigit()):
27 self._errors['zip_code'] = self.error_class([_(u"Postnummer må bestå av fire siffer.")])
28
29 return cleaned_data
30
31 class ImageForm(forms.ModelForm):
32
33 class Meta:
34 model = OnlineUser
35
36 fields = ['image']
37 widgets = {
38 'image': forms.FileInput(attrs={'class' : 'hidden-input', 'id' : 'image'}),
39 }
40
41 class PrivacyForm(forms.ModelForm):
42
43 class Meta:
44 model = Privacy
45 exclude = ['user']
46
47
48 class MailSettingsForm(forms.ModelForm):
49
50 class Meta:
51 model = OnlineUser
52 fields = ['infomail', ]
53
54
55 class MembershipSettingsForm(forms.ModelForm):
56
57 def __init__(self, *args, **kwargs):
58 super(MembershipSettingsForm, self).__init__(*args, **kwargs)
59 self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'
60
61 class Meta:
62 model = OnlineUser
63 fields = ['field_of_study', 'started_date', ]
64
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/profiles/forms.py b/apps/profiles/forms.py
--- a/apps/profiles/forms.py
+++ b/apps/profiles/forms.py
@@ -61,3 +61,7 @@
class Meta:
model = OnlineUser
fields = ['field_of_study', 'started_date', ]
+
+ widgets = {
+ 'started_date' : forms.TextInput(attrs={'placeholder' : 'YYYY-MM-DD'}),
+ }
| {"golden_diff": "diff --git a/apps/profiles/forms.py b/apps/profiles/forms.py\n--- a/apps/profiles/forms.py\n+++ b/apps/profiles/forms.py\n@@ -61,3 +61,7 @@\n class Meta:\n model = OnlineUser\n fields = ['field_of_study', 'started_date', ]\n+\n+ widgets = {\n+ 'started_date' : forms.TextInput(attrs={'placeholder' : 'YYYY-MM-DD'}),\n+ }\n", "issue": "\"Startet studie\" in Profile -> Medlemskap requires defined format without specifying it\n\"Started studie\" is a datefield. The problem is that most browsers (like FF, Chrome) don't render these fields with any additional tools which makes filling them out a pain in the ass (Safari@iOS has that fancy datepicker-shit).\n\nThe field requires the format 'yyyy-mm-dd', but does not specify this anywhere. This should be fixed somehow.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _\n\nfrom apps.profiles.models import Privacy\nfrom apps.authentication.models import OnlineUser, FIELD_OF_STUDY_CHOICES\n\nclass ProfileForm(forms.ModelForm):\n\n class Meta:\n model = OnlineUser\n\n fields = ['nickname', 'website', 'phone_number', 'address', 'zip_code', 'allergies', 'mark_rules', ]\n widgets = {\n 'allergies' : forms.Textarea(attrs={'id' : 'allergies'}),\n }\n\n def clean(self):\n super(ProfileForm, self).clean()\n\n cleaned_data = self.cleaned_data\n\n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 0 and (len(zip_code) != 4 or not zip_code.isdigit()):\n self._errors['zip_code'] = self.error_class([_(u\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n\n return cleaned_data\n\nclass ImageForm(forms.ModelForm):\n\n class Meta:\n model = OnlineUser\n\n fields = ['image']\n widgets = {\n 'image': forms.FileInput(attrs={'class' : 'hidden-input', 'id' : 'image'}),\n }\n\nclass PrivacyForm(forms.ModelForm):\n\n class Meta:\n model = Privacy\n exclude = ['user']\n\n\nclass MailSettingsForm(forms.ModelForm):\n\n class Meta:\n model = OnlineUser\n fields = ['infomail', ]\n\n\nclass MembershipSettingsForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(MembershipSettingsForm, self).__init__(*args, **kwargs)\n self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'\n\n class Meta:\n model = OnlineUser\n fields = ['field_of_study', 'started_date', ]\n", "path": "apps/profiles/forms.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _\n\nfrom apps.profiles.models import Privacy\nfrom apps.authentication.models import OnlineUser, FIELD_OF_STUDY_CHOICES\n\nclass ProfileForm(forms.ModelForm):\n\n class Meta:\n model = OnlineUser\n\n fields = ['nickname', 'website', 'phone_number', 'address', 'zip_code', 'allergies', 'mark_rules', ]\n widgets = {\n 'allergies' : forms.Textarea(attrs={'id' : 'allergies'}),\n }\n\n def clean(self):\n super(ProfileForm, self).clean()\n\n cleaned_data = self.cleaned_data\n\n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 0 and (len(zip_code) != 4 or not zip_code.isdigit()):\n self._errors['zip_code'] = self.error_class([_(u\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n\n return cleaned_data\n\nclass ImageForm(forms.ModelForm):\n\n class Meta:\n model = OnlineUser\n\n fields = ['image']\n widgets = {\n 'image': forms.FileInput(attrs={'class' : 'hidden-input', 'id' : 'image'}),\n }\n\nclass PrivacyForm(forms.ModelForm):\n\n class Meta:\n model = 
Privacy\n exclude = ['user']\n\n\nclass MailSettingsForm(forms.ModelForm):\n\n class Meta:\n model = OnlineUser\n fields = ['infomail', ]\n\n\nclass MembershipSettingsForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(MembershipSettingsForm, self).__init__(*args, **kwargs)\n self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'\n\n class Meta:\n model = OnlineUser\n fields = ['field_of_study', 'started_date', ]\n\n widgets = {\n 'started_date' : forms.TextInput(attrs={'placeholder' : 'YYYY-MM-DD'}),\n }\n", "path": "apps/profiles/forms.py"}]} | 882 | 95 |
gh_patches_debug_35056 | rasdani/github-patches | git_diff | opsdroid__opsdroid-142 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make crontab parser timezone aware
The crontab matcher should take a timezone as a kwarg. It should also be possible to set a global timezone in the config. Default should be UTC.
--- END ISSUE ---
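Sketch of the behaviour being asked for, mirroring the patch recorded later in this entry: the `match_crontab` decorator grows an optional `timezone` kwarg, and the cron parser resolves the timezone as per-skill value first, then a global `timezone` config key, then UTC. The helper below is a standalone illustration, not the actual parser.

```python
import arrow
import pycron


def crontab_matches(crontab, skill_timezone=None, config=None):
    """Evaluate a crontab expression against 'now' in the right timezone."""
    config = config or {}
    # Precedence: per-skill kwarg, then global config entry, then UTC.
    timezone = skill_timezone or config.get("timezone", "UTC")
    return pycron.is_now(crontab, arrow.now(tz=timezone))
```

A skill would then opt in with something like `@match_crontab("0 9 * * *", timezone="Europe/London")`, while skills that omit the kwarg fall back to the configured or default timezone.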
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/matchers.py`
Content:
```
1 """Decorator functions to use when creating skill modules."""
2
3 import logging
4
5 from opsdroid.helper import get_opsdroid
6 from opsdroid.web import Web
7
8
9 _LOGGER = logging.getLogger(__name__)
10
11
12 def match_regex(regex):
13 """Return regex match decorator."""
14 def matcher(func):
15 """Add decorated function to skills list for regex matching."""
16 opsdroid = get_opsdroid()
17 opsdroid.skills.append({"regex": regex, "skill": func,
18 "config":
19 opsdroid.loader.current_import_config})
20 return func
21 return matcher
22
23
24 def match_apiai_action(action):
25 """Return apiai action match decorator."""
26 def matcher(func):
27 """Add decorated function to skills list for apiai matching."""
28 opsdroid = get_opsdroid()
29 opsdroid.skills.append({"apiai_action": action, "skill": func,
30 "config":
31 opsdroid.loader.current_import_config})
32 return func
33 return matcher
34
35
36 def match_apiai_intent(intent):
37 """Return apiai intent match decorator."""
38 def matcher(func):
39 """Add decorated function to skills list for apiai matching."""
40 opsdroid = get_opsdroid()
41 opsdroid.skills.append({"apiai_intent": intent, "skill": func,
42 "config":
43 opsdroid.loader.current_import_config})
44 return func
45 return matcher
46
47
48 def match_crontab(crontab):
49 """Return crontab match decorator."""
50 def matcher(func):
51 """Add decorated function to skills list for crontab matching."""
52 opsdroid = get_opsdroid()
53 opsdroid.skills.append({"crontab": crontab, "skill": func,
54 "config":
55 opsdroid.loader.current_import_config})
56 return func
57 return matcher
58
59
60 def match_webhook(webhook):
61 """Return webhook match decorator."""
62 def matcher(func):
63 """Add decorated function to skills list for webhook matching."""
64 opsdroid = get_opsdroid()
65 config = opsdroid.loader.current_import_config
66 opsdroid.skills.append({"webhook": webhook, "skill": func,
67 "config": config})
68
69 async def wrapper(req, opsdroid=opsdroid, config=config):
70 """Wrap up the aiohttp handler."""
71 _LOGGER.info("Running skill %s via webhook", webhook)
72 opsdroid.stats["webhooks_called"] = \
73 opsdroid.stats["webhooks_called"] + 1
74 await func(opsdroid, config, req)
75 return Web.build_response(200, {"called_skill": webhook})
76
77 opsdroid.web_server.web_app.router.add_post(
78 "/skill/{}/{}".format(config["name"], webhook), wrapper)
79 opsdroid.web_server.web_app.router.add_post(
80 "/skill/{}/{}/".format(config["name"], webhook), wrapper)
81
82 return func
83 return matcher
84
```
Path: `opsdroid/parsers/crontab.py`
Content:
```
1 """A helper function for parsing and executing crontab skills."""
2
3 import logging
4 import asyncio
5 from datetime import datetime
6
7 import pycron
8
9
10 _LOGGER = logging.getLogger(__name__)
11
12
13 async def parse_crontab(opsdroid):
14 """Parse all crontab skills against the current time."""
15 # pylint: disable=broad-except
16 # We want to catch all exceptions coming from a skill module and not
17 # halt the application. If a skill throws an exception it just doesn't
18 # give a response to the user, so an error response should be given.
19 while opsdroid.eventloop.is_running():
20 await asyncio.sleep(60 - datetime.now().time().second)
21 _LOGGER.debug("Running crontab skills")
22 for skill in opsdroid.skills:
23 if "crontab" in skill and pycron.is_now(skill["crontab"]):
24 try:
25 await skill["skill"](opsdroid, skill["config"], None)
26 except Exception:
27 _LOGGER.exception("Exception when executing cron skill.")
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/matchers.py b/opsdroid/matchers.py
--- a/opsdroid/matchers.py
+++ b/opsdroid/matchers.py
@@ -45,14 +45,14 @@
return matcher
-def match_crontab(crontab):
+def match_crontab(crontab, timezone=None):
"""Return crontab match decorator."""
def matcher(func):
"""Add decorated function to skills list for crontab matching."""
opsdroid = get_opsdroid()
+ config = opsdroid.loader.current_import_config
opsdroid.skills.append({"crontab": crontab, "skill": func,
- "config":
- opsdroid.loader.current_import_config})
+ "config": config, "timezone": timezone})
return func
return matcher
diff --git a/opsdroid/parsers/crontab.py b/opsdroid/parsers/crontab.py
--- a/opsdroid/parsers/crontab.py
+++ b/opsdroid/parsers/crontab.py
@@ -1,9 +1,9 @@
"""A helper function for parsing and executing crontab skills."""
-import logging
import asyncio
-from datetime import datetime
+import logging
+import arrow
import pycron
@@ -17,11 +17,17 @@
# halt the application. If a skill throws an exception it just doesn't
# give a response to the user, so an error response should be given.
while opsdroid.eventloop.is_running():
- await asyncio.sleep(60 - datetime.now().time().second)
+ await asyncio.sleep(60 - arrow.now().time().second)
_LOGGER.debug("Running crontab skills")
for skill in opsdroid.skills:
- if "crontab" in skill and pycron.is_now(skill["crontab"]):
- try:
- await skill["skill"](opsdroid, skill["config"], None)
- except Exception:
- _LOGGER.exception("Exception when executing cron skill.")
+ if "crontab" in skill:
+ if skill["timezone"] is not None:
+ timezone = skill["timezone"]
+ else:
+ timezone = opsdroid.config.get("timezone", "UTC")
+ if pycron.is_now(skill["crontab"], arrow.now(tz=timezone)):
+ try:
+ await skill["skill"](opsdroid, skill["config"], None)
+ except Exception:
+ _LOGGER.exception(
+ "Exception when executing cron skill.")
| {"golden_diff": "diff --git a/opsdroid/matchers.py b/opsdroid/matchers.py\n--- a/opsdroid/matchers.py\n+++ b/opsdroid/matchers.py\n@@ -45,14 +45,14 @@\n return matcher\n \n \n-def match_crontab(crontab):\n+def match_crontab(crontab, timezone=None):\n \"\"\"Return crontab match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for crontab matching.\"\"\"\n opsdroid = get_opsdroid()\n+ config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"crontab\": crontab, \"skill\": func,\n- \"config\":\n- opsdroid.loader.current_import_config})\n+ \"config\": config, \"timezone\": timezone})\n return func\n return matcher\n \ndiff --git a/opsdroid/parsers/crontab.py b/opsdroid/parsers/crontab.py\n--- a/opsdroid/parsers/crontab.py\n+++ b/opsdroid/parsers/crontab.py\n@@ -1,9 +1,9 @@\n \"\"\"A helper function for parsing and executing crontab skills.\"\"\"\n \n-import logging\n import asyncio\n-from datetime import datetime\n+import logging\n \n+import arrow\n import pycron\n \n \n@@ -17,11 +17,17 @@\n # halt the application. If a skill throws an exception it just doesn't\n # give a response to the user, so an error response should be given.\n while opsdroid.eventloop.is_running():\n- await asyncio.sleep(60 - datetime.now().time().second)\n+ await asyncio.sleep(60 - arrow.now().time().second)\n _LOGGER.debug(\"Running crontab skills\")\n for skill in opsdroid.skills:\n- if \"crontab\" in skill and pycron.is_now(skill[\"crontab\"]):\n- try:\n- await skill[\"skill\"](opsdroid, skill[\"config\"], None)\n- except Exception:\n- _LOGGER.exception(\"Exception when executing cron skill.\")\n+ if \"crontab\" in skill:\n+ if skill[\"timezone\"] is not None:\n+ timezone = skill[\"timezone\"]\n+ else:\n+ timezone = opsdroid.config.get(\"timezone\", \"UTC\")\n+ if pycron.is_now(skill[\"crontab\"], arrow.now(tz=timezone)):\n+ try:\n+ await skill[\"skill\"](opsdroid, skill[\"config\"], None)\n+ except Exception:\n+ _LOGGER.exception(\n+ \"Exception when executing cron skill.\")\n", "issue": "Make crontab parser timezone aware\nThe crontab matcher should take a timezone as a kwarg. It should also be possible to set a global timezone in the config. 
Default should be UTC.\n", "before_files": [{"content": "\"\"\"Decorator functions to use when creating skill modules.\"\"\"\n\nimport logging\n\nfrom opsdroid.helper import get_opsdroid\nfrom opsdroid.web import Web\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef match_regex(regex):\n \"\"\"Return regex match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for regex matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"regex\": regex, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_apiai_action(action):\n \"\"\"Return apiai action match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for apiai matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"apiai_action\": action, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_apiai_intent(intent):\n \"\"\"Return apiai intent match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for apiai matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"apiai_intent\": intent, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_crontab(crontab):\n \"\"\"Return crontab match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for crontab matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"crontab\": crontab, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_webhook(webhook):\n \"\"\"Return webhook match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for webhook matching.\"\"\"\n opsdroid = get_opsdroid()\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"webhook\": webhook, \"skill\": func,\n \"config\": config})\n\n async def wrapper(req, opsdroid=opsdroid, config=config):\n \"\"\"Wrap up the aiohttp handler.\"\"\"\n _LOGGER.info(\"Running skill %s via webhook\", webhook)\n opsdroid.stats[\"webhooks_called\"] = \\\n opsdroid.stats[\"webhooks_called\"] + 1\n await func(opsdroid, config, req)\n return Web.build_response(200, {\"called_skill\": webhook})\n\n opsdroid.web_server.web_app.router.add_post(\n \"/skill/{}/{}\".format(config[\"name\"], webhook), wrapper)\n opsdroid.web_server.web_app.router.add_post(\n \"/skill/{}/{}/\".format(config[\"name\"], webhook), wrapper)\n\n return func\n return matcher\n", "path": "opsdroid/matchers.py"}, {"content": "\"\"\"A helper function for parsing and executing crontab skills.\"\"\"\n\nimport logging\nimport asyncio\nfrom datetime import datetime\n\nimport pycron\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def parse_crontab(opsdroid):\n \"\"\"Parse all crontab skills against the current time.\"\"\"\n # pylint: disable=broad-except\n # We want to catch all exceptions coming from a skill module and not\n # halt the application. 
If a skill throws an exception it just doesn't\n # give a response to the user, so an error response should be given.\n while opsdroid.eventloop.is_running():\n await asyncio.sleep(60 - datetime.now().time().second)\n _LOGGER.debug(\"Running crontab skills\")\n for skill in opsdroid.skills:\n if \"crontab\" in skill and pycron.is_now(skill[\"crontab\"]):\n try:\n await skill[\"skill\"](opsdroid, skill[\"config\"], None)\n except Exception:\n _LOGGER.exception(\"Exception when executing cron skill.\")\n", "path": "opsdroid/parsers/crontab.py"}], "after_files": [{"content": "\"\"\"Decorator functions to use when creating skill modules.\"\"\"\n\nimport logging\n\nfrom opsdroid.helper import get_opsdroid\nfrom opsdroid.web import Web\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef match_regex(regex):\n \"\"\"Return regex match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for regex matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"regex\": regex, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_apiai_action(action):\n \"\"\"Return apiai action match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for apiai matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"apiai_action\": action, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_apiai_intent(intent):\n \"\"\"Return apiai intent match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for apiai matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"apiai_intent\": intent, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_crontab(crontab, timezone=None):\n \"\"\"Return crontab match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for crontab matching.\"\"\"\n opsdroid = get_opsdroid()\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"crontab\": crontab, \"skill\": func,\n \"config\": config, \"timezone\": timezone})\n return func\n return matcher\n\n\ndef match_webhook(webhook):\n \"\"\"Return webhook match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for webhook matching.\"\"\"\n opsdroid = get_opsdroid()\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"webhook\": webhook, \"skill\": func,\n \"config\": config})\n\n async def wrapper(req, opsdroid=opsdroid, config=config):\n \"\"\"Wrap up the aiohttp handler.\"\"\"\n _LOGGER.info(\"Running skill %s via webhook\", webhook)\n opsdroid.stats[\"webhooks_called\"] = \\\n opsdroid.stats[\"webhooks_called\"] + 1\n await func(opsdroid, config, req)\n return Web.build_response(200, {\"called_skill\": webhook})\n\n opsdroid.web_server.web_app.router.add_post(\n \"/skill/{}/{}\".format(config[\"name\"], webhook), wrapper)\n opsdroid.web_server.web_app.router.add_post(\n \"/skill/{}/{}/\".format(config[\"name\"], webhook), wrapper)\n\n return func\n return matcher\n", "path": "opsdroid/matchers.py"}, {"content": "\"\"\"A helper function for parsing and executing crontab skills.\"\"\"\n\nimport asyncio\nimport logging\n\nimport arrow\nimport pycron\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def parse_crontab(opsdroid):\n \"\"\"Parse all crontab skills against the current time.\"\"\"\n # pylint: 
disable=broad-except\n # We want to catch all exceptions coming from a skill module and not\n # halt the application. If a skill throws an exception it just doesn't\n # give a response to the user, so an error response should be given.\n while opsdroid.eventloop.is_running():\n await asyncio.sleep(60 - arrow.now().time().second)\n _LOGGER.debug(\"Running crontab skills\")\n for skill in opsdroid.skills:\n if \"crontab\" in skill:\n if skill[\"timezone\"] is not None:\n timezone = skill[\"timezone\"]\n else:\n timezone = opsdroid.config.get(\"timezone\", \"UTC\")\n if pycron.is_now(skill[\"crontab\"], arrow.now(tz=timezone)):\n try:\n await skill[\"skill\"](opsdroid, skill[\"config\"], None)\n except Exception:\n _LOGGER.exception(\n \"Exception when executing cron skill.\")\n", "path": "opsdroid/parsers/crontab.py"}]} | 1,380 | 573 |
gh_patches_debug_37008 | rasdani/github-patches | git_diff | great-expectations__great_expectations-2966 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
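The issue itself is a one-liner; for clarity, here is the contrast it describes, runnable on both Python 2 and 3 (the variable names are illustrative — the profiler file shown below does not itself contain the pattern):

```python
from __future__ import division  # must be the first statement in the module

successes = 7
total = 9

# Old Python 2 workaround this issue wants to retire: coerce to float first.
ratio_old = 1. * successes / total

# With the __future__ import, plain division is already non-truncating.
ratio_new = successes / total

assert abs(ratio_old - ratio_new) < 1e-12
```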
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/rule_based_profiler/profiler.py`
Content:
```
1 import uuid
2 from typing import Dict, List, Optional, Union
3
4 import great_expectations.exceptions as ge_exceptions
5 from great_expectations import DataContext
6 from great_expectations.core import ExpectationConfiguration, ExpectationSuite
7 from great_expectations.data_context.util import instantiate_class_from_config
8 from great_expectations.rule_based_profiler.domain_builder.domain_builder import (
9 DomainBuilder,
10 )
11 from great_expectations.rule_based_profiler.expectation_configuration_builder.expectation_configuration_builder import (
12 ExpectationConfigurationBuilder,
13 )
14 from great_expectations.rule_based_profiler.parameter_builder.parameter_builder import (
15 ParameterBuilder,
16 )
17 from great_expectations.rule_based_profiler.parameter_builder.parameter_container import (
18 ParameterContainer,
19 build_parameter_container_for_variables,
20 )
21 from great_expectations.rule_based_profiler.rule.rule import Rule
22
23
24 class Profiler:
25 """
26 Profiler object serves to profile, or automatically evaluate a set of rules, upon a given
27 batch / multiple batches of data.
28 """
29
30 def __init__(
31 self,
32 *,
33 profiler_config: Optional[Dict[str, Dict[str, Dict]]] = None,
34 data_context: Optional[DataContext] = None,
35 ):
36 """
37 Create a new Profiler using configured rules.
38 For a rule or an item in a rule configuration, instantiates the following if
39 available: a domain builder, a parameter builder, and a configuration builder.
40 These will be used to define profiler computation patterns.
41
42 Args:
43 profiler_config: Variables and Rules configuration as a dictionary
44 data_context: DataContext object that defines a full runtime environment (data access, etc.)
45 """
46 self._data_context = data_context
47 self._rules = []
48
49 rules_configs: Dict[str, Dict] = profiler_config.get("rules", {})
50 rule_name: str
51 rule_config: dict
52
53 for rule_name, rule_config in rules_configs.items():
54 domain_builder_config: dict = rule_config.get("domain_builder")
55
56 if domain_builder_config is None:
57 raise ge_exceptions.ProfilerConfigurationError(
58 message=f'Invalid rule "{rule_name}": no domain_builder found.'
59 )
60
61 domain_builder: DomainBuilder = instantiate_class_from_config(
62 config=domain_builder_config,
63 runtime_environment={"data_context": data_context},
64 config_defaults={
65 "module_name": "great_expectations.rule_based_profiler.domain_builder"
66 },
67 )
68
69 parameter_builders: List[ParameterBuilder] = []
70
71 parameter_builder_configs: dict = rule_config.get("parameter_builders")
72
73 if parameter_builder_configs:
74 parameter_builder_config: dict
75 for parameter_builder_config in parameter_builder_configs:
76 parameter_builders.append(
77 instantiate_class_from_config(
78 config=parameter_builder_config,
79 runtime_environment={"data_context": data_context},
80 config_defaults={
81 "module_name": "great_expectations.rule_based_profiler.parameter_builder"
82 },
83 )
84 )
85
86 expectation_configuration_builders: List[
87 ExpectationConfigurationBuilder
88 ] = []
89
90 expectation_configuration_builder_configs: dict = rule_config.get(
91 "expectation_configuration_builders"
92 )
93
94 if expectation_configuration_builder_configs:
95 expectation_configuration_builder_config: dict
96 for (
97 expectation_configuration_builder_config
98 ) in expectation_configuration_builder_configs:
99 expectation_configuration_builders.append(
100 instantiate_class_from_config(
101 config=expectation_configuration_builder_config,
102 runtime_environment={},
103 config_defaults={
104 "class_name": "DefaultExpectationConfigurationBuilder",
105 "module_name": "great_expectations.rule_based_profiler.expectation_configuration_builder",
106 },
107 )
108 )
109
110 variables_configs: Dict[str, Dict] = profiler_config.get("variables", {})
111 variables: Optional[ParameterContainer] = None
112
113 if variables_configs:
114 variables = build_parameter_container_for_variables(
115 variables_configs=variables_configs
116 )
117
118 self._rules.append(
119 Rule(
120 name=rule_name,
121 domain_builder=domain_builder,
122 parameter_builders=parameter_builders,
123 expectation_configuration_builders=expectation_configuration_builders,
124 variables=variables,
125 )
126 )
127
128 def profile(
129 self,
130 *,
131 expectation_suite_name: Optional[str] = None,
132 ) -> ExpectationSuite:
133 """
134 Args:
135 :param expectation_suite_name: A name for returned Expectation suite.
136 :return: Set of rule evaluation results in the form of an ExpectationSuite
137 """
138 if expectation_suite_name is None:
139 expectation_suite_name = (
140 f"tmp.profiler_{self.__class__.__name__}_suite_{str(uuid.uuid4())[:8]}"
141 )
142
143 expectation_suite: ExpectationSuite = ExpectationSuite(
144 expectation_suite_name=expectation_suite_name
145 )
146
147 rule: Rule
148 for rule in self._rules:
149 expectation_configurations: List[ExpectationConfiguration] = rule.generate()
150 expectation_configuration: ExpectationConfiguration
151 for expectation_configuration in expectation_configurations:
152 expectation_suite.add_expectation(
153 expectation_configuration=expectation_configuration
154 )
155
156 return expectation_suite
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/great_expectations/rule_based_profiler/profiler.py b/great_expectations/rule_based_profiler/profiler.py
--- a/great_expectations/rule_based_profiler/profiler.py
+++ b/great_expectations/rule_based_profiler/profiler.py
@@ -43,10 +43,11 @@
profiler_config: Variables and Rules configuration as a dictionary
data_context: DataContext object that defines a full runtime environment (data access, etc.)
"""
+ self._profiler_config = profiler_config
self._data_context = data_context
self._rules = []
- rules_configs: Dict[str, Dict] = profiler_config.get("rules", {})
+ rules_configs: Dict[str, Dict] = self._profiler_config.get("rules", {})
rule_name: str
rule_config: dict
@@ -107,7 +108,9 @@
)
)
- variables_configs: Dict[str, Dict] = profiler_config.get("variables", {})
+ variables_configs: Dict[str, Dict] = self._profiler_config.get(
+ "variables", {}
+ )
variables: Optional[ParameterContainer] = None
if variables_configs:
@@ -129,10 +132,12 @@
self,
*,
expectation_suite_name: Optional[str] = None,
+ include_citation: bool = True,
) -> ExpectationSuite:
"""
Args:
:param expectation_suite_name: A name for returned Expectation suite.
+ :param include_citation: Whether or not to include the Profiler config in the metadata for the ExpectationSuite produced by the Profiler
:return: Set of rule evaluation results in the form of an ExpectationSuite
"""
if expectation_suite_name is None:
@@ -144,6 +149,12 @@
expectation_suite_name=expectation_suite_name
)
+ if include_citation:
+ expectation_suite.add_citation(
+ comment="Suite created by Rule-Based Profiler with the following config",
+ profiler_config=self._profiler_config,
+ )
+
rule: Rule
for rule in self._rules:
expectation_configurations: List[ExpectationConfiguration] = rule.generate()
| {"golden_diff": "diff --git a/great_expectations/rule_based_profiler/profiler.py b/great_expectations/rule_based_profiler/profiler.py\n--- a/great_expectations/rule_based_profiler/profiler.py\n+++ b/great_expectations/rule_based_profiler/profiler.py\n@@ -43,10 +43,11 @@\n profiler_config: Variables and Rules configuration as a dictionary\n data_context: DataContext object that defines a full runtime environment (data access, etc.)\n \"\"\"\n+ self._profiler_config = profiler_config\n self._data_context = data_context\n self._rules = []\n \n- rules_configs: Dict[str, Dict] = profiler_config.get(\"rules\", {})\n+ rules_configs: Dict[str, Dict] = self._profiler_config.get(\"rules\", {})\n rule_name: str\n rule_config: dict\n \n@@ -107,7 +108,9 @@\n )\n )\n \n- variables_configs: Dict[str, Dict] = profiler_config.get(\"variables\", {})\n+ variables_configs: Dict[str, Dict] = self._profiler_config.get(\n+ \"variables\", {}\n+ )\n variables: Optional[ParameterContainer] = None\n \n if variables_configs:\n@@ -129,10 +132,12 @@\n self,\n *,\n expectation_suite_name: Optional[str] = None,\n+ include_citation: bool = True,\n ) -> ExpectationSuite:\n \"\"\"\n Args:\n :param expectation_suite_name: A name for returned Expectation suite.\n+ :param include_citation: Whether or not to include the Profiler config in the metadata for the ExpectationSuite produced by the Profiler\n :return: Set of rule evaluation results in the form of an ExpectationSuite\n \"\"\"\n if expectation_suite_name is None:\n@@ -144,6 +149,12 @@\n expectation_suite_name=expectation_suite_name\n )\n \n+ if include_citation:\n+ expectation_suite.add_citation(\n+ comment=\"Suite created by Rule-Based Profiler with the following config\",\n+ profiler_config=self._profiler_config,\n+ )\n+\n rule: Rule\n for rule in self._rules:\n expectation_configurations: List[ExpectationConfiguration] = rule.generate()\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import uuid\nfrom typing import Dict, List, Optional, Union\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations import DataContext\nfrom great_expectations.core import ExpectationConfiguration, ExpectationSuite\nfrom great_expectations.data_context.util import instantiate_class_from_config\nfrom great_expectations.rule_based_profiler.domain_builder.domain_builder import (\n DomainBuilder,\n)\nfrom great_expectations.rule_based_profiler.expectation_configuration_builder.expectation_configuration_builder import (\n ExpectationConfigurationBuilder,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder.parameter_builder import (\n ParameterBuilder,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder.parameter_container import (\n ParameterContainer,\n build_parameter_container_for_variables,\n)\nfrom great_expectations.rule_based_profiler.rule.rule import Rule\n\n\nclass Profiler:\n \"\"\"\n Profiler object serves to profile, or automatically evaluate a set of rules, upon a given\n batch / multiple batches of data.\n \"\"\"\n\n def __init__(\n self,\n *,\n profiler_config: Optional[Dict[str, Dict[str, Dict]]] = None,\n data_context: Optional[DataContext] = None,\n ):\n \"\"\"\n Create a new Profiler using configured rules.\n For a rule or an item in a rule configuration, instantiates the following if\n available: a domain builder, a parameter builder, and a configuration builder.\n These will be used to define profiler 
computation patterns.\n\n Args:\n profiler_config: Variables and Rules configuration as a dictionary\n data_context: DataContext object that defines a full runtime environment (data access, etc.)\n \"\"\"\n self._data_context = data_context\n self._rules = []\n\n rules_configs: Dict[str, Dict] = profiler_config.get(\"rules\", {})\n rule_name: str\n rule_config: dict\n\n for rule_name, rule_config in rules_configs.items():\n domain_builder_config: dict = rule_config.get(\"domain_builder\")\n\n if domain_builder_config is None:\n raise ge_exceptions.ProfilerConfigurationError(\n message=f'Invalid rule \"{rule_name}\": no domain_builder found.'\n )\n\n domain_builder: DomainBuilder = instantiate_class_from_config(\n config=domain_builder_config,\n runtime_environment={\"data_context\": data_context},\n config_defaults={\n \"module_name\": \"great_expectations.rule_based_profiler.domain_builder\"\n },\n )\n\n parameter_builders: List[ParameterBuilder] = []\n\n parameter_builder_configs: dict = rule_config.get(\"parameter_builders\")\n\n if parameter_builder_configs:\n parameter_builder_config: dict\n for parameter_builder_config in parameter_builder_configs:\n parameter_builders.append(\n instantiate_class_from_config(\n config=parameter_builder_config,\n runtime_environment={\"data_context\": data_context},\n config_defaults={\n \"module_name\": \"great_expectations.rule_based_profiler.parameter_builder\"\n },\n )\n )\n\n expectation_configuration_builders: List[\n ExpectationConfigurationBuilder\n ] = []\n\n expectation_configuration_builder_configs: dict = rule_config.get(\n \"expectation_configuration_builders\"\n )\n\n if expectation_configuration_builder_configs:\n expectation_configuration_builder_config: dict\n for (\n expectation_configuration_builder_config\n ) in expectation_configuration_builder_configs:\n expectation_configuration_builders.append(\n instantiate_class_from_config(\n config=expectation_configuration_builder_config,\n runtime_environment={},\n config_defaults={\n \"class_name\": \"DefaultExpectationConfigurationBuilder\",\n \"module_name\": \"great_expectations.rule_based_profiler.expectation_configuration_builder\",\n },\n )\n )\n\n variables_configs: Dict[str, Dict] = profiler_config.get(\"variables\", {})\n variables: Optional[ParameterContainer] = None\n\n if variables_configs:\n variables = build_parameter_container_for_variables(\n variables_configs=variables_configs\n )\n\n self._rules.append(\n Rule(\n name=rule_name,\n domain_builder=domain_builder,\n parameter_builders=parameter_builders,\n expectation_configuration_builders=expectation_configuration_builders,\n variables=variables,\n )\n )\n\n def profile(\n self,\n *,\n expectation_suite_name: Optional[str] = None,\n ) -> ExpectationSuite:\n \"\"\"\n Args:\n :param expectation_suite_name: A name for returned Expectation suite.\n :return: Set of rule evaluation results in the form of an ExpectationSuite\n \"\"\"\n if expectation_suite_name is None:\n expectation_suite_name = (\n f\"tmp.profiler_{self.__class__.__name__}_suite_{str(uuid.uuid4())[:8]}\"\n )\n\n expectation_suite: ExpectationSuite = ExpectationSuite(\n expectation_suite_name=expectation_suite_name\n )\n\n rule: Rule\n for rule in self._rules:\n expectation_configurations: List[ExpectationConfiguration] = rule.generate()\n expectation_configuration: ExpectationConfiguration\n for expectation_configuration in expectation_configurations:\n expectation_suite.add_expectation(\n expectation_configuration=expectation_configuration\n )\n\n return 
expectation_suite\n", "path": "great_expectations/rule_based_profiler/profiler.py"}], "after_files": [{"content": "import uuid\nfrom typing import Dict, List, Optional, Union\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations import DataContext\nfrom great_expectations.core import ExpectationConfiguration, ExpectationSuite\nfrom great_expectations.data_context.util import instantiate_class_from_config\nfrom great_expectations.rule_based_profiler.domain_builder.domain_builder import (\n DomainBuilder,\n)\nfrom great_expectations.rule_based_profiler.expectation_configuration_builder.expectation_configuration_builder import (\n ExpectationConfigurationBuilder,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder.parameter_builder import (\n ParameterBuilder,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder.parameter_container import (\n ParameterContainer,\n build_parameter_container_for_variables,\n)\nfrom great_expectations.rule_based_profiler.rule.rule import Rule\n\n\nclass Profiler:\n \"\"\"\n Profiler object serves to profile, or automatically evaluate a set of rules, upon a given\n batch / multiple batches of data.\n \"\"\"\n\n def __init__(\n self,\n *,\n profiler_config: Optional[Dict[str, Dict[str, Dict]]] = None,\n data_context: Optional[DataContext] = None,\n ):\n \"\"\"\n Create a new Profiler using configured rules.\n For a rule or an item in a rule configuration, instantiates the following if\n available: a domain builder, a parameter builder, and a configuration builder.\n These will be used to define profiler computation patterns.\n\n Args:\n profiler_config: Variables and Rules configuration as a dictionary\n data_context: DataContext object that defines a full runtime environment (data access, etc.)\n \"\"\"\n self._profiler_config = profiler_config\n self._data_context = data_context\n self._rules = []\n\n rules_configs: Dict[str, Dict] = self._profiler_config.get(\"rules\", {})\n rule_name: str\n rule_config: dict\n\n for rule_name, rule_config in rules_configs.items():\n domain_builder_config: dict = rule_config.get(\"domain_builder\")\n\n if domain_builder_config is None:\n raise ge_exceptions.ProfilerConfigurationError(\n message=f'Invalid rule \"{rule_name}\": no domain_builder found.'\n )\n\n domain_builder: DomainBuilder = instantiate_class_from_config(\n config=domain_builder_config,\n runtime_environment={\"data_context\": data_context},\n config_defaults={\n \"module_name\": \"great_expectations.rule_based_profiler.domain_builder\"\n },\n )\n\n parameter_builders: List[ParameterBuilder] = []\n\n parameter_builder_configs: dict = rule_config.get(\"parameter_builders\")\n\n if parameter_builder_configs:\n parameter_builder_config: dict\n for parameter_builder_config in parameter_builder_configs:\n parameter_builders.append(\n instantiate_class_from_config(\n config=parameter_builder_config,\n runtime_environment={\"data_context\": data_context},\n config_defaults={\n \"module_name\": \"great_expectations.rule_based_profiler.parameter_builder\"\n },\n )\n )\n\n expectation_configuration_builders: List[\n ExpectationConfigurationBuilder\n ] = []\n\n expectation_configuration_builder_configs: dict = rule_config.get(\n \"expectation_configuration_builders\"\n )\n\n if expectation_configuration_builder_configs:\n expectation_configuration_builder_config: dict\n for (\n expectation_configuration_builder_config\n ) in expectation_configuration_builder_configs:\n expectation_configuration_builders.append(\n 
instantiate_class_from_config(\n config=expectation_configuration_builder_config,\n runtime_environment={},\n config_defaults={\n \"class_name\": \"DefaultExpectationConfigurationBuilder\",\n \"module_name\": \"great_expectations.rule_based_profiler.expectation_configuration_builder\",\n },\n )\n )\n\n variables_configs: Dict[str, Dict] = self._profiler_config.get(\n \"variables\", {}\n )\n variables: Optional[ParameterContainer] = None\n\n if variables_configs:\n variables = build_parameter_container_for_variables(\n variables_configs=variables_configs\n )\n\n self._rules.append(\n Rule(\n name=rule_name,\n domain_builder=domain_builder,\n parameter_builders=parameter_builders,\n expectation_configuration_builders=expectation_configuration_builders,\n variables=variables,\n )\n )\n\n def profile(\n self,\n *,\n expectation_suite_name: Optional[str] = None,\n include_citation: bool = True,\n ) -> ExpectationSuite:\n \"\"\"\n Args:\n :param expectation_suite_name: A name for returned Expectation suite.\n :param include_citation: Whether or not to include the Profiler config in the metadata for the ExpectationSuite produced by the Profiler\n :return: Set of rule evaluation results in the form of an ExpectationSuite\n \"\"\"\n if expectation_suite_name is None:\n expectation_suite_name = (\n f\"tmp.profiler_{self.__class__.__name__}_suite_{str(uuid.uuid4())[:8]}\"\n )\n\n expectation_suite: ExpectationSuite = ExpectationSuite(\n expectation_suite_name=expectation_suite_name\n )\n\n if include_citation:\n expectation_suite.add_citation(\n comment=\"Suite created by Rule-Based Profiler with the following config\",\n profiler_config=self._profiler_config,\n )\n\n rule: Rule\n for rule in self._rules:\n expectation_configurations: List[ExpectationConfiguration] = rule.generate()\n expectation_configuration: ExpectationConfiguration\n for expectation_configuration in expectation_configurations:\n expectation_suite.add_expectation(\n expectation_configuration=expectation_configuration\n )\n\n return expectation_suite\n", "path": "great_expectations/rule_based_profiler/profiler.py"}]} | 1,712 | 493 |
gh_patches_debug_36047 | rasdani/github-patches | git_diff | ivy-llc__ivy-15973 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Sparse Array Functions to Paddle Backend
Add [Sparse Array Functions](https://www.paddlepaddle.org.cn/documentation/docs/en/api/index\_en.html) to Paddle backend
\_
>Please keep in mind that the proper way to link an issue to this list is to comment "- [ ] #issue\_number" while the issue's title only includes the name of the function you've chosen.
\_
## Experimental
- [x] is\_native\_sparse\_array
- [x] native\_sparse\_array
- [x] native\_sparse\_array\_to\_indices\_values\_and\_shape
--- END ISSUE ---
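As a rough sketch of what the three checklist items amount to, the snippet below exercises Paddle's own sparse API directly (it assumes a Paddle build that ships `paddle.sparse`); the Ivy wrappers in the patch further down rely on the same calls.
```python
# Sketch only: Paddle's native sparse COO tensor and the pieces the Ivy
# functions need in order to recognise and unpack it.
import paddle

indices = [[0, 1, 2], [1, 2, 0]]   # 2 x nnz COO coordinates
values = [1.0, 2.0, 3.0]
dense_shape = [3, 3]

coo = paddle.sparse.sparse_coo_tensor(indices, values, shape=dense_shape)

# is_native_sparse_array: a native sparse tensor reports its format.
assert coo.is_sparse_coo()

# native_sparse_array_to_indices_values_and_shape: recover the components.
print(coo.indices(), coo.values(), coo.shape)
```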
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/backends/paddle/experimental/sparse_array.py`
Content:
```
1 from ivy.utils.exceptions import IvyNotImplementedException
2 import paddle
3
4
5 def is_native_sparse_array(x: paddle.Tensor) -> bool:
6 return x.is_sparse_coo() or x.is_sparse_csr()
7
8
9 def native_sparse_array(
10 data=None,
11 *,
12 coo_indices=None,
13 crow_indices=None,
14 col_indices=None,
15 ccol_indices=None,
16 row_indices=None,
17 values=None,
18 dense_shape=None,
19 format="coo",
20 ):
21 raise IvyNotImplementedException()
22
23
24 def native_sparse_array_to_indices_values_and_shape(x):
25 raise IvyNotImplementedException()
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/backends/paddle/experimental/sparse_array.py b/ivy/functional/backends/paddle/experimental/sparse_array.py
--- a/ivy/functional/backends/paddle/experimental/sparse_array.py
+++ b/ivy/functional/backends/paddle/experimental/sparse_array.py
@@ -1,11 +1,26 @@
+import ivy
+from ivy.functional.ivy.experimental.sparse_array import (
+ _verify_coo_components,
+ _verify_csr_components,
+ _is_data_not_indices_values_and_shape,
+)
+from ivy.func_wrapper import (
+ with_unsupported_device_and_dtypes,
+)
from ivy.utils.exceptions import IvyNotImplementedException
import paddle
+# local
+from .. import backend_version
+
def is_native_sparse_array(x: paddle.Tensor) -> bool:
return x.is_sparse_coo() or x.is_sparse_csr()
+@with_unsupported_device_and_dtypes(
+ {"2.4.2 and below": {"cpu": ("int8",)}}, backend_version
+)
def native_sparse_array(
data=None,
*,
@@ -17,9 +32,67 @@
values=None,
dense_shape=None,
format="coo",
-):
- raise IvyNotImplementedException()
+) -> paddle.Tensor:
+ format = format.lower()
+
+ if format not in ["coo", "csr"]:
+ raise IvyNotImplementedException(
+ "paddle only supports 'coo' and 'csr' sparse formats."
+ )
+
+ if _is_data_not_indices_values_and_shape(
+ data,
+ coo_indices,
+ crow_indices,
+ col_indices,
+ ccol_indices,
+ row_indices,
+ values,
+ dense_shape,
+ ):
+ ivy.utils.assertions.check_true(
+ ivy.is_native_sparse_array(data), message="not a sparse array"
+ )
+ return data
+
+ if format == "coo":
+ _verify_coo_components(
+ indices=coo_indices, values=values, dense_shape=dense_shape
+ )
+ return paddle.sparse.sparse_coo_tensor(
+ indices=coo_indices,
+ values=values,
+ shape=dense_shape,
+ dtype=dtype,
+ place=device,
+ stop_gradient=not requires_grad,
+ )
+ else:
+ _verify_csr_components(
+ crow_indices=crow_indices,
+ col_indices=col_indices,
+ values=values,
+ dense_shape=dense_shape,
+ )
+ return paddle.sparse.sparse_csr_tensor(
+ crows=crow_indices,
+ cols=col_indices,
+ values=values,
+ shape=dense_shape,
+ dtype=dtype,
+ place=device,
+ stop_gradient=not requires_grad,
+ )
def native_sparse_array_to_indices_values_and_shape(x):
- raise IvyNotImplementedException()
+ if not is_native_sparse_array(x):
+ raise ivy.utils.exceptions.IvyException("not a Paddle Sparse Array")
+ if x.is_sparse_coo():
+ return {"coo_indices": x.indices()}, x.values(), x.shape
+ else:
+ return (
+ {"crow_indices": x.crows(), "col_indices": x.cols()},
+ x.values(),
+ x.shape,
+ )
| {"golden_diff": "diff --git a/ivy/functional/backends/paddle/experimental/sparse_array.py b/ivy/functional/backends/paddle/experimental/sparse_array.py\n--- a/ivy/functional/backends/paddle/experimental/sparse_array.py\n+++ b/ivy/functional/backends/paddle/experimental/sparse_array.py\n@@ -1,11 +1,26 @@\n+import ivy\n+from ivy.functional.ivy.experimental.sparse_array import (\n+ _verify_coo_components,\n+ _verify_csr_components,\n+ _is_data_not_indices_values_and_shape,\n+)\n+from ivy.func_wrapper import (\n+ with_unsupported_device_and_dtypes,\n+)\n from ivy.utils.exceptions import IvyNotImplementedException\n import paddle\n \n+# local\n+from .. import backend_version\n+\n \n def is_native_sparse_array(x: paddle.Tensor) -> bool:\n return x.is_sparse_coo() or x.is_sparse_csr()\n \n \n+@with_unsupported_device_and_dtypes(\n+ {\"2.4.2 and below\": {\"cpu\": (\"int8\",)}}, backend_version\n+)\n def native_sparse_array(\n data=None,\n *,\n@@ -17,9 +32,67 @@\n values=None,\n dense_shape=None,\n format=\"coo\",\n-):\n- raise IvyNotImplementedException()\n+) -> paddle.Tensor:\n+ format = format.lower()\n+\n+ if format not in [\"coo\", \"csr\"]:\n+ raise IvyNotImplementedException(\n+ \"paddle only supports 'coo' and 'csr' sparse formats.\"\n+ )\n+\n+ if _is_data_not_indices_values_and_shape(\n+ data,\n+ coo_indices,\n+ crow_indices,\n+ col_indices,\n+ ccol_indices,\n+ row_indices,\n+ values,\n+ dense_shape,\n+ ):\n+ ivy.utils.assertions.check_true(\n+ ivy.is_native_sparse_array(data), message=\"not a sparse array\"\n+ )\n+ return data\n+\n+ if format == \"coo\":\n+ _verify_coo_components(\n+ indices=coo_indices, values=values, dense_shape=dense_shape\n+ )\n+ return paddle.sparse.sparse_coo_tensor(\n+ indices=coo_indices,\n+ values=values,\n+ shape=dense_shape,\n+ dtype=dtype,\n+ place=device,\n+ stop_gradient=not requires_grad,\n+ )\n+ else:\n+ _verify_csr_components(\n+ crow_indices=crow_indices,\n+ col_indices=col_indices,\n+ values=values,\n+ dense_shape=dense_shape,\n+ )\n+ return paddle.sparse.sparse_csr_tensor(\n+ crows=crow_indices,\n+ cols=col_indices,\n+ values=values,\n+ shape=dense_shape,\n+ dtype=dtype,\n+ place=device,\n+ stop_gradient=not requires_grad,\n+ )\n \n \n def native_sparse_array_to_indices_values_and_shape(x):\n- raise IvyNotImplementedException()\n+ if not is_native_sparse_array(x):\n+ raise ivy.utils.exceptions.IvyException(\"not a Paddle Sparse Array\")\n+ if x.is_sparse_coo():\n+ return {\"coo_indices\": x.indices()}, x.values(), x.shape\n+ else:\n+ return (\n+ {\"crow_indices\": x.crows(), \"col_indices\": x.cols()},\n+ x.values(),\n+ x.shape,\n+ )\n", "issue": "Add Sparse Array Functions to Paddle Backend\nAdd [Sparse Array Functions](https://www.paddlepaddle.org.cn/documentation/docs/en/api/index\\_en.html) to Paddle backend\r\n\r\n\\_\r\n\r\n>Please keep in mind that the proper way to link an issue to this list is to comment \"- [ ] #issue\\_number\" while the issue's title only includes the name of the function you've chosen.\r\n\r\n\\_\r\n\r\n## Experimental\r\n\r\n- [x] is\\_native\\_sparse\\_array\r\n- [x] native\\_sparse\\_array\r\n- [x] native\\_sparse\\_array\\_to\\_indices\\_values\\_and\\_shape\n", "before_files": [{"content": "from ivy.utils.exceptions import IvyNotImplementedException\nimport paddle\n\n\ndef is_native_sparse_array(x: paddle.Tensor) -> bool:\n return x.is_sparse_coo() or x.is_sparse_csr()\n\n\ndef native_sparse_array(\n data=None,\n *,\n coo_indices=None,\n crow_indices=None,\n col_indices=None,\n ccol_indices=None,\n 
row_indices=None,\n values=None,\n dense_shape=None,\n format=\"coo\",\n):\n raise IvyNotImplementedException()\n\n\ndef native_sparse_array_to_indices_values_and_shape(x):\n raise IvyNotImplementedException()\n", "path": "ivy/functional/backends/paddle/experimental/sparse_array.py"}], "after_files": [{"content": "import ivy\nfrom ivy.functional.ivy.experimental.sparse_array import (\n _verify_coo_components,\n _verify_csr_components,\n _is_data_not_indices_values_and_shape,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_device_and_dtypes,\n)\nfrom ivy.utils.exceptions import IvyNotImplementedException\nimport paddle\n\n# local\nfrom .. import backend_version\n\n\ndef is_native_sparse_array(x: paddle.Tensor) -> bool:\n return x.is_sparse_coo() or x.is_sparse_csr()\n\n\n@with_unsupported_device_and_dtypes(\n {\"2.4.2 and below\": {\"cpu\": (\"int8\",)}}, backend_version\n)\ndef native_sparse_array(\n data=None,\n *,\n coo_indices=None,\n crow_indices=None,\n col_indices=None,\n ccol_indices=None,\n row_indices=None,\n values=None,\n dense_shape=None,\n format=\"coo\",\n) -> paddle.Tensor:\n format = format.lower()\n\n if format not in [\"coo\", \"csr\"]:\n raise IvyNotImplementedException(\n \"paddle only supports 'coo' and 'csr' sparse formats.\"\n )\n\n if _is_data_not_indices_values_and_shape(\n data,\n coo_indices,\n crow_indices,\n col_indices,\n ccol_indices,\n row_indices,\n values,\n dense_shape,\n ):\n ivy.utils.assertions.check_true(\n ivy.is_native_sparse_array(data), message=\"not a sparse array\"\n )\n return data\n\n if format == \"coo\":\n _verify_coo_components(\n indices=coo_indices, values=values, dense_shape=dense_shape\n )\n return paddle.sparse.sparse_coo_tensor(\n indices=coo_indices,\n values=values,\n shape=dense_shape,\n dtype=dtype,\n place=device,\n stop_gradient=not requires_grad,\n )\n else:\n _verify_csr_components(\n crow_indices=crow_indices,\n col_indices=col_indices,\n values=values,\n dense_shape=dense_shape,\n )\n return paddle.sparse.sparse_csr_tensor(\n crows=crow_indices,\n cols=col_indices,\n values=values,\n shape=dense_shape,\n dtype=dtype,\n place=device,\n stop_gradient=not requires_grad,\n )\n\n\ndef native_sparse_array_to_indices_values_and_shape(x):\n if not is_native_sparse_array(x):\n raise ivy.utils.exceptions.IvyException(\"not a Paddle Sparse Array\")\n if x.is_sparse_coo():\n return {\"coo_indices\": x.indices()}, x.values(), x.shape\n else:\n return (\n {\"crow_indices\": x.crows(), \"col_indices\": x.cols()},\n x.values(),\n x.shape,\n )\n", "path": "ivy/functional/backends/paddle/experimental/sparse_array.py"}]} | 566 | 742 |
gh_patches_debug_30271 | rasdani/github-patches | git_diff | rasterio__rasterio-886 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rio overview --ls should not modify file
Currently running `rio overview --ls` to inspect the overviews modifies the file. We could detect the `--ls` option and open in read-only mode.
--- END ISSUE ---
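A minimal sketch of the approach the issue proposes is shown below; it only illustrates choosing the open mode from the flag and is not the final patch (which appears later in this record).
```python
# Sketch: inspecting overviews should never require write access.
import rasterio

def open_dataset(path, ls):
    mode = "r" if ls else "r+"   # --ls only reads; building overviews mutates
    return rasterio.open(path, mode)
```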
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/rio/overview.py`
Content:
```
1 # coding: utf-8
2 """Manage overviews of a dataset."""
3
4 from functools import reduce
5 import logging
6 import operator
7
8 import click
9
10 from . import options
11 import rasterio
12 from rasterio.enums import Resampling
13
14
15 def build_handler(ctx, param, value):
16 if value:
17 try:
18 if '^' in value:
19 base, exp_range = value.split('^')
20 exp_min, exp_max = (int(v) for v in exp_range.split('..'))
21 value = [pow(int(base), k) for k in range(exp_min, exp_max + 1)]
22 else:
23 value = [int(v) for v in value.split(',')]
24 except Exception:
25 raise click.BadParameter(u"must match 'n,n,n,…' or 'n^n..n'.")
26 return value
27
28
29 @click.command('overview', short_help="Construct overviews in an existing dataset.")
30 @options.file_in_arg
31 @click.option('--build', callback=build_handler, metavar=u"f1,f2,…|b^min..max",
32 help="A sequence of decimation factors specied as "
33 "comma-separated list of numbers or a base and range of "
34 "exponents.")
35 @click.option('--ls', help="Print the overviews for each band.",
36 is_flag=True, default=False)
37 @click.option('--rebuild', help="Reconstruct existing overviews.",
38 is_flag=True, default=False)
39 @click.option('--resampling', help="Resampling algorithm.",
40 type=click.Choice(
41 [it.name for it in Resampling if it.value in [0, 2, 5, 6, 7]]),
42 default='nearest', show_default=True)
43 @click.pass_context
44 def overview(ctx, input, build, ls, rebuild, resampling):
45 """Construct overviews in an existing dataset.
46
47 A pyramid of overviews computed once and stored in the dataset can
48 improve performance in some applications.
49
50 The decimation levels at which to build overviews can be specified as
51 a comma separated list
52
53 rio overview --build 2,4,8,16
54
55 or a base and range of exponents.
56
57 rio overview --build 2^1..4
58
59 Note that overviews can not currently be removed and are not
60 automatically updated when the dataset's primary bands are
61 modified.
62
63 Information about existing overviews can be printed using the --ls
64 option.
65
66 rio overview --ls
67
68 """
69 with ctx.obj['env']:
70 with rasterio.open(input, 'r+') as dst:
71
72 if ls:
73 resampling_method = dst.tags(
74 ns='rio_overview').get('resampling') or 'unknown'
75
76 click.echo("Overview factors:")
77 for idx in dst.indexes:
78 click.echo(" Band %d: %s (method: '%s')" % (
79 idx, dst.overviews(idx) or 'None', resampling_method))
80
81 elif rebuild:
82 # Build the same overviews for all bands.
83 factors = reduce(
84 operator.or_,
85 [set(dst.overviews(i)) for i in dst.indexes])
86
87 # Attempt to recover the resampling method from dataset tags.
88 resampling_method = dst.tags(
89 ns='rio_overview').get('resampling') or resampling
90
91 dst.build_overviews(
92 list(factors), Resampling[resampling_method])
93
94 elif build:
95 dst.build_overviews(build, Resampling[resampling])
96
97 # Save the resampling method to a tag.
98 dst.update_tags(ns='rio_overview', resampling=resampling)
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rasterio/rio/overview.py b/rasterio/rio/overview.py
--- a/rasterio/rio/overview.py
+++ b/rasterio/rio/overview.py
@@ -67,9 +67,8 @@
"""
with ctx.obj['env']:
- with rasterio.open(input, 'r+') as dst:
-
- if ls:
+ if ls:
+ with rasterio.open(input, 'r') as dst:
resampling_method = dst.tags(
ns='rio_overview').get('resampling') or 'unknown'
@@ -77,8 +76,8 @@
for idx in dst.indexes:
click.echo(" Band %d: %s (method: '%s')" % (
idx, dst.overviews(idx) or 'None', resampling_method))
-
- elif rebuild:
+ elif rebuild:
+ with rasterio.open(input, 'r+') as dst:
# Build the same overviews for all bands.
factors = reduce(
operator.or_,
@@ -91,8 +90,13 @@
dst.build_overviews(
list(factors), Resampling[resampling_method])
- elif build:
+ elif build:
+ with rasterio.open(input, 'r+') as dst:
dst.build_overviews(build, Resampling[resampling])
# Save the resampling method to a tag.
dst.update_tags(ns='rio_overview', resampling=resampling)
+
+ else:
+ raise click.UsageError(
+ "Please specify --ls, --rebuild, or --build ...")
| {"golden_diff": "diff --git a/rasterio/rio/overview.py b/rasterio/rio/overview.py\n--- a/rasterio/rio/overview.py\n+++ b/rasterio/rio/overview.py\n@@ -67,9 +67,8 @@\n \n \"\"\"\n with ctx.obj['env']:\n- with rasterio.open(input, 'r+') as dst:\n-\n- if ls:\n+ if ls:\n+ with rasterio.open(input, 'r') as dst:\n resampling_method = dst.tags(\n ns='rio_overview').get('resampling') or 'unknown'\n \n@@ -77,8 +76,8 @@\n for idx in dst.indexes:\n click.echo(\" Band %d: %s (method: '%s')\" % (\n idx, dst.overviews(idx) or 'None', resampling_method))\n-\n- elif rebuild:\n+ elif rebuild:\n+ with rasterio.open(input, 'r+') as dst:\n # Build the same overviews for all bands.\n factors = reduce(\n operator.or_,\n@@ -91,8 +90,13 @@\n dst.build_overviews(\n list(factors), Resampling[resampling_method])\n \n- elif build:\n+ elif build:\n+ with rasterio.open(input, 'r+') as dst:\n dst.build_overviews(build, Resampling[resampling])\n \n # Save the resampling method to a tag.\n dst.update_tags(ns='rio_overview', resampling=resampling)\n+\n+ else:\n+ raise click.UsageError(\n+ \"Please specify --ls, --rebuild, or --build ...\")\n", "issue": "rio overview --ls should not modify file\nCurrently running `rio overview --ls` to inspect the overviews modifies the file. We could detect the `--ls` option and open in read-only mode. \n\n", "before_files": [{"content": "# coding: utf-8\n\"\"\"Manage overviews of a dataset.\"\"\"\n\nfrom functools import reduce\nimport logging\nimport operator\n\nimport click\n\nfrom . import options\nimport rasterio\nfrom rasterio.enums import Resampling\n\n\ndef build_handler(ctx, param, value):\n if value:\n try:\n if '^' in value:\n base, exp_range = value.split('^')\n exp_min, exp_max = (int(v) for v in exp_range.split('..'))\n value = [pow(int(base), k) for k in range(exp_min, exp_max + 1)]\n else:\n value = [int(v) for v in value.split(',')]\n except Exception:\n raise click.BadParameter(u\"must match 'n,n,n,\u2026' or 'n^n..n'.\")\n return value\n\n\[email protected]('overview', short_help=\"Construct overviews in an existing dataset.\")\[email protected]_in_arg\[email protected]('--build', callback=build_handler, metavar=u\"f1,f2,\u2026|b^min..max\",\n help=\"A sequence of decimation factors specied as \"\n \"comma-separated list of numbers or a base and range of \"\n \"exponents.\")\[email protected]('--ls', help=\"Print the overviews for each band.\",\n is_flag=True, default=False)\[email protected]('--rebuild', help=\"Reconstruct existing overviews.\",\n is_flag=True, default=False)\[email protected]('--resampling', help=\"Resampling algorithm.\",\n type=click.Choice(\n [it.name for it in Resampling if it.value in [0, 2, 5, 6, 7]]),\n default='nearest', show_default=True)\[email protected]_context\ndef overview(ctx, input, build, ls, rebuild, resampling):\n \"\"\"Construct overviews in an existing dataset.\n\n A pyramid of overviews computed once and stored in the dataset can\n improve performance in some applications.\n\n The decimation levels at which to build overviews can be specified as\n a comma separated list\n\n rio overview --build 2,4,8,16\n\n or a base and range of exponents.\n\n rio overview --build 2^1..4\n\n Note that overviews can not currently be removed and are not\n automatically updated when the dataset's primary bands are\n modified.\n\n Information about existing overviews can be printed using the --ls\n option.\n\n rio overview --ls\n\n \"\"\"\n with ctx.obj['env']:\n with rasterio.open(input, 'r+') as dst:\n\n if ls:\n resampling_method = dst.tags(\n 
ns='rio_overview').get('resampling') or 'unknown'\n\n click.echo(\"Overview factors:\")\n for idx in dst.indexes:\n click.echo(\" Band %d: %s (method: '%s')\" % (\n idx, dst.overviews(idx) or 'None', resampling_method))\n\n elif rebuild:\n # Build the same overviews for all bands.\n factors = reduce(\n operator.or_,\n [set(dst.overviews(i)) for i in dst.indexes])\n\n # Attempt to recover the resampling method from dataset tags.\n resampling_method = dst.tags(\n ns='rio_overview').get('resampling') or resampling\n\n dst.build_overviews(\n list(factors), Resampling[resampling_method])\n\n elif build:\n dst.build_overviews(build, Resampling[resampling])\n\n # Save the resampling method to a tag.\n dst.update_tags(ns='rio_overview', resampling=resampling)\n", "path": "rasterio/rio/overview.py"}], "after_files": [{"content": "# coding: utf-8\n\"\"\"Manage overviews of a dataset.\"\"\"\n\nfrom functools import reduce\nimport logging\nimport operator\n\nimport click\n\nfrom . import options\nimport rasterio\nfrom rasterio.enums import Resampling\n\n\ndef build_handler(ctx, param, value):\n if value:\n try:\n if '^' in value:\n base, exp_range = value.split('^')\n exp_min, exp_max = (int(v) for v in exp_range.split('..'))\n value = [pow(int(base), k) for k in range(exp_min, exp_max + 1)]\n else:\n value = [int(v) for v in value.split(',')]\n except Exception:\n raise click.BadParameter(u\"must match 'n,n,n,\u2026' or 'n^n..n'.\")\n return value\n\n\[email protected]('overview', short_help=\"Construct overviews in an existing dataset.\")\[email protected]_in_arg\[email protected]('--build', callback=build_handler, metavar=u\"f1,f2,\u2026|b^min..max\",\n help=\"A sequence of decimation factors specied as \"\n \"comma-separated list of numbers or a base and range of \"\n \"exponents.\")\[email protected]('--ls', help=\"Print the overviews for each band.\",\n is_flag=True, default=False)\[email protected]('--rebuild', help=\"Reconstruct existing overviews.\",\n is_flag=True, default=False)\[email protected]('--resampling', help=\"Resampling algorithm.\",\n type=click.Choice(\n [it.name for it in Resampling if it.value in [0, 2, 5, 6, 7]]),\n default='nearest', show_default=True)\[email protected]_context\ndef overview(ctx, input, build, ls, rebuild, resampling):\n \"\"\"Construct overviews in an existing dataset.\n\n A pyramid of overviews computed once and stored in the dataset can\n improve performance in some applications.\n\n The decimation levels at which to build overviews can be specified as\n a comma separated list\n\n rio overview --build 2,4,8,16\n\n or a base and range of exponents.\n\n rio overview --build 2^1..4\n\n Note that overviews can not currently be removed and are not\n automatically updated when the dataset's primary bands are\n modified.\n\n Information about existing overviews can be printed using the --ls\n option.\n\n rio overview --ls\n\n \"\"\"\n with ctx.obj['env']:\n if ls:\n with rasterio.open(input, 'r') as dst:\n resampling_method = dst.tags(\n ns='rio_overview').get('resampling') or 'unknown'\n\n click.echo(\"Overview factors:\")\n for idx in dst.indexes:\n click.echo(\" Band %d: %s (method: '%s')\" % (\n idx, dst.overviews(idx) or 'None', resampling_method))\n elif rebuild:\n with rasterio.open(input, 'r+') as dst:\n # Build the same overviews for all bands.\n factors = reduce(\n operator.or_,\n [set(dst.overviews(i)) for i in dst.indexes])\n\n # Attempt to recover the resampling method from dataset tags.\n resampling_method = dst.tags(\n 
ns='rio_overview').get('resampling') or resampling\n\n dst.build_overviews(\n list(factors), Resampling[resampling_method])\n\n elif build:\n with rasterio.open(input, 'r+') as dst:\n dst.build_overviews(build, Resampling[resampling])\n\n # Save the resampling method to a tag.\n dst.update_tags(ns='rio_overview', resampling=resampling)\n\n else:\n raise click.UsageError(\n \"Please specify --ls, --rebuild, or --build ...\")\n", "path": "rasterio/rio/overview.py"}]} | 1,269 | 353 |
gh_patches_debug_20748 | rasdani/github-patches | git_diff | WordPress__openverse-api-318 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add audio to the ingestion server tests
Audio is currently not included in the ingestion server integration or unit tests. We should update these tests to include support for audio. Separate PRs for unit and integration tests would be best. Below is some information on these tests and how to work with them.
## Running the tests
To run the tests and get a sense of what they do, do the following steps:
```bash
cd ingestion_server
pipenv install
pipenv run python3 test/integration_tests.py
```
This is currently blocked by #143. I would've liked to run the tests to learn a bit more about how they work but this isn't yet possible.
--- END ISSUE ---
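As one possible shape for a unit test that covers audio, the sketch below drives `TaskTracker` with an audio reindex task; the import path and the `SimpleNamespace` stand-ins for the multiprocessing values are assumptions made for illustration, not the project's actual fixtures.
```python
# Sketch of a unit-style test exercising TaskTracker with an audio task.
from types import SimpleNamespace

from ingestion_server.tasks import TaskTracker


def test_tracker_lists_audio_task():
    tracker = TaskTracker()
    fake_task = SimpleNamespace(is_alive=lambda: True)
    progress = SimpleNamespace(value=50.0)     # mimics multiprocessing.Value
    finish_time = SimpleNamespace(value=0.0)

    tracker.add_task(fake_task, "task-1", "REINDEX audio", progress, finish_time)

    status = tracker.list_task_statuses()[0]
    assert status["task_id"] == "task-1"
    assert status["active"] is True
    assert status["error"] is False
```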
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sample_data/make_sample_pop.py`
Content:
```
1 import csv
2 import random
3
4
5 in_tsv = open("sample_data.csv", "r")
6 out_tsv = open("sample_popularity_data.csv", "w+")
7 output_fields = ["identifier", "normalized_popularity"]
8 reader = csv.DictReader(in_tsv, delimiter=",")
9 writer = csv.DictWriter(out_tsv, delimiter=",", fieldnames=output_fields)
10 writer.writeheader()
11 for row in reader:
12 pop = random.uniform(0, 100)
13 out_row = {"identifier": row["identifier"], "normalized_popularity": pop}
14 writer.writerow(out_row)
15
```
Path: `ingestion_server/ingestion_server/tasks.py`
Content:
```
1 """
2 Simple in-memory tracking of executed tasks.
3 """
4
5 import datetime as dt
6 import logging
7 from enum import Enum
8 from multiprocessing import Process
9
10 import requests
11
12 from ingestion_server.indexer import TableIndexer, elasticsearch_connect
13 from ingestion_server.ingest import reload_upstream
14
15
16 class TaskTypes(Enum):
17 # Completely reindex all data for a given model.
18 REINDEX = 0
19 # Reindex updates to a model from the database since a certain date.
20 UPDATE_INDEX = 1
21 # Download the latest copy of the data from the upstream database, then
22 # completely reindex the newly imported data.
23 INGEST_UPSTREAM = 2
24 # Create indices in Elasticsearch for QA tests.
25 # This is not intended for production use, but can be safely executed in a
26 # production environment without consequence.
27 LOAD_TEST_DATA = 3
28
29
30 class TaskTracker:
31 def __init__(self):
32 self.id_task = {}
33 self.id_action = {}
34 self.id_progress = {}
35 self.id_start_time = {}
36 self.id_finish_time = {}
37
38 def add_task(self, task, task_id, action, progress, finish_time):
39 self._prune_old_tasks()
40 self.id_task[task_id] = task
41 self.id_action[task_id] = action
42 self.id_progress[task_id] = progress
43 self.id_start_time[task_id] = dt.datetime.utcnow().timestamp()
44 self.id_finish_time[task_id] = finish_time
45 return task_id
46
47 def _prune_old_tasks(self):
48 pass
49
50 def list_task_statuses(self):
51 self._prune_old_tasks()
52 results = []
53 for _id, task in self.id_task.items():
54 percent_completed = self.id_progress[_id].value
55 active = task.is_alive()
56 start_time = self.id_start_time[_id]
57 finish_time = self.id_finish_time[_id].value
58 results.append(
59 {
60 "task_id": _id,
61 "active": active,
62 "action": self.id_action[_id],
63 "progress": percent_completed,
64 "error": percent_completed < 100 and not active,
65 "start_time": start_time,
66 "finish_time": finish_time,
67 }
68 )
69 sorted_results = sorted(results, key=lambda x: x["finish_time"])
70
71 to_utc = dt.datetime.utcfromtimestamp
72
73 def render_date(x):
74 return to_utc(x) if x != 0.0 else None
75
76 # Convert date to a readable format
77 for idx, task in enumerate(sorted_results):
78 start_time = task["start_time"]
79 finish_time = task["finish_time"]
80 sorted_results[idx]["start_time"] = str(render_date(start_time))
81 sorted_results[idx]["finish_time"] = str(render_date(finish_time))
82
83 return sorted_results
84
85
86 class Task(Process):
87 def __init__(
88 self, model, task_type, since_date, progress, task_id, finish_time, callback_url
89 ):
90 Process.__init__(self)
91 self.model = model
92 self.task_type = task_type
93 self.since_date = since_date
94 self.progress = progress
95 self.task_id = task_id
96 self.finish_time = finish_time
97 self.callback_url = callback_url
98
99 def run(self):
100 # Map task types to actions.
101 elasticsearch = elasticsearch_connect()
102 indexer = TableIndexer(
103 elasticsearch, self.model, self.progress, self.finish_time
104 )
105 if self.task_type == TaskTypes.REINDEX:
106 indexer.reindex(self.model)
107 elif self.task_type == TaskTypes.UPDATE_INDEX:
108 indexer.update(self.model, self.since_date)
109 elif self.task_type == TaskTypes.INGEST_UPSTREAM:
110 reload_upstream(self.model)
111 if self.model == "audio":
112 reload_upstream("audioset", approach="basic")
113 indexer.reindex(self.model)
114 elif self.task_type == TaskTypes.LOAD_TEST_DATA:
115 indexer.load_test_data(self.model)
116 logging.info(f"Task {self.task_id} exited.")
117 if self.callback_url:
118 try:
119 requests.post(self.callback_url)
120 except requests.exceptions.RequestException as e:
121 logging.error("Failed to send callback!")
122 logging.error(e)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ingestion_server/ingestion_server/tasks.py b/ingestion_server/ingestion_server/tasks.py
--- a/ingestion_server/ingestion_server/tasks.py
+++ b/ingestion_server/ingestion_server/tasks.py
@@ -116,7 +116,9 @@
logging.info(f"Task {self.task_id} exited.")
if self.callback_url:
try:
- requests.post(self.callback_url)
+ logging.info("Sending callback request")
+ res = requests.post(self.callback_url)
+ logging.info(f"Response: {res.text}")
except requests.exceptions.RequestException as e:
logging.error("Failed to send callback!")
logging.error(e)
diff --git a/sample_data/make_sample_pop.py b/sample_data/make_sample_pop.py
deleted file mode 100644
--- a/sample_data/make_sample_pop.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import csv
-import random
-
-
-in_tsv = open("sample_data.csv", "r")
-out_tsv = open("sample_popularity_data.csv", "w+")
-output_fields = ["identifier", "normalized_popularity"]
-reader = csv.DictReader(in_tsv, delimiter=",")
-writer = csv.DictWriter(out_tsv, delimiter=",", fieldnames=output_fields)
-writer.writeheader()
-for row in reader:
- pop = random.uniform(0, 100)
- out_row = {"identifier": row["identifier"], "normalized_popularity": pop}
- writer.writerow(out_row)
| {"golden_diff": "diff --git a/ingestion_server/ingestion_server/tasks.py b/ingestion_server/ingestion_server/tasks.py\n--- a/ingestion_server/ingestion_server/tasks.py\n+++ b/ingestion_server/ingestion_server/tasks.py\n@@ -116,7 +116,9 @@\n logging.info(f\"Task {self.task_id} exited.\")\n if self.callback_url:\n try:\n- requests.post(self.callback_url)\n+ logging.info(\"Sending callback request\")\n+ res = requests.post(self.callback_url)\n+ logging.info(f\"Response: {res.text}\")\n except requests.exceptions.RequestException as e:\n logging.error(\"Failed to send callback!\")\n logging.error(e)\ndiff --git a/sample_data/make_sample_pop.py b/sample_data/make_sample_pop.py\ndeleted file mode 100644\n--- a/sample_data/make_sample_pop.py\n+++ /dev/null\n@@ -1,14 +0,0 @@\n-import csv\n-import random\n-\n-\n-in_tsv = open(\"sample_data.csv\", \"r\")\n-out_tsv = open(\"sample_popularity_data.csv\", \"w+\")\n-output_fields = [\"identifier\", \"normalized_popularity\"]\n-reader = csv.DictReader(in_tsv, delimiter=\",\")\n-writer = csv.DictWriter(out_tsv, delimiter=\",\", fieldnames=output_fields)\n-writer.writeheader()\n-for row in reader:\n- pop = random.uniform(0, 100)\n- out_row = {\"identifier\": row[\"identifier\"], \"normalized_popularity\": pop}\n- writer.writerow(out_row)\n", "issue": "Add audio to the ingestion server tests\nAudio is currently not included in the ingestion server integration or unit tests. We should update these tests to include support for audio. Separate PRs for unit and integration tests would be best. Below is some information on these tests and how to work with them.\r\n\r\n## Running the tests \r\n\r\nTo run the tests and get a sense of what they do, do the following steps:\r\n\r\n```bash\r\ncd ingestion_server\r\npipenv install\r\npipenv run python3 test/integration_tests.py\r\n```\r\n\r\nThis is currently blocked by #143. 
I would've liked to run the tests to learn a bit more about how they work but this isn't yet possible.\n", "before_files": [{"content": "import csv\nimport random\n\n\nin_tsv = open(\"sample_data.csv\", \"r\")\nout_tsv = open(\"sample_popularity_data.csv\", \"w+\")\noutput_fields = [\"identifier\", \"normalized_popularity\"]\nreader = csv.DictReader(in_tsv, delimiter=\",\")\nwriter = csv.DictWriter(out_tsv, delimiter=\",\", fieldnames=output_fields)\nwriter.writeheader()\nfor row in reader:\n pop = random.uniform(0, 100)\n out_row = {\"identifier\": row[\"identifier\"], \"normalized_popularity\": pop}\n writer.writerow(out_row)\n", "path": "sample_data/make_sample_pop.py"}, {"content": "\"\"\"\nSimple in-memory tracking of executed tasks.\n\"\"\"\n\nimport datetime as dt\nimport logging\nfrom enum import Enum\nfrom multiprocessing import Process\n\nimport requests\n\nfrom ingestion_server.indexer import TableIndexer, elasticsearch_connect\nfrom ingestion_server.ingest import reload_upstream\n\n\nclass TaskTypes(Enum):\n # Completely reindex all data for a given model.\n REINDEX = 0\n # Reindex updates to a model from the database since a certain date.\n UPDATE_INDEX = 1\n # Download the latest copy of the data from the upstream database, then\n # completely reindex the newly imported data.\n INGEST_UPSTREAM = 2\n # Create indices in Elasticsearch for QA tests.\n # This is not intended for production use, but can be safely executed in a\n # production environment without consequence.\n LOAD_TEST_DATA = 3\n\n\nclass TaskTracker:\n def __init__(self):\n self.id_task = {}\n self.id_action = {}\n self.id_progress = {}\n self.id_start_time = {}\n self.id_finish_time = {}\n\n def add_task(self, task, task_id, action, progress, finish_time):\n self._prune_old_tasks()\n self.id_task[task_id] = task\n self.id_action[task_id] = action\n self.id_progress[task_id] = progress\n self.id_start_time[task_id] = dt.datetime.utcnow().timestamp()\n self.id_finish_time[task_id] = finish_time\n return task_id\n\n def _prune_old_tasks(self):\n pass\n\n def list_task_statuses(self):\n self._prune_old_tasks()\n results = []\n for _id, task in self.id_task.items():\n percent_completed = self.id_progress[_id].value\n active = task.is_alive()\n start_time = self.id_start_time[_id]\n finish_time = self.id_finish_time[_id].value\n results.append(\n {\n \"task_id\": _id,\n \"active\": active,\n \"action\": self.id_action[_id],\n \"progress\": percent_completed,\n \"error\": percent_completed < 100 and not active,\n \"start_time\": start_time,\n \"finish_time\": finish_time,\n }\n )\n sorted_results = sorted(results, key=lambda x: x[\"finish_time\"])\n\n to_utc = dt.datetime.utcfromtimestamp\n\n def render_date(x):\n return to_utc(x) if x != 0.0 else None\n\n # Convert date to a readable format\n for idx, task in enumerate(sorted_results):\n start_time = task[\"start_time\"]\n finish_time = task[\"finish_time\"]\n sorted_results[idx][\"start_time\"] = str(render_date(start_time))\n sorted_results[idx][\"finish_time\"] = str(render_date(finish_time))\n\n return sorted_results\n\n\nclass Task(Process):\n def __init__(\n self, model, task_type, since_date, progress, task_id, finish_time, callback_url\n ):\n Process.__init__(self)\n self.model = model\n self.task_type = task_type\n self.since_date = since_date\n self.progress = progress\n self.task_id = task_id\n self.finish_time = finish_time\n self.callback_url = callback_url\n\n def run(self):\n # Map task types to actions.\n elasticsearch = elasticsearch_connect()\n 
indexer = TableIndexer(\n elasticsearch, self.model, self.progress, self.finish_time\n )\n if self.task_type == TaskTypes.REINDEX:\n indexer.reindex(self.model)\n elif self.task_type == TaskTypes.UPDATE_INDEX:\n indexer.update(self.model, self.since_date)\n elif self.task_type == TaskTypes.INGEST_UPSTREAM:\n reload_upstream(self.model)\n if self.model == \"audio\":\n reload_upstream(\"audioset\", approach=\"basic\")\n indexer.reindex(self.model)\n elif self.task_type == TaskTypes.LOAD_TEST_DATA:\n indexer.load_test_data(self.model)\n logging.info(f\"Task {self.task_id} exited.\")\n if self.callback_url:\n try:\n requests.post(self.callback_url)\n except requests.exceptions.RequestException as e:\n logging.error(\"Failed to send callback!\")\n logging.error(e)\n", "path": "ingestion_server/ingestion_server/tasks.py"}], "after_files": [{"content": null, "path": "sample_data/make_sample_pop.py"}, {"content": "\"\"\"\nSimple in-memory tracking of executed tasks.\n\"\"\"\n\nimport datetime as dt\nimport logging\nfrom enum import Enum\nfrom multiprocessing import Process\n\nimport requests\n\nfrom ingestion_server.indexer import TableIndexer, elasticsearch_connect\nfrom ingestion_server.ingest import reload_upstream\n\n\nclass TaskTypes(Enum):\n # Completely reindex all data for a given model.\n REINDEX = 0\n # Reindex updates to a model from the database since a certain date.\n UPDATE_INDEX = 1\n # Download the latest copy of the data from the upstream database, then\n # completely reindex the newly imported data.\n INGEST_UPSTREAM = 2\n # Create indices in Elasticsearch for QA tests.\n # This is not intended for production use, but can be safely executed in a\n # production environment without consequence.\n LOAD_TEST_DATA = 3\n\n\nclass TaskTracker:\n def __init__(self):\n self.id_task = {}\n self.id_action = {}\n self.id_progress = {}\n self.id_start_time = {}\n self.id_finish_time = {}\n\n def add_task(self, task, task_id, action, progress, finish_time):\n self._prune_old_tasks()\n self.id_task[task_id] = task\n self.id_action[task_id] = action\n self.id_progress[task_id] = progress\n self.id_start_time[task_id] = dt.datetime.utcnow().timestamp()\n self.id_finish_time[task_id] = finish_time\n return task_id\n\n def _prune_old_tasks(self):\n pass\n\n def list_task_statuses(self):\n self._prune_old_tasks()\n results = []\n for _id, task in self.id_task.items():\n percent_completed = self.id_progress[_id].value\n active = task.is_alive()\n start_time = self.id_start_time[_id]\n finish_time = self.id_finish_time[_id].value\n results.append(\n {\n \"task_id\": _id,\n \"active\": active,\n \"action\": self.id_action[_id],\n \"progress\": percent_completed,\n \"error\": percent_completed < 100 and not active,\n \"start_time\": start_time,\n \"finish_time\": finish_time,\n }\n )\n sorted_results = sorted(results, key=lambda x: x[\"finish_time\"])\n\n to_utc = dt.datetime.utcfromtimestamp\n\n def render_date(x):\n return to_utc(x) if x != 0.0 else None\n\n # Convert date to a readable format\n for idx, task in enumerate(sorted_results):\n start_time = task[\"start_time\"]\n finish_time = task[\"finish_time\"]\n sorted_results[idx][\"start_time\"] = str(render_date(start_time))\n sorted_results[idx][\"finish_time\"] = str(render_date(finish_time))\n\n return sorted_results\n\n\nclass Task(Process):\n def __init__(\n self, model, task_type, since_date, progress, task_id, finish_time, callback_url\n ):\n Process.__init__(self)\n self.model = model\n self.task_type = task_type\n self.since_date = 
since_date\n self.progress = progress\n self.task_id = task_id\n self.finish_time = finish_time\n self.callback_url = callback_url\n\n def run(self):\n # Map task types to actions.\n elasticsearch = elasticsearch_connect()\n indexer = TableIndexer(\n elasticsearch, self.model, self.progress, self.finish_time\n )\n if self.task_type == TaskTypes.REINDEX:\n indexer.reindex(self.model)\n elif self.task_type == TaskTypes.UPDATE_INDEX:\n indexer.update(self.model, self.since_date)\n elif self.task_type == TaskTypes.INGEST_UPSTREAM:\n reload_upstream(self.model)\n if self.model == \"audio\":\n reload_upstream(\"audioset\", approach=\"basic\")\n indexer.reindex(self.model)\n elif self.task_type == TaskTypes.LOAD_TEST_DATA:\n indexer.load_test_data(self.model)\n logging.info(f\"Task {self.task_id} exited.\")\n if self.callback_url:\n try:\n logging.info(\"Sending callback request\")\n res = requests.post(self.callback_url)\n logging.info(f\"Response: {res.text}\")\n except requests.exceptions.RequestException as e:\n logging.error(\"Failed to send callback!\")\n logging.error(e)\n", "path": "ingestion_server/ingestion_server/tasks.py"}]} | 1,738 | 335 |
gh_patches_debug_41905 | rasdani/github-patches | git_diff | pytorch__ignite-478 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve create_supervised_trainer with optional output_transform
Following [the discussion](https://github.com/pytorch/ignite/pull/476#discussion_r272108999), the idea is to give more flexibility to users of `create_supervised_trainer`:
```python
def default_output_transform(x, y, y_pred, loss):
return loss.item()
def create_supervised_trainer(model, optimizer, loss_fn,
device=None, non_blocking=False, prepare_batch=_prepare_batch,
output_transform=default_output_transform):
if device:
model.to(device)
def _update(engine, batch):
model.train()
optimizer.zero_grad()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
loss = loss_fn(y_pred, y)
loss.backward()
optimizer.step()
return output_transform(x, y, y_pred, loss)
return Engine(_update)
```
cc @IlyaOvodov
--- END ISSUE ---
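For context, a minimal usage sketch of the proposed `output_transform` hook from the caller's side, assuming the patched `create_supervised_trainer` signature above; the model, optimizer, and loss function are placeholders, not part of the original report.
```python
import torch
from ignite.engine import create_supervised_trainer

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = torch.nn.CrossEntropyLoss()

# Keep predictions and targets in engine.state.output instead of only the scalar loss,
# so downstream handlers and metrics can reuse them without re-running the model.
trainer = create_supervised_trainer(
    model,
    optimizer,
    loss_fn,
    output_transform=lambda x, y, y_pred, loss: {
        "loss": loss.item(),
        "y_pred": y_pred,
        "y": y,
    },
)
```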
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/engine/__init__.py`
Content:
```
1 import torch
2
3 from ignite.engine.engine import Engine, State, Events
4 from ignite.utils import convert_tensor
5
6
7 def _prepare_batch(batch, device=None, non_blocking=False):
8 """Prepare batch for training: pass to a device with options.
9
10 """
11 x, y = batch
12 return (convert_tensor(x, device=device, non_blocking=non_blocking),
13 convert_tensor(y, device=device, non_blocking=non_blocking))
14
15
16 def create_supervised_trainer(model, optimizer, loss_fn,
17 device=None, non_blocking=False,
18 prepare_batch=_prepare_batch):
19 """
20 Factory function for creating a trainer for supervised models.
21
22 Args:
23 model (`torch.nn.Module`): the model to train.
24 optimizer (`torch.optim.Optimizer`): the optimizer to use.
25 loss_fn (torch.nn loss function): the loss function to use.
26 device (str, optional): device type specification (default: None).
27 Applies to both model and batches.
28 non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
29 with respect to the host. For other cases, this argument has no effect.
30 prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
31 tuple of tensors `(batch_x, batch_y)`.
32
33 Note: `engine.state.output` for this engine is the loss of the processed batch.
34
35 Returns:
36 Engine: a trainer engine with supervised update function.
37 """
38 if device:
39 model.to(device)
40
41 def _update(engine, batch):
42 model.train()
43 optimizer.zero_grad()
44 x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
45 y_pred = model(x)
46 loss = loss_fn(y_pred, y)
47 loss.backward()
48 optimizer.step()
49 return loss.item()
50
51 return Engine(_update)
52
53
54 def create_supervised_evaluator(model, metrics={},
55 device=None, non_blocking=False,
56 prepare_batch=_prepare_batch):
57 """
58 Factory function for creating an evaluator for supervised models.
59
60 Args:
61 model (`torch.nn.Module`): the model to train.
62 metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.
63 device (str, optional): device type specification (default: None).
64 Applies to both model and batches.
65 non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
66 with respect to the host. For other cases, this argument has no effect.
67 prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
68 tuple of tensors `(batch_x, batch_y)`.
69
70 Note: `engine.state.output` for this engine is a tuple of `(batch_pred, batch_y)`.
71
72 Returns:
73 Engine: an evaluator engine with supervised inference function.
74 """
75 if device:
76 model.to(device)
77
78 def _inference(engine, batch):
79 model.eval()
80 with torch.no_grad():
81 x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
82 y_pred = model(x)
83 return y_pred, y
84
85 engine = Engine(_inference)
86
87 for name, metric in metrics.items():
88 metric.attach(engine, name)
89
90 return engine
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/engine/__init__.py b/ignite/engine/__init__.py
--- a/ignite/engine/__init__.py
+++ b/ignite/engine/__init__.py
@@ -15,7 +15,8 @@
def create_supervised_trainer(model, optimizer, loss_fn,
device=None, non_blocking=False,
- prepare_batch=_prepare_batch):
+ prepare_batch=_prepare_batch,
+ output_transform=lambda x, y, y_pred, loss: loss.item()):
"""
Factory function for creating a trainer for supervised models.
@@ -29,8 +30,11 @@
with respect to the host. For other cases, this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
+ output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value
+ to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
- Note: `engine.state.output` for this engine is the loss of the processed batch.
+ Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is the loss
+ of the processed batch by default.
Returns:
Engine: a trainer engine with supervised update function.
@@ -46,14 +50,15 @@
loss = loss_fn(y_pred, y)
loss.backward()
optimizer.step()
- return loss.item()
+ return output_transform(x, y, y_pred, loss)
return Engine(_update)
def create_supervised_evaluator(model, metrics={},
device=None, non_blocking=False,
- prepare_batch=_prepare_batch):
+ prepare_batch=_prepare_batch,
+ output_transform=lambda x, y, y_pred: (y_pred, y,)):
"""
Factory function for creating an evaluator for supervised models.
@@ -66,8 +71,12 @@
with respect to the host. For other cases, this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
+ output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value
+ to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits
+ output expected by metrics. If you change it you should use `output_transform` in metrics.
- Note: `engine.state.output` for this engine is a tuple of `(batch_pred, batch_y)`.
+ Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is
+ a tuple of `(batch_pred, batch_y)` by default.
Returns:
Engine: an evaluator engine with supervised inference function.
@@ -80,7 +89,7 @@
with torch.no_grad():
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
- return y_pred, y
+ return output_transform(x, y, y_pred)
engine = Engine(_inference)
| {"golden_diff": "diff --git a/ignite/engine/__init__.py b/ignite/engine/__init__.py\n--- a/ignite/engine/__init__.py\n+++ b/ignite/engine/__init__.py\n@@ -15,7 +15,8 @@\n \n def create_supervised_trainer(model, optimizer, loss_fn,\n device=None, non_blocking=False,\n- prepare_batch=_prepare_batch):\n+ prepare_batch=_prepare_batch,\n+ output_transform=lambda x, y, y_pred, loss: loss.item()):\n \"\"\"\n Factory function for creating a trainer for supervised models.\n \n@@ -29,8 +30,11 @@\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n+ output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value\n+ to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.\n \n- Note: `engine.state.output` for this engine is the loss of the processed batch.\n+ Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is the loss\n+ of the processed batch by default.\n \n Returns:\n Engine: a trainer engine with supervised update function.\n@@ -46,14 +50,15 @@\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n- return loss.item()\n+ return output_transform(x, y, y_pred, loss)\n \n return Engine(_update)\n \n \n def create_supervised_evaluator(model, metrics={},\n device=None, non_blocking=False,\n- prepare_batch=_prepare_batch):\n+ prepare_batch=_prepare_batch,\n+ output_transform=lambda x, y, y_pred: (y_pred, y,)):\n \"\"\"\n Factory function for creating an evaluator for supervised models.\n \n@@ -66,8 +71,12 @@\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n+ output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value\n+ to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits\n+ output expected by metrics. 
If you change it you should use `output_transform` in metrics.\n \n- Note: `engine.state.output` for this engine is a tuple of `(batch_pred, batch_y)`.\n+ Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is\n+ a tuple of `(batch_pred, batch_y)` by default.\n \n Returns:\n Engine: an evaluator engine with supervised inference function.\n@@ -80,7 +89,7 @@\n with torch.no_grad():\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n- return y_pred, y\n+ return output_transform(x, y, y_pred)\n \n engine = Engine(_inference)\n", "issue": "Improve create_supervised_trainer with optional output_transform\nFollowing [the discussion](https://github.com/pytorch/ignite/pull/476#discussion_r272108999), idea is to give more flexibility to users who are using `create_supervised_trainer`:\r\n```python\r\ndef default_output_transform(x, y, y_pred, loss):\r\n return loss.item() \r\n\r\n\r\ndef create_supervised_trainer(model, optimizer, loss_fn,\r\n device=None, non_blocking=False, prepare_batch=_prepare_batch, \r\n output_transform=default_output_transform):\r\n if device:\r\n model.to(device)\r\n\r\n def _update(engine, batch):\r\n model.train()\r\n optimizer.zero_grad()\r\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\r\n y_pred = model(x)\r\n loss = loss_fn(y_pred, y)\r\n loss.backward()\r\n optimizer.step()\r\n return output_transform(x, y, y_pred, loss)\r\n\r\n return Engine(_update) \r\n```\r\n\r\ncc @IlyaOvodov\nImprove create_supervised_trainer with optional output_transform\nFollowing [the discussion](https://github.com/pytorch/ignite/pull/476#discussion_r272108999), idea is to give more flexibility to users who are using `create_supervised_trainer`:\r\n```python\r\ndef default_output_transform(x, y, y_pred, loss):\r\n return loss.item() \r\n\r\n\r\ndef create_supervised_trainer(model, optimizer, loss_fn,\r\n device=None, non_blocking=False, prepare_batch=_prepare_batch, \r\n output_transform=default_output_transform):\r\n if device:\r\n model.to(device)\r\n\r\n def _update(engine, batch):\r\n model.train()\r\n optimizer.zero_grad()\r\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\r\n y_pred = model(x)\r\n loss = loss_fn(y_pred, y)\r\n loss.backward()\r\n optimizer.step()\r\n return output_transform(x, y, y_pred, loss)\r\n\r\n return Engine(_update) \r\n```\r\n\r\ncc @IlyaOvodov\n", "before_files": [{"content": "import torch\n\nfrom ignite.engine.engine import Engine, State, Events\nfrom ignite.utils import convert_tensor\n\n\ndef _prepare_batch(batch, device=None, non_blocking=False):\n \"\"\"Prepare batch for training: pass to a device with options.\n\n \"\"\"\n x, y = batch\n return (convert_tensor(x, device=device, non_blocking=non_blocking),\n convert_tensor(y, device=device, non_blocking=non_blocking))\n\n\ndef create_supervised_trainer(model, optimizer, loss_fn,\n device=None, non_blocking=False,\n prepare_batch=_prepare_batch):\n \"\"\"\n Factory function for creating a trainer for supervised models.\n\n Args:\n model (`torch.nn.Module`): the model to train.\n optimizer (`torch.optim.Optimizer`): the optimizer to use.\n loss_fn (torch.nn loss function): the loss function to use.\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. 
For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n\n Note: `engine.state.output` for this engine is the loss of the processed batch.\n\n Returns:\n Engine: a trainer engine with supervised update function.\n \"\"\"\n if device:\n model.to(device)\n\n def _update(engine, batch):\n model.train()\n optimizer.zero_grad()\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n return loss.item()\n\n return Engine(_update)\n\n\ndef create_supervised_evaluator(model, metrics={},\n device=None, non_blocking=False,\n prepare_batch=_prepare_batch):\n \"\"\"\n Factory function for creating an evaluator for supervised models.\n\n Args:\n model (`torch.nn.Module`): the model to train.\n metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n\n Note: `engine.state.output` for this engine is a tuple of `(batch_pred, batch_y)`.\n\n Returns:\n Engine: an evaluator engine with supervised inference function.\n \"\"\"\n if device:\n model.to(device)\n\n def _inference(engine, batch):\n model.eval()\n with torch.no_grad():\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n return y_pred, y\n\n engine = Engine(_inference)\n\n for name, metric in metrics.items():\n metric.attach(engine, name)\n\n return engine\n", "path": "ignite/engine/__init__.py"}], "after_files": [{"content": "import torch\n\nfrom ignite.engine.engine import Engine, State, Events\nfrom ignite.utils import convert_tensor\n\n\ndef _prepare_batch(batch, device=None, non_blocking=False):\n \"\"\"Prepare batch for training: pass to a device with options.\n\n \"\"\"\n x, y = batch\n return (convert_tensor(x, device=device, non_blocking=non_blocking),\n convert_tensor(y, device=device, non_blocking=non_blocking))\n\n\ndef create_supervised_trainer(model, optimizer, loss_fn,\n device=None, non_blocking=False,\n prepare_batch=_prepare_batch,\n output_transform=lambda x, y, y_pred, loss: loss.item()):\n \"\"\"\n Factory function for creating a trainer for supervised models.\n\n Args:\n model (`torch.nn.Module`): the model to train.\n optimizer (`torch.optim.Optimizer`): the optimizer to use.\n loss_fn (torch.nn loss function): the loss function to use.\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value\n to be assigned to engine's state.output after each iteration. 
Default is returning `loss.item()`.\n\n Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is the loss\n of the processed batch by default.\n\n Returns:\n Engine: a trainer engine with supervised update function.\n \"\"\"\n if device:\n model.to(device)\n\n def _update(engine, batch):\n model.train()\n optimizer.zero_grad()\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n return output_transform(x, y, y_pred, loss)\n\n return Engine(_update)\n\n\ndef create_supervised_evaluator(model, metrics={},\n device=None, non_blocking=False,\n prepare_batch=_prepare_batch,\n output_transform=lambda x, y, y_pred: (y_pred, y,)):\n \"\"\"\n Factory function for creating an evaluator for supervised models.\n\n Args:\n model (`torch.nn.Module`): the model to train.\n metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value\n to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits\n output expected by metrics. If you change it you should use `output_transform` in metrics.\n\n Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is\n a tuple of `(batch_pred, batch_y)` by default.\n\n Returns:\n Engine: an evaluator engine with supervised inference function.\n \"\"\"\n if device:\n model.to(device)\n\n def _inference(engine, batch):\n model.eval()\n with torch.no_grad():\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n return output_transform(x, y, y_pred)\n\n engine = Engine(_inference)\n\n for name, metric in metrics.items():\n metric.attach(engine, name)\n\n return engine\n", "path": "ignite/engine/__init__.py"}]} | 1,590 | 728 |
gh_patches_debug_3543 | rasdani/github-patches | git_diff | beeware__toga-1634 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Source installs no longer working
#1614 made some changes to the packaging of modules to support the release package workflow.
The wheels generated from this process appear to work fine; however, source installs don't appear to be working. I've had problems on both macOS and Android.
**To Reproduce**
Steps to reproduce the behavior:
1. `briefcase run` or `briefcase run android` on Tutorial 0.
**Expected behavior**
App should start.
**Environment:**
- Operating System: macOS
- Python version: 3.10
- Software versions:
- Briefcase: 0.3.11
- Toga: 96881f093
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/web/setup.py`
Content:
```
1 #!/usr/bin/env python
2 import re
3
4 from setuptools import setup
5
6 # Version handline needs to be programatic because
7 # we can't import toga_web to compute the version;
8 # and to support versioned subpackage dependencies
9 with open('src/toga_web/__init__.py', encoding='utf8') as version_file:
10 version_match = re.search(
11 r"^__version__ = ['\"]([^'\"]*)['\"]",
12 version_file.read(),
13 re.M
14 )
15 if version_match:
16 version = version_match.group(1)
17 else:
18 raise RuntimeError("Unable to find version string.")
19
20 setup(
21 version=version,
22 install_requires=[
23 # TODO: Due to https://github.com/pyodide/pyodide/issues/2408, the name
24 # toga-core is ambigous when on the package hasn't been published to
25 # PyPI. As a workaround, don't specify the dependency, and manually
26 # ensure that toga-core is installed.
27 # 'toga-core==%s' % version,
28 ],
29 )
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/web/setup.py b/src/web/setup.py
--- a/src/web/setup.py
+++ b/src/web/setup.py
@@ -20,10 +20,6 @@
setup(
version=version,
install_requires=[
- # TODO: Due to https://github.com/pyodide/pyodide/issues/2408, the name
- # toga-core is ambigous when on the package hasn't been published to
- # PyPI. As a workaround, don't specify the dependency, and manually
- # ensure that toga-core is installed.
- # 'toga-core==%s' % version,
+ 'toga-core==%s' % version,
],
)
| {"golden_diff": "diff --git a/src/web/setup.py b/src/web/setup.py\n--- a/src/web/setup.py\n+++ b/src/web/setup.py\n@@ -20,10 +20,6 @@\n setup(\n version=version,\n install_requires=[\n- # TODO: Due to https://github.com/pyodide/pyodide/issues/2408, the name\n- # toga-core is ambigous when on the package hasn't been published to\n- # PyPI. As a workaround, don't specify the dependency, and manually\n- # ensure that toga-core is installed.\n- # 'toga-core==%s' % version,\n+ 'toga-core==%s' % version,\n ],\n )\n", "issue": "Source installs no longer working\n#1614 made some changes to the packaging of modules to support the release package workflow.\r\n\r\nThe wheels generated from this process appear to work fine; however, source installs don't appear to be working. I've had problems on both macOS and Android.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. `briefcase run` or `briefcase run android` on Tutorial 0.\r\n\r\n**Expected behavior**\r\n\r\nApp should start.\r\n\r\n**Environment:**\r\n - Operating System: macOS\r\n - Python version: 3.10\r\n - Software versions:\r\n - Briefcase: 0.3.11\r\n - Toga: 96881f093\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport re\n\nfrom setuptools import setup\n\n# Version handline needs to be programatic because\n# we can't import toga_web to compute the version;\n# and to support versioned subpackage dependencies\nwith open('src/toga_web/__init__.py', encoding='utf8') as version_file:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file.read(),\n re.M\n )\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n version=version,\n install_requires=[\n # TODO: Due to https://github.com/pyodide/pyodide/issues/2408, the name\n # toga-core is ambigous when on the package hasn't been published to\n # PyPI. As a workaround, don't specify the dependency, and manually\n # ensure that toga-core is installed.\n # 'toga-core==%s' % version,\n ],\n)\n", "path": "src/web/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport re\n\nfrom setuptools import setup\n\n# Version handline needs to be programatic because\n# we can't import toga_web to compute the version;\n# and to support versioned subpackage dependencies\nwith open('src/toga_web/__init__.py', encoding='utf8') as version_file:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file.read(),\n re.M\n )\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n version=version,\n install_requires=[\n 'toga-core==%s' % version,\n ],\n)\n", "path": "src/web/setup.py"}]} | 694 | 159 |
gh_patches_debug_2990 | rasdani/github-patches | git_diff | aio-libs-abandoned__aioredis-py-535 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a BUSYGROUP reply error
The XGROUP CREATE command can return a BUSYGROUP error when a group already exists: https://redis.io/commands/xgroup
I think the `ReplyError` subclass for matching it would look like this:
```py
class BusyGroupError(ReplyError):
MATCH_REPLY = "BUSYGROUP Consumer Group name already exists"
```
--- END ISSUE ---
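As a quick illustration of how the proposed subclass plugs into the existing `ReplyError.__new__` dispatch shown in the file below - the constructor call stands in for the reply parser, and `BusyGroupError` assumes the patch has been applied:
```python
from aioredis.errors import ReplyError, BusyGroupError  # BusyGroupError exists only after the patch

# The parser builds a ReplyError from the server's "-BUSYGROUP ..." reply text;
# ReplyError.__new__ walks its subclasses and returns the one whose MATCH_REPLY matches.
err = ReplyError("BUSYGROUP Consumer Group name already exists")
assert isinstance(err, BusyGroupError)

# Callers can then treat "group already exists" as a non-fatal condition:
try:
    raise err
except BusyGroupError:
    pass  # safe to ignore when creating the consumer group idempotently
```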
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `aioredis/errors.py`
Content:
```
1 __all__ = [
2 'RedisError',
3 'ProtocolError',
4 'ReplyError',
5 'MaxClientsError',
6 'AuthError',
7 'PipelineError',
8 'MultiExecError',
9 'WatchVariableError',
10 'ChannelClosedError',
11 'ConnectionClosedError',
12 'ConnectionForcedCloseError',
13 'PoolClosedError',
14 'MasterNotFoundError',
15 'SlaveNotFoundError',
16 'ReadOnlyError',
17 ]
18
19
20 class RedisError(Exception):
21 """Base exception class for aioredis exceptions."""
22
23
24 class ProtocolError(RedisError):
25 """Raised when protocol error occurs."""
26
27
28 class ReplyError(RedisError):
29 """Raised for redis error replies (-ERR)."""
30
31 MATCH_REPLY = None
32
33 def __new__(cls, msg, *args):
34 for klass in cls.__subclasses__():
35 if msg and klass.MATCH_REPLY and msg.startswith(klass.MATCH_REPLY):
36 return klass(msg, *args)
37 return super().__new__(cls, msg, *args)
38
39
40 class MaxClientsError(ReplyError):
41 """Raised for redis server when the maximum number of client has been
42 reached."""
43
44 MATCH_REPLY = "ERR max number of clients reached"
45
46
47 class AuthError(ReplyError):
48 """Raised when authentication errors occurs."""
49
50 MATCH_REPLY = ("NOAUTH ", "ERR invalid password")
51
52
53 class PipelineError(RedisError):
54 """Raised if command within pipeline raised error."""
55
56 def __init__(self, errors):
57 super().__init__('{} errors:'.format(self.__class__.__name__), errors)
58
59
60 class MultiExecError(PipelineError):
61 """Raised if command within MULTI/EXEC block caused error."""
62
63
64 class WatchVariableError(MultiExecError):
65 """Raised if watched variable changed (EXEC returns None)."""
66
67
68 class ChannelClosedError(RedisError):
69 """Raised when Pub/Sub channel is unsubscribed and messages queue is empty.
70 """
71
72
73 class ReadOnlyError(RedisError):
74 """Raised from slave when read-only mode is enabled"""
75
76
77 class MasterNotFoundError(RedisError):
78 """Raised for sentinel master not found error."""
79
80
81 class SlaveNotFoundError(RedisError):
82 """Raised for sentinel slave not found error."""
83
84
85 class MasterReplyError(RedisError):
86 """Raised by sentinel client for master error replies."""
87
88
89 class SlaveReplyError(RedisError):
90 """Raised by sentinel client for slave error replies."""
91
92
93 class ConnectionClosedError(RedisError):
94 """Raised if connection to server was closed."""
95
96
97 class ConnectionForcedCloseError(ConnectionClosedError):
98 """Raised if connection was closed with .close() method."""
99
100
101 class PoolClosedError(RedisError):
102 """Raised if pool is closed."""
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/aioredis/errors.py b/aioredis/errors.py
--- a/aioredis/errors.py
+++ b/aioredis/errors.py
@@ -50,6 +50,12 @@
MATCH_REPLY = ("NOAUTH ", "ERR invalid password")
+class BusyGroupError(ReplyError):
+ """Raised if Consumer Group name already exists."""
+
+ MATCH_REPLY = "BUSYGROUP Consumer Group name already exists"
+
+
class PipelineError(RedisError):
"""Raised if command within pipeline raised error."""
| {"golden_diff": "diff --git a/aioredis/errors.py b/aioredis/errors.py\n--- a/aioredis/errors.py\n+++ b/aioredis/errors.py\n@@ -50,6 +50,12 @@\n MATCH_REPLY = (\"NOAUTH \", \"ERR invalid password\")\n \n \n+class BusyGroupError(ReplyError):\n+ \"\"\"Raised if Consumer Group name already exists.\"\"\"\n+\n+ MATCH_REPLY = \"BUSYGROUP Consumer Group name already exists\"\n+\n+\n class PipelineError(RedisError):\n \"\"\"Raised if command within pipeline raised error.\"\"\"\n", "issue": "Add a BUSYGROUP reply error\nThe XGROUP CREATE command can return a BUSYGROUP error when a group already exists: https://redis.io/commands/xgroup\r\n\r\nI think the `ReplyError` subclass for matching it would look like this:\r\n\r\n```py\r\nclass BusyGroupError(ReplyError):\r\n MATCH_REPLY = \"BUSYGROUP Consumer Group name already exists\"\r\n```\n", "before_files": [{"content": "__all__ = [\n 'RedisError',\n 'ProtocolError',\n 'ReplyError',\n 'MaxClientsError',\n 'AuthError',\n 'PipelineError',\n 'MultiExecError',\n 'WatchVariableError',\n 'ChannelClosedError',\n 'ConnectionClosedError',\n 'ConnectionForcedCloseError',\n 'PoolClosedError',\n 'MasterNotFoundError',\n 'SlaveNotFoundError',\n 'ReadOnlyError',\n ]\n\n\nclass RedisError(Exception):\n \"\"\"Base exception class for aioredis exceptions.\"\"\"\n\n\nclass ProtocolError(RedisError):\n \"\"\"Raised when protocol error occurs.\"\"\"\n\n\nclass ReplyError(RedisError):\n \"\"\"Raised for redis error replies (-ERR).\"\"\"\n\n MATCH_REPLY = None\n\n def __new__(cls, msg, *args):\n for klass in cls.__subclasses__():\n if msg and klass.MATCH_REPLY and msg.startswith(klass.MATCH_REPLY):\n return klass(msg, *args)\n return super().__new__(cls, msg, *args)\n\n\nclass MaxClientsError(ReplyError):\n \"\"\"Raised for redis server when the maximum number of client has been\n reached.\"\"\"\n\n MATCH_REPLY = \"ERR max number of clients reached\"\n\n\nclass AuthError(ReplyError):\n \"\"\"Raised when authentication errors occurs.\"\"\"\n\n MATCH_REPLY = (\"NOAUTH \", \"ERR invalid password\")\n\n\nclass PipelineError(RedisError):\n \"\"\"Raised if command within pipeline raised error.\"\"\"\n\n def __init__(self, errors):\n super().__init__('{} errors:'.format(self.__class__.__name__), errors)\n\n\nclass MultiExecError(PipelineError):\n \"\"\"Raised if command within MULTI/EXEC block caused error.\"\"\"\n\n\nclass WatchVariableError(MultiExecError):\n \"\"\"Raised if watched variable changed (EXEC returns None).\"\"\"\n\n\nclass ChannelClosedError(RedisError):\n \"\"\"Raised when Pub/Sub channel is unsubscribed and messages queue is empty.\n \"\"\"\n\n\nclass ReadOnlyError(RedisError):\n \"\"\"Raised from slave when read-only mode is enabled\"\"\"\n\n\nclass MasterNotFoundError(RedisError):\n \"\"\"Raised for sentinel master not found error.\"\"\"\n\n\nclass SlaveNotFoundError(RedisError):\n \"\"\"Raised for sentinel slave not found error.\"\"\"\n\n\nclass MasterReplyError(RedisError):\n \"\"\"Raised by sentinel client for master error replies.\"\"\"\n\n\nclass SlaveReplyError(RedisError):\n \"\"\"Raised by sentinel client for slave error replies.\"\"\"\n\n\nclass ConnectionClosedError(RedisError):\n \"\"\"Raised if connection to server was closed.\"\"\"\n\n\nclass ConnectionForcedCloseError(ConnectionClosedError):\n \"\"\"Raised if connection was closed with .close() method.\"\"\"\n\n\nclass PoolClosedError(RedisError):\n \"\"\"Raised if pool is closed.\"\"\"\n", "path": "aioredis/errors.py"}], "after_files": [{"content": "__all__ = [\n 'RedisError',\n 
'ProtocolError',\n 'ReplyError',\n 'MaxClientsError',\n 'AuthError',\n 'PipelineError',\n 'MultiExecError',\n 'WatchVariableError',\n 'ChannelClosedError',\n 'ConnectionClosedError',\n 'ConnectionForcedCloseError',\n 'PoolClosedError',\n 'MasterNotFoundError',\n 'SlaveNotFoundError',\n 'ReadOnlyError',\n ]\n\n\nclass RedisError(Exception):\n \"\"\"Base exception class for aioredis exceptions.\"\"\"\n\n\nclass ProtocolError(RedisError):\n \"\"\"Raised when protocol error occurs.\"\"\"\n\n\nclass ReplyError(RedisError):\n \"\"\"Raised for redis error replies (-ERR).\"\"\"\n\n MATCH_REPLY = None\n\n def __new__(cls, msg, *args):\n for klass in cls.__subclasses__():\n if msg and klass.MATCH_REPLY and msg.startswith(klass.MATCH_REPLY):\n return klass(msg, *args)\n return super().__new__(cls, msg, *args)\n\n\nclass MaxClientsError(ReplyError):\n \"\"\"Raised for redis server when the maximum number of client has been\n reached.\"\"\"\n\n MATCH_REPLY = \"ERR max number of clients reached\"\n\n\nclass AuthError(ReplyError):\n \"\"\"Raised when authentication errors occurs.\"\"\"\n\n MATCH_REPLY = (\"NOAUTH \", \"ERR invalid password\")\n\n\nclass BusyGroupError(ReplyError):\n \"\"\"Raised if Consumer Group name already exists.\"\"\"\n\n MATCH_REPLY = \"BUSYGROUP Consumer Group name already exists\"\n\n\nclass PipelineError(RedisError):\n \"\"\"Raised if command within pipeline raised error.\"\"\"\n\n def __init__(self, errors):\n super().__init__('{} errors:'.format(self.__class__.__name__), errors)\n\n\nclass MultiExecError(PipelineError):\n \"\"\"Raised if command within MULTI/EXEC block caused error.\"\"\"\n\n\nclass WatchVariableError(MultiExecError):\n \"\"\"Raised if watched variable changed (EXEC returns None).\"\"\"\n\n\nclass ChannelClosedError(RedisError):\n \"\"\"Raised when Pub/Sub channel is unsubscribed and messages queue is empty.\n \"\"\"\n\n\nclass ReadOnlyError(RedisError):\n \"\"\"Raised from slave when read-only mode is enabled\"\"\"\n\n\nclass MasterNotFoundError(RedisError):\n \"\"\"Raised for sentinel master not found error.\"\"\"\n\n\nclass SlaveNotFoundError(RedisError):\n \"\"\"Raised for sentinel slave not found error.\"\"\"\n\n\nclass MasterReplyError(RedisError):\n \"\"\"Raised by sentinel client for master error replies.\"\"\"\n\n\nclass SlaveReplyError(RedisError):\n \"\"\"Raised by sentinel client for slave error replies.\"\"\"\n\n\nclass ConnectionClosedError(RedisError):\n \"\"\"Raised if connection to server was closed.\"\"\"\n\n\nclass ConnectionForcedCloseError(ConnectionClosedError):\n \"\"\"Raised if connection was closed with .close() method.\"\"\"\n\n\nclass PoolClosedError(RedisError):\n \"\"\"Raised if pool is closed.\"\"\"\n", "path": "aioredis/errors.py"}]} | 1,107 | 118 |
gh_patches_debug_1898 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1813 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OpenTelemetry distro as a default distro for OpenTelemetry Instrumentation
The `opentelemetry-instrumentation` auto instrumentation doesn't work without installing `opentelemetry-distro`, because the component initialisation is done in the distro package. How would a regular user know about this? Shouldn't `opentelemetry-distro` be the default, with an option for users to pick a different distro?
--- END ISSUE ---
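A small sketch of the corrected exporter construction (the collector URL is a placeholder); the point is that `endpoint` is an ordinary keyword argument, which is what the fix below corrects in the example file's commented-out line:
```python
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter

# `endpoint` is passed as a normal keyword argument (endpoint=...), not `endpoint:=`.
span_exporter = OTLPSpanExporter(endpoint="my-collector:4317")
```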
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/getting_started/otlpcollector_example.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # otcollector.py
16 import time
17
18 from opentelemetry import trace
19 from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
20 OTLPSpanExporter,
21 )
22 from opentelemetry.sdk.trace import TracerProvider
23 from opentelemetry.sdk.trace.export import BatchSpanProcessor
24
25 span_exporter = OTLPSpanExporter(
26 # optional
27 # endpoint:="myCollectorURL:4317",
28 # credentials=ChannelCredentials(credentials),
29 # headers=(("metadata", "metadata")),
30 )
31 tracer_provider = TracerProvider()
32 trace.set_tracer_provider(tracer_provider)
33 span_processor = BatchSpanProcessor(span_exporter)
34 tracer_provider.add_span_processor(span_processor)
35
36 # Configure the tracer to use the collector exporter
37 tracer = trace.get_tracer_provider().get_tracer(__name__)
38
39 with tracer.start_as_current_span("foo"):
40 print("Hello world!")
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/getting_started/otlpcollector_example.py b/docs/getting_started/otlpcollector_example.py
--- a/docs/getting_started/otlpcollector_example.py
+++ b/docs/getting_started/otlpcollector_example.py
@@ -24,7 +24,7 @@
span_exporter = OTLPSpanExporter(
# optional
- # endpoint:="myCollectorURL:4317",
+ # endpoint="myCollectorURL:4317",
# credentials=ChannelCredentials(credentials),
# headers=(("metadata", "metadata")),
)
| {"golden_diff": "diff --git a/docs/getting_started/otlpcollector_example.py b/docs/getting_started/otlpcollector_example.py\n--- a/docs/getting_started/otlpcollector_example.py\n+++ b/docs/getting_started/otlpcollector_example.py\n@@ -24,7 +24,7 @@\n \n span_exporter = OTLPSpanExporter(\n # optional\n- # endpoint:=\"myCollectorURL:4317\",\n+ # endpoint=\"myCollectorURL:4317\",\n # credentials=ChannelCredentials(credentials),\n # headers=((\"metadata\", \"metadata\")),\n )\n", "issue": "OpenTelemetry distro as a default distro for OpenTelemetry Instrumentation\nThe `opentelemetry-instrumentation` auto instrumentation doesn't work without installing `opentelemetry-distro` as the components initialisation is done in distro package. How does a regular user know about this and shouldn't openetemetry distro be the default and can give an option to let user use others? \n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# otcollector.py\nimport time\n\nfrom opentelemetry import trace\nfrom opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (\n OTLPSpanExporter,\n)\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor\n\nspan_exporter = OTLPSpanExporter(\n # optional\n # endpoint:=\"myCollectorURL:4317\",\n # credentials=ChannelCredentials(credentials),\n # headers=((\"metadata\", \"metadata\")),\n)\ntracer_provider = TracerProvider()\ntrace.set_tracer_provider(tracer_provider)\nspan_processor = BatchSpanProcessor(span_exporter)\ntracer_provider.add_span_processor(span_processor)\n\n# Configure the tracer to use the collector exporter\ntracer = trace.get_tracer_provider().get_tracer(__name__)\n\nwith tracer.start_as_current_span(\"foo\"):\n print(\"Hello world!\")\n", "path": "docs/getting_started/otlpcollector_example.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# otcollector.py\nimport time\n\nfrom opentelemetry import trace\nfrom opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (\n OTLPSpanExporter,\n)\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor\n\nspan_exporter = OTLPSpanExporter(\n # optional\n # endpoint=\"myCollectorURL:4317\",\n # credentials=ChannelCredentials(credentials),\n # headers=((\"metadata\", \"metadata\")),\n)\ntracer_provider = 
TracerProvider()\ntrace.set_tracer_provider(tracer_provider)\nspan_processor = BatchSpanProcessor(span_exporter)\ntracer_provider.add_span_processor(span_processor)\n\n# Configure the tracer to use the collector exporter\ntracer = trace.get_tracer_provider().get_tracer(__name__)\n\nwith tracer.start_as_current_span(\"foo\"):\n print(\"Hello world!\")\n", "path": "docs/getting_started/otlpcollector_example.py"}]} | 735 | 127 |
gh_patches_debug_22463 | rasdani/github-patches | git_diff | feast-dev__feast-3514 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
feast ui does not work on proxy subpath
## Expected Behavior
Feast UI should work when it is served behind a proxy, on a subpath e.g. `/feast-ui`
## Current Behavior
Parts of the feast UI work behind a subpath, but not all of it (nothing is displayed apart from the feast logo with a "404" text - refer to screenshot). No requests in the network tab of the web browser are hitting 404.

## Steps to reproduce
Serve feast UI as you would e.g. `feature_store.serve_ui()`, optionally passing in the `root_path` parameter (it does not help).
Set up an nginx pod with the following configuration (i.e. the nginx pod should have `/etc/nginx/conf.d/default.conf` with the following contents - `dummy_project` is the project name, and `http://feast-ui-service:8080` is where the feast UI can be accessed from your nginx pod / container):
```
server {
listen 80 default_server;
location = /feast-ui/ {
rewrite (.*) /feast-ui/p/dummy_project permanent;
}
location /feast-ui/ {
proxy_pass http://feast-ui-service:8080/;
}
location / {
proxy_pass http://feast-ui-service:8080/;
}
}
```
This configuration works on localhost when nginx can listen on the root path `/`. However, note that the URL after all the redirects is wrong (it does not have the prefix).
- The first block is required to force a redirect to the `/p/{project_name}`. Without this, the page will display 404 as above.
- The second block is required to strip away `/feast-ui` so the UI app does not receive that path that it is not aware of
- The third block is a trick to make this setup work in a local environment, because the app itself will redirect the user back to `/p/dummy_project` (without the prefix), which we then proxy into the feast UI app. However, in an actual environment, this setup does not work, because when the url does not contain the `/feast-ui` prefix, the ingress will not route it to the nginx pod, so the nginx pod cannot proxy the connection to the right place.
Ideally, if the feast ui app is capable of being served on a subpath, only the second `location` block should be required in the nginx configuration. The first and third `location` blocks are workarounds.
### Specifications
- Version: 0.29.0
## Possible Solution
The app should redirect to relative and not absolute paths
--- END ISSUE ---
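A sketch of how the UI server would be launched behind the proxy prefix once the subpath is honoured, using the `start_server` signature from the file below; the host, port, TTL and project values are placeholders:
```python
import feast
from feast.ui_server import start_server

store = feast.FeatureStore(repo_path=".")

# With root_path applied, links generated by the app (e.g. the registry path)
# carry the /feast-ui prefix instead of redirecting to absolute, prefix-less URLs.
start_server(
    store,
    host="0.0.0.0",
    port=8080,
    get_registry_dump=None,   # placeholder; not used for serving the UI itself
    project_id="dummy_project",
    registry_ttl_sec=5,
    root_path="/feast-ui",
)
```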
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/ui_server.py`
Content:
```
1 import json
2 import threading
3 from typing import Callable, Optional
4
5 import pkg_resources
6 import uvicorn
7 from fastapi import FastAPI, Response
8 from fastapi.middleware.cors import CORSMiddleware
9 from fastapi.staticfiles import StaticFiles
10
11 import feast
12
13
14 def get_app(
15 store: "feast.FeatureStore",
16 get_registry_dump: Callable,
17 project_id: str,
18 registry_ttl_secs: int,
19 host: str,
20 port: int,
21 ):
22 app = FastAPI()
23
24 app.add_middleware(
25 CORSMiddleware,
26 allow_origins=["*"],
27 allow_credentials=True,
28 allow_methods=["*"],
29 allow_headers=["*"],
30 )
31
32 # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app is shutting down
33 registry_proto = None
34 shutting_down = False
35 active_timer: Optional[threading.Timer] = None
36
37 def async_refresh():
38 store.refresh_registry()
39 nonlocal registry_proto
40 registry_proto = store.registry.proto()
41 if shutting_down:
42 return
43 nonlocal active_timer
44 active_timer = threading.Timer(registry_ttl_secs, async_refresh)
45 active_timer.start()
46
47 @app.on_event("shutdown")
48 def shutdown_event():
49 nonlocal shutting_down
50 shutting_down = True
51 if active_timer:
52 active_timer.cancel()
53
54 async_refresh()
55
56 ui_dir = pkg_resources.resource_filename(__name__, "ui/build/")
57 # Initialize with the projects-list.json file
58 with open(ui_dir + "projects-list.json", mode="w") as f:
59 projects_dict = {
60 "projects": [
61 {
62 "name": "Project",
63 "description": "Test project",
64 "id": project_id,
65 "registryPath": "/registry",
66 }
67 ]
68 }
69 f.write(json.dumps(projects_dict))
70
71 @app.get("/registry")
72 def read_registry():
73 return Response(
74 content=registry_proto.SerializeToString(),
75 media_type="application/octet-stream",
76 )
77
78 # For all other paths (such as paths that would otherwise be handled by react router), pass to React
79 @app.api_route("/p/{path_name:path}", methods=["GET"])
80 def catch_all():
81 filename = ui_dir + "index.html"
82
83 with open(filename) as f:
84 content = f.read()
85
86 return Response(content, media_type="text/html")
87
88 app.mount(
89 "/",
90 StaticFiles(directory=ui_dir, html=True),
91 name="site",
92 )
93
94 return app
95
96
97 def start_server(
98 store: "feast.FeatureStore",
99 host: str,
100 port: int,
101 get_registry_dump: Callable,
102 project_id: str,
103 registry_ttl_sec: int,
104 root_path: str = "",
105 ):
106 app = get_app(
107 store,
108 get_registry_dump,
109 project_id,
110 registry_ttl_sec,
111 host,
112 port,
113 )
114 assert root_path is not None
115 uvicorn.run(app, host=host, port=port, root_path=root_path)
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/feast/ui_server.py b/sdk/python/feast/ui_server.py
--- a/sdk/python/feast/ui_server.py
+++ b/sdk/python/feast/ui_server.py
@@ -13,11 +13,9 @@
def get_app(
store: "feast.FeatureStore",
- get_registry_dump: Callable,
project_id: str,
registry_ttl_secs: int,
- host: str,
- port: int,
+ root_path: str = "",
):
app = FastAPI()
@@ -62,7 +60,7 @@
"name": "Project",
"description": "Test project",
"id": project_id,
- "registryPath": "/registry",
+ "registryPath": f"{root_path}/registry",
}
]
}
@@ -105,11 +103,8 @@
):
app = get_app(
store,
- get_registry_dump,
project_id,
registry_ttl_sec,
- host,
- port,
+ root_path,
)
- assert root_path is not None
- uvicorn.run(app, host=host, port=port, root_path=root_path)
+ uvicorn.run(app, host=host, port=port)
| {"golden_diff": "diff --git a/sdk/python/feast/ui_server.py b/sdk/python/feast/ui_server.py\n--- a/sdk/python/feast/ui_server.py\n+++ b/sdk/python/feast/ui_server.py\n@@ -13,11 +13,9 @@\n \n def get_app(\n store: \"feast.FeatureStore\",\n- get_registry_dump: Callable,\n project_id: str,\n registry_ttl_secs: int,\n- host: str,\n- port: int,\n+ root_path: str = \"\",\n ):\n app = FastAPI()\n \n@@ -62,7 +60,7 @@\n \"name\": \"Project\",\n \"description\": \"Test project\",\n \"id\": project_id,\n- \"registryPath\": \"/registry\",\n+ \"registryPath\": f\"{root_path}/registry\",\n }\n ]\n }\n@@ -105,11 +103,8 @@\n ):\n app = get_app(\n store,\n- get_registry_dump,\n project_id,\n registry_ttl_sec,\n- host,\n- port,\n+ root_path,\n )\n- assert root_path is not None\n- uvicorn.run(app, host=host, port=port, root_path=root_path)\n+ uvicorn.run(app, host=host, port=port)\n", "issue": "feast ui does not work on proxy subpath\n## Expected Behavior \r\n\r\nFeast UI should work when it is served behind a proxy, on a subpath e.g. `/feast-ui`\r\n\r\n## Current Behavior\r\n\r\nParts of the feast UI works behind a subpath, but not entirely (nothing is displayed, just the feast logo with a \"404\" text - refer to screenshot). No requests in the network tab of the web browser are hitting 404.\r\n\r\n\r\n\r\n## Steps to reproduce\r\n\r\nServe feast UI as you would e.g. `feature_store.serve_ui()`, optionally passing in the `root_path` parameter (it does not help).\r\n\r\nSet up an nginx pod with the following configuration (i.e. the nginx pod should have `/etc/nginx/conf.d/default.conf` with the following contents - `dummy_project` is the project name, and `http://feast-ui-service:8080` is where the feast UI can be accessed from your nginx pod / container):\r\n\r\n```\r\nserver {\r\n listen 80 default_server;\r\n\r\n location = /feast-ui/ {\r\n rewrite (.*) /feast-ui/p/dummy_project permanent;\r\n }\r\n\r\n location /feast-ui/ {\r\n proxy_pass http://feast-ui-service:8080/;\r\n }\r\n\r\n location / {\r\n proxy_pass http://feast-ui-service:8080/;\r\n }\r\n}\r\n```\r\n\r\nThis configuration works on localhost when nginx can listen on the root path `/`. However, note that the URL after all the redirects is wrong (it does not have the prefix).\r\n\r\n- The first block is required to force a redirect to the `/p/{project_name}`. Without this, the page will display 404 as above.\r\n- The second block is required to strip away `/feast-ui` so the UI app does not receive that path that it is not aware of\r\n- The third block is a trick to make this setup work in a local environment, because the app itself will redirect the user back to `/p/dummy_project` (without the prefix), which we then proxy into the feast UI app. However, in an actual environment, this setup does not work, because when the url does not contain the `/feast-ui` prefix, the ingress will not route it to the nginx pod, so the nginx pod cannot proxy the connection to the right place.\r\n\r\nIdeally, if the feast ui app is capable of being served on a subpath, only the second `location` block should be required in the nginx configuration. 
The first and third `location` blocks are workarounds.\r\n\r\n### Specifications\r\n\r\n- Version: 0.29.0\r\n\r\n## Possible Solution\r\n\r\nThe app should redirect to relative and not absolute paths\n", "before_files": [{"content": "import json\nimport threading\nfrom typing import Callable, Optional\n\nimport pkg_resources\nimport uvicorn\nfrom fastapi import FastAPI, Response\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.staticfiles import StaticFiles\n\nimport feast\n\n\ndef get_app(\n store: \"feast.FeatureStore\",\n get_registry_dump: Callable,\n project_id: str,\n registry_ttl_secs: int,\n host: str,\n port: int,\n):\n app = FastAPI()\n\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app is shutting down\n registry_proto = None\n shutting_down = False\n active_timer: Optional[threading.Timer] = None\n\n def async_refresh():\n store.refresh_registry()\n nonlocal registry_proto\n registry_proto = store.registry.proto()\n if shutting_down:\n return\n nonlocal active_timer\n active_timer = threading.Timer(registry_ttl_secs, async_refresh)\n active_timer.start()\n\n @app.on_event(\"shutdown\")\n def shutdown_event():\n nonlocal shutting_down\n shutting_down = True\n if active_timer:\n active_timer.cancel()\n\n async_refresh()\n\n ui_dir = pkg_resources.resource_filename(__name__, \"ui/build/\")\n # Initialize with the projects-list.json file\n with open(ui_dir + \"projects-list.json\", mode=\"w\") as f:\n projects_dict = {\n \"projects\": [\n {\n \"name\": \"Project\",\n \"description\": \"Test project\",\n \"id\": project_id,\n \"registryPath\": \"/registry\",\n }\n ]\n }\n f.write(json.dumps(projects_dict))\n\n @app.get(\"/registry\")\n def read_registry():\n return Response(\n content=registry_proto.SerializeToString(),\n media_type=\"application/octet-stream\",\n )\n\n # For all other paths (such as paths that would otherwise be handled by react router), pass to React\n @app.api_route(\"/p/{path_name:path}\", methods=[\"GET\"])\n def catch_all():\n filename = ui_dir + \"index.html\"\n\n with open(filename) as f:\n content = f.read()\n\n return Response(content, media_type=\"text/html\")\n\n app.mount(\n \"/\",\n StaticFiles(directory=ui_dir, html=True),\n name=\"site\",\n )\n\n return app\n\n\ndef start_server(\n store: \"feast.FeatureStore\",\n host: str,\n port: int,\n get_registry_dump: Callable,\n project_id: str,\n registry_ttl_sec: int,\n root_path: str = \"\",\n):\n app = get_app(\n store,\n get_registry_dump,\n project_id,\n registry_ttl_sec,\n host,\n port,\n )\n assert root_path is not None\n uvicorn.run(app, host=host, port=port, root_path=root_path)\n", "path": "sdk/python/feast/ui_server.py"}], "after_files": [{"content": "import json\nimport threading\nfrom typing import Callable, Optional\n\nimport pkg_resources\nimport uvicorn\nfrom fastapi import FastAPI, Response\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.staticfiles import StaticFiles\n\nimport feast\n\n\ndef get_app(\n store: \"feast.FeatureStore\",\n project_id: str,\n registry_ttl_secs: int,\n root_path: str = \"\",\n):\n app = FastAPI()\n\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app 
is shutting down\n registry_proto = None\n shutting_down = False\n active_timer: Optional[threading.Timer] = None\n\n def async_refresh():\n store.refresh_registry()\n nonlocal registry_proto\n registry_proto = store.registry.proto()\n if shutting_down:\n return\n nonlocal active_timer\n active_timer = threading.Timer(registry_ttl_secs, async_refresh)\n active_timer.start()\n\n @app.on_event(\"shutdown\")\n def shutdown_event():\n nonlocal shutting_down\n shutting_down = True\n if active_timer:\n active_timer.cancel()\n\n async_refresh()\n\n ui_dir = pkg_resources.resource_filename(__name__, \"ui/build/\")\n # Initialize with the projects-list.json file\n with open(ui_dir + \"projects-list.json\", mode=\"w\") as f:\n projects_dict = {\n \"projects\": [\n {\n \"name\": \"Project\",\n \"description\": \"Test project\",\n \"id\": project_id,\n \"registryPath\": f\"{root_path}/registry\",\n }\n ]\n }\n f.write(json.dumps(projects_dict))\n\n @app.get(\"/registry\")\n def read_registry():\n return Response(\n content=registry_proto.SerializeToString(),\n media_type=\"application/octet-stream\",\n )\n\n # For all other paths (such as paths that would otherwise be handled by react router), pass to React\n @app.api_route(\"/p/{path_name:path}\", methods=[\"GET\"])\n def catch_all():\n filename = ui_dir + \"index.html\"\n\n with open(filename) as f:\n content = f.read()\n\n return Response(content, media_type=\"text/html\")\n\n app.mount(\n \"/\",\n StaticFiles(directory=ui_dir, html=True),\n name=\"site\",\n )\n\n return app\n\n\ndef start_server(\n store: \"feast.FeatureStore\",\n host: str,\n port: int,\n get_registry_dump: Callable,\n project_id: str,\n registry_ttl_sec: int,\n root_path: str = \"\",\n):\n app = get_app(\n store,\n project_id,\n registry_ttl_sec,\n root_path,\n )\n uvicorn.run(app, host=host, port=port)\n", "path": "sdk/python/feast/ui_server.py"}]} | 1,779 | 281 |
gh_patches_debug_18421 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2512 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
More 504s: on the results framework page
@Geerts reports on Skype: 504 Gateway timeout hunter strikes again: http://rsr.test.akvo.org/rest/v1/indicator_period_data_framework/?format=json&period__indicator__result__project=2780
Via: http://rsr.test.akvo.org/en/myrsr/results/2780/#results,13323,5679
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/indicator_period_data.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment
9
10 from ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,
11 IndicatorPeriodDataCommentSerializer)
12 from ..viewsets import PublicProjectViewSet
13
14 from django.http import HttpResponseForbidden
15
16 from rest_framework import status
17 from rest_framework.decorators import api_view, permission_classes
18 from rest_framework.response import Response
19
20
21 class IndicatorPeriodDataViewSet(PublicProjectViewSet):
22 """
23 """
24 queryset = IndicatorPeriodData.objects.all()
25 serializer_class = IndicatorPeriodDataSerializer
26
27 project_relation = 'period__indicator__result__project__'
28
29
30 class IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):
31 """
32 """
33 queryset = IndicatorPeriodData.objects.all()
34 serializer_class = IndicatorPeriodDataFrameworkSerializer
35 project_relation = 'period__indicator__result__project__'
36
37
38 class IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):
39 """
40 """
41 queryset = IndicatorPeriodDataComment.objects.all()
42 serializer_class = IndicatorPeriodDataCommentSerializer
43 project_relation = 'data__period__indicator__result__project__'
44
45
46 @api_view(['POST'])
47 def indicator_upload_file(request, pk=None):
48 """
49 Special API call for directly uploading a file.
50
51 :param request; A Django request object.
52 :param pk; The primary key of an IndicatorPeriodData instance.
53 """
54 update = IndicatorPeriodData.objects.get(pk=pk)
55 upload_file = request.data['file']
56
57 # Permissions
58 user = getattr(request, 'user', None)
59 if not user:
60 return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)
61
62 # TODO: Check if user is allowed to upload a file
63 # if not user.has_perm('rsr.change_project', update.period.indicator.result.project):
64 # return Response({'error': 'User has no permission to place an update'},
65 # status=status.HTTP_403_FORBIDDEN)
66
67 try:
68 file_type = request.POST.copy()['type']
69 if file_type == 'photo':
70 update.photo = upload_file
71 update.save(update_fields=['photo'])
72 return Response({'file': update.photo.url})
73 elif file_type == 'file':
74 update.file = upload_file
75 update.save(update_fields=['file'])
76 return Response({'file': update.file.url})
77 except Exception as e:
78 return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)
79
```
Path: `akvo/rest/views/partnership.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import Partnership
9
10 from ..serializers import PartnershipSerializer, PartnershipBasicSerializer
11 from ..viewsets import PublicProjectViewSet
12
13
14 class PartnershipViewSet(PublicProjectViewSet):
15 """
16 """
17 queryset = Partnership.objects.all()
18 serializer_class = PartnershipSerializer
19
20 def get_queryset(self):
21 """Allow filtering on partner_type."""
22 partner_type = self.request.query_params.get('partner_type', None)
23 if partner_type and partner_type in Partnership.PARTNER_TYPES_TO_ROLES_MAP.keys():
24 self.queryset = self.queryset.filter(
25 iati_organisation_role=Partnership.PARTNER_TYPES_TO_ROLES_MAP[partner_type]
26 ).distinct()
27 return super(PartnershipViewSet, self).get_queryset()
28
29
30 class PartnershipMoreLinkViewSet(PublicProjectViewSet):
31 """
32 Specific endpoint for the '+X partners' links in RSR. Contains the name, long name and logo of
33 an organisation and the partnership role.
34 """
35 queryset = Partnership.objects.all()
36 serializer_class = PartnershipBasicSerializer
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/indicator_period_data.py b/akvo/rest/views/indicator_period_data.py
--- a/akvo/rest/views/indicator_period_data.py
+++ b/akvo/rest/views/indicator_period_data.py
@@ -30,7 +30,13 @@
class IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):
"""
"""
- queryset = IndicatorPeriodData.objects.all()
+ queryset = IndicatorPeriodData.objects.select_related(
+ 'period',
+ 'user'
+ ).prefetch_related(
+ 'comments',
+ 'comments__user'
+ ).all()
serializer_class = IndicatorPeriodDataFrameworkSerializer
project_relation = 'period__indicator__result__project__'
diff --git a/akvo/rest/views/partnership.py b/akvo/rest/views/partnership.py
--- a/akvo/rest/views/partnership.py
+++ b/akvo/rest/views/partnership.py
@@ -14,7 +14,7 @@
class PartnershipViewSet(PublicProjectViewSet):
"""
"""
- queryset = Partnership.objects.all()
+ queryset = Partnership.objects.select_related('organisation', 'project').all()
serializer_class = PartnershipSerializer
def get_queryset(self):
| {"golden_diff": "diff --git a/akvo/rest/views/indicator_period_data.py b/akvo/rest/views/indicator_period_data.py\n--- a/akvo/rest/views/indicator_period_data.py\n+++ b/akvo/rest/views/indicator_period_data.py\n@@ -30,7 +30,13 @@\n class IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n- queryset = IndicatorPeriodData.objects.all()\n+ queryset = IndicatorPeriodData.objects.select_related(\n+ 'period',\n+ 'user'\n+ ).prefetch_related(\n+ 'comments',\n+ 'comments__user'\n+ ).all()\n serializer_class = IndicatorPeriodDataFrameworkSerializer\n project_relation = 'period__indicator__result__project__'\n \ndiff --git a/akvo/rest/views/partnership.py b/akvo/rest/views/partnership.py\n--- a/akvo/rest/views/partnership.py\n+++ b/akvo/rest/views/partnership.py\n@@ -14,7 +14,7 @@\n class PartnershipViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n- queryset = Partnership.objects.all()\n+ queryset = Partnership.objects.select_related('organisation', 'project').all()\n serializer_class = PartnershipSerializer\n \n def get_queryset(self):\n", "issue": "More 504s: on the results framework page\n@Geerts reports on Skype: 504 Gateway timeout hunter strikes again: http://rsr.test.akvo.org/rest/v1/indicator_period_data_framework/?format=json&period__indicator__result__project=2780\r\n\r\nVia: http://rsr.test.akvo.org/en/myrsr/results/2780/#results,13323,5679\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment\n\nfrom ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,\n IndicatorPeriodDataCommentSerializer)\nfrom ..viewsets import PublicProjectViewSet\n\nfrom django.http import HttpResponseForbidden\n\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\n\n\nclass IndicatorPeriodDataViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = IndicatorPeriodDataSerializer\n\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = IndicatorPeriodDataFrameworkSerializer\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodDataComment.objects.all()\n serializer_class = IndicatorPeriodDataCommentSerializer\n project_relation = 'data__period__indicator__result__project__'\n\n\n@api_view(['POST'])\ndef indicator_upload_file(request, pk=None):\n \"\"\"\n Special API call for directly uploading a file.\n\n :param request; A Django request object.\n :param pk; The primary key of an IndicatorPeriodData instance.\n \"\"\"\n update = IndicatorPeriodData.objects.get(pk=pk)\n upload_file = request.data['file']\n\n # Permissions\n user = getattr(request, 'user', None)\n if not user:\n return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)\n\n # TODO: Check if user is allowed to upload a file\n # if not user.has_perm('rsr.change_project', 
update.period.indicator.result.project):\n # return Response({'error': 'User has no permission to place an update'},\n # status=status.HTTP_403_FORBIDDEN)\n\n try:\n file_type = request.POST.copy()['type']\n if file_type == 'photo':\n update.photo = upload_file\n update.save(update_fields=['photo'])\n return Response({'file': update.photo.url})\n elif file_type == 'file':\n update.file = upload_file\n update.save(update_fields=['file'])\n return Response({'file': update.file.url})\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n", "path": "akvo/rest/views/indicator_period_data.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import Partnership\n\nfrom ..serializers import PartnershipSerializer, PartnershipBasicSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass PartnershipViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = Partnership.objects.all()\n serializer_class = PartnershipSerializer\n\n def get_queryset(self):\n \"\"\"Allow filtering on partner_type.\"\"\"\n partner_type = self.request.query_params.get('partner_type', None)\n if partner_type and partner_type in Partnership.PARTNER_TYPES_TO_ROLES_MAP.keys():\n self.queryset = self.queryset.filter(\n iati_organisation_role=Partnership.PARTNER_TYPES_TO_ROLES_MAP[partner_type]\n ).distinct()\n return super(PartnershipViewSet, self).get_queryset()\n\n\nclass PartnershipMoreLinkViewSet(PublicProjectViewSet):\n \"\"\"\n Specific endpoint for the '+X partners' links in RSR. 
Contains the name, long name and logo of\n an organisation and the partnership role.\n \"\"\"\n queryset = Partnership.objects.all()\n serializer_class = PartnershipBasicSerializer\n", "path": "akvo/rest/views/partnership.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorPeriodData, IndicatorPeriodDataComment\n\nfrom ..serializers import (IndicatorPeriodDataSerializer, IndicatorPeriodDataFrameworkSerializer,\n IndicatorPeriodDataCommentSerializer)\nfrom ..viewsets import PublicProjectViewSet\n\nfrom django.http import HttpResponseForbidden\n\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\n\n\nclass IndicatorPeriodDataViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.all()\n serializer_class = IndicatorPeriodDataSerializer\n\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataFrameworkViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodData.objects.select_related(\n 'period',\n 'user'\n ).prefetch_related(\n 'comments',\n 'comments__user'\n ).all()\n serializer_class = IndicatorPeriodDataFrameworkSerializer\n project_relation = 'period__indicator__result__project__'\n\n\nclass IndicatorPeriodDataCommentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorPeriodDataComment.objects.all()\n serializer_class = IndicatorPeriodDataCommentSerializer\n project_relation = 'data__period__indicator__result__project__'\n\n\n@api_view(['POST'])\ndef indicator_upload_file(request, pk=None):\n \"\"\"\n Special API call for directly uploading a file.\n\n :param request; A Django request object.\n :param pk; The primary key of an IndicatorPeriodData instance.\n \"\"\"\n update = IndicatorPeriodData.objects.get(pk=pk)\n upload_file = request.data['file']\n\n # Permissions\n user = getattr(request, 'user', None)\n if not user:\n return Response({'error': 'User is not logged in'}, status=status.HTTP_403_FORBIDDEN)\n\n # TODO: Check if user is allowed to upload a file\n # if not user.has_perm('rsr.change_project', update.period.indicator.result.project):\n # return Response({'error': 'User has no permission to place an update'},\n # status=status.HTTP_403_FORBIDDEN)\n\n try:\n file_type = request.POST.copy()['type']\n if file_type == 'photo':\n update.photo = upload_file\n update.save(update_fields=['photo'])\n return Response({'file': update.photo.url})\n elif file_type == 'file':\n update.file = upload_file\n update.save(update_fields=['file'])\n return Response({'file': update.file.url})\n except Exception as e:\n return Response({'error': str(e)}, status=status.HTTP_400_BAD_REQUEST)\n", "path": "akvo/rest/views/indicator_period_data.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import Partnership\n\nfrom ..serializers import PartnershipSerializer, PartnershipBasicSerializer\nfrom ..viewsets import 
PublicProjectViewSet\n\n\nclass PartnershipViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = Partnership.objects.select_related('organisation', 'project').all()\n serializer_class = PartnershipSerializer\n\n def get_queryset(self):\n \"\"\"Allow filtering on partner_type.\"\"\"\n partner_type = self.request.query_params.get('partner_type', None)\n if partner_type and partner_type in Partnership.PARTNER_TYPES_TO_ROLES_MAP.keys():\n self.queryset = self.queryset.filter(\n iati_organisation_role=Partnership.PARTNER_TYPES_TO_ROLES_MAP[partner_type]\n ).distinct()\n return super(PartnershipViewSet, self).get_queryset()\n\n\nclass PartnershipMoreLinkViewSet(PublicProjectViewSet):\n \"\"\"\n Specific endpoint for the '+X partners' links in RSR. Contains the name, long name and logo of\n an organisation and the partnership role.\n \"\"\"\n queryset = Partnership.objects.all()\n serializer_class = PartnershipBasicSerializer\n", "path": "akvo/rest/views/partnership.py"}]} | 1,483 | 269 |
gh_patches_debug_33321 | rasdani/github-patches | git_diff | fedora-infra__bodhi-3173 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support Sourceware bugs in Fedora enhanced markdown
Many key projects reside on sourceware.org including glibc, gdb, binutils, elfutils, libabigail, systemtap etc.
Could you please add markdown support for sourceware.org bugzilla (https://sourceware.org/bugzilla/)?
I suggest a unified markup of SWBZ#XXXX or SW#XXXX for all projects on the main sourceware bugzilla instance.
Likewise gcc compiler bugs are also on sourceware but use a distinct instance (https://gcc.gnu.org/bugzilla/)
I suggest a markup of GCC#XXXX for gcc bugs.
Thank you!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/server/ffmarkdown.py`
Content:
```
1 # Copyright © 2014-2019 Red Hat, Inc. and others.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with this program; if not, write to the Free Software
17 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
18 # USA.
19 """
20 Fedora-flavored Markdown.
21
22 Author: Ralph Bean <[email protected]>
23 """
24
25 from markdown.extensions import Extension
26 import markdown.inlinepatterns
27 import markdown.postprocessors
28 import markdown.util
29 import pyramid.threadlocal
30
31 from bodhi import MENTION_RE
32
33
34 BUGZILLA_RE = r'([a-zA-Z]+)(#[0-9]{5,})'
35
36
37 def user_url(name):
38 """
39 Return a URL to the given username.
40
41 Args:
42 name (basestring): The username of the user we want a URL for.
43 Returns:
44 basestring: A URL to the requested user.
45 """
46 request = pyramid.threadlocal.get_current_request()
47 return request.route_url('user', name=name)
48
49
50 def bug_url(tracker, idx):
51 """
52 Return the URL for the given bug.
53
54 Args:
55 tracker (basestring): Which bug tracker is being referenced. May be any of 'fedora',
56 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', or 'rhbz'.
57 idx (basestring or int): The bug number.
58 Returns:
59 basestring: The URL of the given bug.
60 Raises:
61 KeyError: If the given tracker is not supported by this function.
62 """
63 try:
64 return {
65 'fedora': "https://bugzilla.redhat.com/show_bug.cgi?id=%s",
66 'gnome': "https://bugzilla.gnome.org/show_bug.cgi?id=%s",
67 'kde': "https://bugs.kde.org/show_bug.cgi?id=%s",
68 'mozilla': "https://bugzilla.mozilla.org/show_bug.cgi?id=%s",
69 'pear': "http://pear.php.net/bugs/bug.php?id=%s",
70 'perl': "https://rt.cpan.org/Public/Bug/Display.html?id=%s",
71 'php': "https://bugs.php.net/bug.php?id=%s",
72 'python': "https://bugs.python.org/issue%s",
73 'rh': "https://bugzilla.redhat.com/show_bug.cgi?id=%s",
74 'rhbz': "https://bugzilla.redhat.com/show_bug.cgi?id=%s"}[tracker.lower()] % idx
75
76 except KeyError:
77 return None
78
79
80 class MentionPattern(markdown.inlinepatterns.Pattern):
81 """Match username mentions and point to their profiles."""
82
83 def handleMatch(self, m):
84 """
85 Build and return an Element that links to the matched User's profile.
86
87 Args:
88 m (re.MatchObject): The regex match on the username.
89 Return:
90 xml.etree.Element: An html anchor referencing the user's profile.
91 """
92 el = markdown.util.etree.Element("a")
93 name = markdown.util.AtomicString(m.group(2))
94 el.set('href', user_url(name[1:]))
95 el.text = name
96 return el
97
98
99 class BugzillaPattern(markdown.inlinepatterns.Pattern):
100 """Match bug tracker patterns."""
101
102 def handleMatch(self, m):
103 """
104 Build and return an Element that links to the referenced bug.
105
106 Args:
107 m (re.MatchObject): The regex match on the bug.
108 Returns:
109 xml.etree.Element: An html anchor referencing the matched bug.
110 """
111 tracker = markdown.util.AtomicString(m.group(2))
112 idx = markdown.util.AtomicString(m.group(3))
113 url = bug_url(tracker, idx[1:])
114
115 if url is None:
116 return tracker + idx
117
118 el = markdown.util.etree.Element("a")
119 el.set('href', url)
120 el.text = idx
121 return el
122
123
124 class SurroundProcessor(markdown.postprocessors.Postprocessor):
125 """A postprocessor to surround the text with a markdown <div>."""
126
127 def run(self, text):
128 """
129 Return text wrapped in a <div> with a markdown class.
130
131 Args:
132 text (str): The text to wrap in a <div>.
133 Returns:
134 str: The text wrapped in a <div>.
135 """
136 return "<div class='markdown'>" + text + "</div>"
137
138
139 class BodhiExtension(Extension):
140 """Bodhi's markdown Extension."""
141
142 def extendMarkdown(self, md, md_globals):
143 """
144 Extend markdown to add our patterns and postprocessor.
145
146 Args:
147 md (Markdown): An instance of the Markdown class.
148 md_globals (dict): Contains all the various global variables within the markdown module.
149 """
150 md.inlinePatterns.add('mention', MentionPattern(MENTION_RE, md), '_end')
151 md.inlinePatterns.add('bugzilla', BugzillaPattern(BUGZILLA_RE, md), '_end')
152 md.postprocessors.add('surround', SurroundProcessor(md), '_end')
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bodhi/server/ffmarkdown.py b/bodhi/server/ffmarkdown.py
--- a/bodhi/server/ffmarkdown.py
+++ b/bodhi/server/ffmarkdown.py
@@ -53,7 +53,8 @@
Args:
tracker (basestring): Which bug tracker is being referenced. May be any of 'fedora',
- 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', or 'rhbz'.
+ 'gcc', 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', 'rhbz'
+ or 'sourceware'.
idx (basestring or int): The bug number.
Returns:
basestring: The URL of the given bug.
@@ -61,17 +62,21 @@
KeyError: If the given tracker is not supported by this function.
"""
try:
- return {
+ trackers = {
'fedora': "https://bugzilla.redhat.com/show_bug.cgi?id=%s",
+ 'gcc': "https://gcc.gnu.org/bugzilla/show_bug.cgi?id=%s",
'gnome': "https://bugzilla.gnome.org/show_bug.cgi?id=%s",
'kde': "https://bugs.kde.org/show_bug.cgi?id=%s",
'mozilla': "https://bugzilla.mozilla.org/show_bug.cgi?id=%s",
- 'pear': "http://pear.php.net/bugs/bug.php?id=%s",
+ 'pear': "https://pear.php.net/bugs/bug.php?id=%s",
'perl': "https://rt.cpan.org/Public/Bug/Display.html?id=%s",
'php': "https://bugs.php.net/bug.php?id=%s",
'python': "https://bugs.python.org/issue%s",
'rh': "https://bugzilla.redhat.com/show_bug.cgi?id=%s",
- 'rhbz': "https://bugzilla.redhat.com/show_bug.cgi?id=%s"}[tracker.lower()] % idx
+ 'rhbz': "https://bugzilla.redhat.com/show_bug.cgi?id=%s",
+ 'sourceware': "https://sourceware.org/bugzilla/show_bug.cgi?id=%s"}
+
+ return trackers[tracker.lower()] % idx
except KeyError:
return None
| {"golden_diff": "diff --git a/bodhi/server/ffmarkdown.py b/bodhi/server/ffmarkdown.py\n--- a/bodhi/server/ffmarkdown.py\n+++ b/bodhi/server/ffmarkdown.py\n@@ -53,7 +53,8 @@\n \n Args:\n tracker (basestring): Which bug tracker is being referenced. May be any of 'fedora',\n- 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', or 'rhbz'.\n+ 'gcc', 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', 'rhbz'\n+ or 'sourceware'.\n idx (basestring or int): The bug number.\n Returns:\n basestring: The URL of the given bug.\n@@ -61,17 +62,21 @@\n KeyError: If the given tracker is not supported by this function.\n \"\"\"\n try:\n- return {\n+ trackers = {\n 'fedora': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n+ 'gcc': \"https://gcc.gnu.org/bugzilla/show_bug.cgi?id=%s\",\n 'gnome': \"https://bugzilla.gnome.org/show_bug.cgi?id=%s\",\n 'kde': \"https://bugs.kde.org/show_bug.cgi?id=%s\",\n 'mozilla': \"https://bugzilla.mozilla.org/show_bug.cgi?id=%s\",\n- 'pear': \"http://pear.php.net/bugs/bug.php?id=%s\",\n+ 'pear': \"https://pear.php.net/bugs/bug.php?id=%s\",\n 'perl': \"https://rt.cpan.org/Public/Bug/Display.html?id=%s\",\n 'php': \"https://bugs.php.net/bug.php?id=%s\",\n 'python': \"https://bugs.python.org/issue%s\",\n 'rh': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n- 'rhbz': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\"}[tracker.lower()] % idx\n+ 'rhbz': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n+ 'sourceware': \"https://sourceware.org/bugzilla/show_bug.cgi?id=%s\"}\n+\n+ return trackers[tracker.lower()] % idx\n \n except KeyError:\n return None\n", "issue": "Support Sourceware bugs in Fedora enhanced markdown\nMany key projects reside on sourceware.org including glibc, gdb, binutils, elfutils, libabigail, systemtap etc.\r\n\r\nCould you please add markdown support for sourceware.org bugzilla (https://sourceware.org/bugzilla/)?\r\n\r\nI suggest a unified markup of SWBZ#XXXX or SW#XXXX for all projects on the main sourceware bugzilla instance.\r\n\r\nLikewise gcc compiler bugs are also on sourceware but use a distinct instance (https://gcc.gnu.org/bugzilla/)\r\n\r\nI suggest a markup of GCC#XXXX for gcc bugs.\r\n\r\nThank you!\n", "before_files": [{"content": "# Copyright \u00a9 2014-2019 Red Hat, Inc. and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,\n# USA.\n\"\"\"\nFedora-flavored Markdown.\n\nAuthor: Ralph Bean <[email protected]>\n\"\"\"\n\nfrom markdown.extensions import Extension\nimport markdown.inlinepatterns\nimport markdown.postprocessors\nimport markdown.util\nimport pyramid.threadlocal\n\nfrom bodhi import MENTION_RE\n\n\nBUGZILLA_RE = r'([a-zA-Z]+)(#[0-9]{5,})'\n\n\ndef user_url(name):\n \"\"\"\n Return a URL to the given username.\n\n Args:\n name (basestring): The username of the user we want a URL for.\n Returns:\n basestring: A URL to the requested user.\n \"\"\"\n request = pyramid.threadlocal.get_current_request()\n return request.route_url('user', name=name)\n\n\ndef bug_url(tracker, idx):\n \"\"\"\n Return the URL for the given bug.\n\n Args:\n tracker (basestring): Which bug tracker is being referenced. May be any of 'fedora',\n 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', or 'rhbz'.\n idx (basestring or int): The bug number.\n Returns:\n basestring: The URL of the given bug.\n Raises:\n KeyError: If the given tracker is not supported by this function.\n \"\"\"\n try:\n return {\n 'fedora': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n 'gnome': \"https://bugzilla.gnome.org/show_bug.cgi?id=%s\",\n 'kde': \"https://bugs.kde.org/show_bug.cgi?id=%s\",\n 'mozilla': \"https://bugzilla.mozilla.org/show_bug.cgi?id=%s\",\n 'pear': \"http://pear.php.net/bugs/bug.php?id=%s\",\n 'perl': \"https://rt.cpan.org/Public/Bug/Display.html?id=%s\",\n 'php': \"https://bugs.php.net/bug.php?id=%s\",\n 'python': \"https://bugs.python.org/issue%s\",\n 'rh': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n 'rhbz': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\"}[tracker.lower()] % idx\n\n except KeyError:\n return None\n\n\nclass MentionPattern(markdown.inlinepatterns.Pattern):\n \"\"\"Match username mentions and point to their profiles.\"\"\"\n\n def handleMatch(self, m):\n \"\"\"\n Build and return an Element that links to the matched User's profile.\n\n Args:\n m (re.MatchObject): The regex match on the username.\n Return:\n xml.etree.Element: An html anchor referencing the user's profile.\n \"\"\"\n el = markdown.util.etree.Element(\"a\")\n name = markdown.util.AtomicString(m.group(2))\n el.set('href', user_url(name[1:]))\n el.text = name\n return el\n\n\nclass BugzillaPattern(markdown.inlinepatterns.Pattern):\n \"\"\"Match bug tracker patterns.\"\"\"\n\n def handleMatch(self, m):\n \"\"\"\n Build and return an Element that links to the referenced bug.\n\n Args:\n m (re.MatchObject): The regex match on the bug.\n Returns:\n xml.etree.Element: An html anchor referencing the matched bug.\n \"\"\"\n tracker = markdown.util.AtomicString(m.group(2))\n idx = markdown.util.AtomicString(m.group(3))\n url = bug_url(tracker, idx[1:])\n\n if url is None:\n return tracker + idx\n\n el = markdown.util.etree.Element(\"a\")\n el.set('href', url)\n el.text = idx\n return el\n\n\nclass SurroundProcessor(markdown.postprocessors.Postprocessor):\n \"\"\"A postprocessor to surround the text with a markdown <div>.\"\"\"\n\n def run(self, text):\n \"\"\"\n Return text wrapped in a <div> with a markdown class.\n\n Args:\n text (str): The text to wrap in a <div>.\n Returns:\n str: The text wrapped in a <div>.\n \"\"\"\n return \"<div class='markdown'>\" + text 
+ \"</div>\"\n\n\nclass BodhiExtension(Extension):\n \"\"\"Bodhi's markdown Extension.\"\"\"\n\n def extendMarkdown(self, md, md_globals):\n \"\"\"\n Extend markdown to add our patterns and postprocessor.\n\n Args:\n md (Markdown): An instance of the Markdown class.\n md_globals (dict): Contains all the various global variables within the markdown module.\n \"\"\"\n md.inlinePatterns.add('mention', MentionPattern(MENTION_RE, md), '_end')\n md.inlinePatterns.add('bugzilla', BugzillaPattern(BUGZILLA_RE, md), '_end')\n md.postprocessors.add('surround', SurroundProcessor(md), '_end')\n", "path": "bodhi/server/ffmarkdown.py"}], "after_files": [{"content": "# Copyright \u00a9 2014-2019 Red Hat, Inc. and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,\n# USA.\n\"\"\"\nFedora-flavored Markdown.\n\nAuthor: Ralph Bean <[email protected]>\n\"\"\"\n\nfrom markdown.extensions import Extension\nimport markdown.inlinepatterns\nimport markdown.postprocessors\nimport markdown.util\nimport pyramid.threadlocal\n\nfrom bodhi import MENTION_RE\n\n\nBUGZILLA_RE = r'([a-zA-Z]+)(#[0-9]{5,})'\n\n\ndef user_url(name):\n \"\"\"\n Return a URL to the given username.\n\n Args:\n name (basestring): The username of the user we want a URL for.\n Returns:\n basestring: A URL to the requested user.\n \"\"\"\n request = pyramid.threadlocal.get_current_request()\n return request.route_url('user', name=name)\n\n\ndef bug_url(tracker, idx):\n \"\"\"\n Return the URL for the given bug.\n\n Args:\n tracker (basestring): Which bug tracker is being referenced. 
May be any of 'fedora',\n 'gcc', 'gnome', 'kde', 'mozilla', 'pear', 'perl', 'php', 'python', 'rh', 'rhbz'\n or 'sourceware'.\n idx (basestring or int): The bug number.\n Returns:\n basestring: The URL of the given bug.\n Raises:\n KeyError: If the given tracker is not supported by this function.\n \"\"\"\n try:\n trackers = {\n 'fedora': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n 'gcc': \"https://gcc.gnu.org/bugzilla/show_bug.cgi?id=%s\",\n 'gnome': \"https://bugzilla.gnome.org/show_bug.cgi?id=%s\",\n 'kde': \"https://bugs.kde.org/show_bug.cgi?id=%s\",\n 'mozilla': \"https://bugzilla.mozilla.org/show_bug.cgi?id=%s\",\n 'pear': \"https://pear.php.net/bugs/bug.php?id=%s\",\n 'perl': \"https://rt.cpan.org/Public/Bug/Display.html?id=%s\",\n 'php': \"https://bugs.php.net/bug.php?id=%s\",\n 'python': \"https://bugs.python.org/issue%s\",\n 'rh': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n 'rhbz': \"https://bugzilla.redhat.com/show_bug.cgi?id=%s\",\n 'sourceware': \"https://sourceware.org/bugzilla/show_bug.cgi?id=%s\"}\n\n return trackers[tracker.lower()] % idx\n\n except KeyError:\n return None\n\n\nclass MentionPattern(markdown.inlinepatterns.Pattern):\n \"\"\"Match username mentions and point to their profiles.\"\"\"\n\n def handleMatch(self, m):\n \"\"\"\n Build and return an Element that links to the matched User's profile.\n\n Args:\n m (re.MatchObject): The regex match on the username.\n Return:\n xml.etree.Element: An html anchor referencing the user's profile.\n \"\"\"\n el = markdown.util.etree.Element(\"a\")\n name = markdown.util.AtomicString(m.group(2))\n el.set('href', user_url(name[1:]))\n el.text = name\n return el\n\n\nclass BugzillaPattern(markdown.inlinepatterns.Pattern):\n \"\"\"Match bug tracker patterns.\"\"\"\n\n def handleMatch(self, m):\n \"\"\"\n Build and return an Element that links to the referenced bug.\n\n Args:\n m (re.MatchObject): The regex match on the bug.\n Returns:\n xml.etree.Element: An html anchor referencing the matched bug.\n \"\"\"\n tracker = markdown.util.AtomicString(m.group(2))\n idx = markdown.util.AtomicString(m.group(3))\n url = bug_url(tracker, idx[1:])\n\n if url is None:\n return tracker + idx\n\n el = markdown.util.etree.Element(\"a\")\n el.set('href', url)\n el.text = idx\n return el\n\n\nclass SurroundProcessor(markdown.postprocessors.Postprocessor):\n \"\"\"A postprocessor to surround the text with a markdown <div>.\"\"\"\n\n def run(self, text):\n \"\"\"\n Return text wrapped in a <div> with a markdown class.\n\n Args:\n text (str): The text to wrap in a <div>.\n Returns:\n str: The text wrapped in a <div>.\n \"\"\"\n return \"<div class='markdown'>\" + text + \"</div>\"\n\n\nclass BodhiExtension(Extension):\n \"\"\"Bodhi's markdown Extension.\"\"\"\n\n def extendMarkdown(self, md, md_globals):\n \"\"\"\n Extend markdown to add our patterns and postprocessor.\n\n Args:\n md (Markdown): An instance of the Markdown class.\n md_globals (dict): Contains all the various global variables within the markdown module.\n \"\"\"\n md.inlinePatterns.add('mention', MentionPattern(MENTION_RE, md), '_end')\n md.inlinePatterns.add('bugzilla', BugzillaPattern(BUGZILLA_RE, md), '_end')\n md.postprocessors.add('surround', SurroundProcessor(md), '_end')\n", "path": "bodhi/server/ffmarkdown.py"}]} | 1,957 | 524 |
gh_patches_debug_12568 | rasdani/github-patches | git_diff | Kinto__kinto-474 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Do not require cliquet master branch in dev
As discussed with @Natim @almet
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/config/__init__.py`
Content:
```
1 import os
2 import binascii
3 import codecs
4 from kinto import logger
5
6 HERE = os.path.abspath(os.path.dirname(__file__))
7
8
9 def render_template(template, destination, **kwargs):
10 template = os.path.join(HERE, template)
11 folder = os.path.dirname(destination)
12
13 if not os.path.exists(folder):
14 os.makedirs(folder)
15
16 logger.info("Created config {}".format(os.path.abspath(destination)))
17
18 with codecs.open(template, 'r', encoding='utf-8') as f:
19 raw_template = f.read()
20 rendered = raw_template.format(**kwargs)
21 with codecs.open(destination, 'w+', encoding='utf-8') as output:
22 output.write(rendered)
23
24
25 def init(config_file, backend):
26 values = {}
27 values['secret'] = binascii.b2a_hex(os.urandom(32))
28
29 values['storage_backend'] = "cliquet.storage.%s" % backend
30 values['cache_backend'] = "cliquet.cache.%s" % backend
31 values['permission_backend'] = "cliquet.permission.%s" % backend
32
33 if backend == 'postgresql':
34 postgresql_url = "postgres://postgres:postgres@localhost/postgres"
35 values['storage_url'] = postgresql_url
36 values['cache_url'] = postgresql_url
37 values['permission_url'] = postgresql_url
38
39 elif backend == 'redis':
40 redis_url = "redis://localhost:6379"
41 values['storage_url'] = redis_url + "/1"
42 values['cache_url'] = redis_url + "/2"
43 values['permission_url'] = redis_url + "/3"
44
45 else:
46 values['storage_url'] = ''
47 values['cache_url'] = ''
48 values['permission_url'] = ''
49
50 render_template("kinto.tpl", config_file, **values)
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py
--- a/kinto/config/__init__.py
+++ b/kinto/config/__init__.py
@@ -1,6 +1,8 @@
import os
-import binascii
import codecs
+
+from cliquet import utils as cliquet_utils
+
from kinto import logger
HERE = os.path.abspath(os.path.dirname(__file__))
@@ -24,7 +26,8 @@
def init(config_file, backend):
values = {}
- values['secret'] = binascii.b2a_hex(os.urandom(32))
+
+ values['secret'] = cliquet_utils.random_bytes_hex(32)
values['storage_backend'] = "cliquet.storage.%s" % backend
values['cache_backend'] = "cliquet.cache.%s" % backend
| {"golden_diff": "diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py\n--- a/kinto/config/__init__.py\n+++ b/kinto/config/__init__.py\n@@ -1,6 +1,8 @@\n import os\n-import binascii\n import codecs\n+\n+from cliquet import utils as cliquet_utils\n+\n from kinto import logger\n \n HERE = os.path.abspath(os.path.dirname(__file__))\n@@ -24,7 +26,8 @@\n \n def init(config_file, backend):\n values = {}\n- values['secret'] = binascii.b2a_hex(os.urandom(32))\n+\n+ values['secret'] = cliquet_utils.random_bytes_hex(32)\n \n values['storage_backend'] = \"cliquet.storage.%s\" % backend\n values['cache_backend'] = \"cliquet.cache.%s\" % backend\n", "issue": "Do not require cliquet master branch in dev\nAs discussed with @Natim @almet \n\n", "before_files": [{"content": "import os\nimport binascii\nimport codecs\nfrom kinto import logger\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef render_template(template, destination, **kwargs):\n template = os.path.join(HERE, template)\n folder = os.path.dirname(destination)\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n logger.info(\"Created config {}\".format(os.path.abspath(destination)))\n\n with codecs.open(template, 'r', encoding='utf-8') as f:\n raw_template = f.read()\n rendered = raw_template.format(**kwargs)\n with codecs.open(destination, 'w+', encoding='utf-8') as output:\n output.write(rendered)\n\n\ndef init(config_file, backend):\n values = {}\n values['secret'] = binascii.b2a_hex(os.urandom(32))\n\n values['storage_backend'] = \"cliquet.storage.%s\" % backend\n values['cache_backend'] = \"cliquet.cache.%s\" % backend\n values['permission_backend'] = \"cliquet.permission.%s\" % backend\n\n if backend == 'postgresql':\n postgresql_url = \"postgres://postgres:postgres@localhost/postgres\"\n values['storage_url'] = postgresql_url\n values['cache_url'] = postgresql_url\n values['permission_url'] = postgresql_url\n\n elif backend == 'redis':\n redis_url = \"redis://localhost:6379\"\n values['storage_url'] = redis_url + \"/1\"\n values['cache_url'] = redis_url + \"/2\"\n values['permission_url'] = redis_url + \"/3\"\n\n else:\n values['storage_url'] = ''\n values['cache_url'] = ''\n values['permission_url'] = ''\n\n render_template(\"kinto.tpl\", config_file, **values)\n", "path": "kinto/config/__init__.py"}], "after_files": [{"content": "import os\nimport codecs\n\nfrom cliquet import utils as cliquet_utils\n\nfrom kinto import logger\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef render_template(template, destination, **kwargs):\n template = os.path.join(HERE, template)\n folder = os.path.dirname(destination)\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n logger.info(\"Created config {}\".format(os.path.abspath(destination)))\n\n with codecs.open(template, 'r', encoding='utf-8') as f:\n raw_template = f.read()\n rendered = raw_template.format(**kwargs)\n with codecs.open(destination, 'w+', encoding='utf-8') as output:\n output.write(rendered)\n\n\ndef init(config_file, backend):\n values = {}\n\n values['secret'] = cliquet_utils.random_bytes_hex(32)\n\n values['storage_backend'] = \"cliquet.storage.%s\" % backend\n values['cache_backend'] = \"cliquet.cache.%s\" % backend\n values['permission_backend'] = \"cliquet.permission.%s\" % backend\n\n if backend == 'postgresql':\n postgresql_url = \"postgres://postgres:postgres@localhost/postgres\"\n values['storage_url'] = postgresql_url\n values['cache_url'] = postgresql_url\n values['permission_url'] = postgresql_url\n\n elif backend == 
'redis':\n redis_url = \"redis://localhost:6379\"\n values['storage_url'] = redis_url + \"/1\"\n values['cache_url'] = redis_url + \"/2\"\n values['permission_url'] = redis_url + \"/3\"\n\n else:\n values['storage_url'] = ''\n values['cache_url'] = ''\n values['permission_url'] = ''\n\n render_template(\"kinto.tpl\", config_file, **values)\n", "path": "kinto/config/__init__.py"}]} | 768 | 189 |
gh_patches_debug_21239 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-3068 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Celery queue names inconsistent
**Is your feature request related to a problem? Please describe.**
In the admin interface, the Celery Status tab shows all the current queues.
They seemed to be only ever-increasing recently, so I had to look into why that happened.
After a lot of wasting of time, I figured out how to properly get into Flower, (and how to start it...)
Here, I discovered that the Celery worker was ignoring all but four of the queues, so I had to manually add them.
This did not really seem to be a problem. However, when I entered `broadcasts`, as that is what the queue is named in the admin interface, nothing happened. An investigation later I found out that the queue was actually called `broadcast`, singular.
**Describe the solution you'd like**
So, please fix that. Either change the name of the queue, or change it in the admin interface, so that someone can look at the admin interface to know what the queue names are...
**Describe alternatives you've considered**
N/A
**Additional context**
N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/admin/celery_status.py`
Content:
```
1 """ celery status """
2 import json
3
4 from django.contrib.auth.decorators import login_required, permission_required
5 from django.http import HttpResponse
6 from django.template.response import TemplateResponse
7 from django.utils.decorators import method_decorator
8 from django.views import View
9 from django.views.decorators.http import require_GET
10 from django import forms
11 import redis
12
13 from celerywyrm import settings
14 from bookwyrm.tasks import (
15 app as celery,
16 LOW,
17 MEDIUM,
18 HIGH,
19 STREAMS,
20 IMAGES,
21 SUGGESTED_USERS,
22 EMAIL,
23 CONNECTORS,
24 LISTS,
25 INBOX,
26 IMPORTS,
27 IMPORT_TRIGGERED,
28 BROADCAST,
29 MISC,
30 )
31
32 r = redis.from_url(settings.REDIS_BROKER_URL)
33
34 # pylint: disable= no-self-use
35 @method_decorator(login_required, name="dispatch")
36 @method_decorator(
37 permission_required("bookwyrm.edit_instance_settings", raise_exception=True),
38 name="dispatch",
39 )
40 class CeleryStatus(View):
41 """Are your tasks running? Well you'd better go catch them"""
42
43 def get(self, request):
44 """See workers and active tasks"""
45 errors = []
46 try:
47 inspect = celery.control.inspect()
48 stats = inspect.stats()
49 active_tasks = inspect.active()
50 # pylint: disable=broad-except
51 except Exception as err:
52 stats = active_tasks = None
53 errors.append(err)
54
55 try:
56 queues = {
57 LOW: r.llen(LOW),
58 MEDIUM: r.llen(MEDIUM),
59 HIGH: r.llen(HIGH),
60 STREAMS: r.llen(STREAMS),
61 IMAGES: r.llen(IMAGES),
62 SUGGESTED_USERS: r.llen(SUGGESTED_USERS),
63 EMAIL: r.llen(EMAIL),
64 CONNECTORS: r.llen(CONNECTORS),
65 LISTS: r.llen(LISTS),
66 INBOX: r.llen(INBOX),
67 IMPORTS: r.llen(IMPORTS),
68 IMPORT_TRIGGERED: r.llen(IMPORT_TRIGGERED),
69 BROADCAST: r.llen(BROADCAST),
70 MISC: r.llen(MISC),
71 }
72 # pylint: disable=broad-except
73 except Exception as err:
74 queues = None
75 errors.append(err)
76
77 form = ClearCeleryForm()
78
79 data = {
80 "stats": stats,
81 "active_tasks": active_tasks,
82 "queues": queues,
83 "form": form,
84 "errors": errors,
85 }
86 return TemplateResponse(request, "settings/celery.html", data)
87
88 def post(self, request):
89 """Submit form to clear queues"""
90 form = ClearCeleryForm(request.POST)
91 if form.is_valid():
92 if len(celery.control.ping()) != 0:
93 return HttpResponse(
94 "Refusing to delete tasks while Celery worker is active"
95 )
96 pipeline = r.pipeline()
97 for queue in form.cleaned_data["queues"]:
98 for task in r.lrange(queue, 0, -1):
99 task_json = json.loads(task)
100 if task_json["headers"]["task"] in form.cleaned_data["tasks"]:
101 pipeline.lrem(queue, 0, task)
102 results = pipeline.execute()
103
104 return HttpResponse(f"Deleted {sum(results)} tasks")
105
106
107 class ClearCeleryForm(forms.Form):
108 """Form to clear queues"""
109
110 queues = forms.MultipleChoiceField(
111 label="Queues",
112 choices=[
113 (LOW, "Low prioirty"),
114 (MEDIUM, "Medium priority"),
115 (HIGH, "High priority"),
116 (STREAMS, "Streams"),
117 (IMAGES, "Images"),
118 (SUGGESTED_USERS, "Suggested users"),
119 (EMAIL, "Email"),
120 (CONNECTORS, "Connectors"),
121 (LISTS, "Lists"),
122 (INBOX, "Inbox"),
123 (IMPORTS, "Imports"),
124 (IMPORT_TRIGGERED, "Import triggered"),
125 (BROADCAST, "Broadcasts"),
126 (MISC, "Misc"),
127 ],
128 widget=forms.CheckboxSelectMultiple,
129 )
130 tasks = forms.MultipleChoiceField(
131 label="Tasks", choices=[], widget=forms.CheckboxSelectMultiple
132 )
133
134 def __init__(self, *args, **kwargs):
135 super().__init__(*args, **kwargs)
136 celery.loader.import_default_modules()
137 self.fields["tasks"].choices = sorted(
138 [(k, k) for k in celery.tasks.keys() if not k.startswith("celery.")]
139 )
140
141
142 @require_GET
143 # pylint: disable=unused-argument
144 def celery_ping(request):
145 """Just tells you if Celery is on or not"""
146 try:
147 ping = celery.control.inspect().ping()
148 if ping:
149 return HttpResponse()
150 # pylint: disable=broad-except
151 except Exception:
152 pass
153
154 return HttpResponse(status=500)
155
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/views/admin/celery_status.py b/bookwyrm/views/admin/celery_status.py
--- a/bookwyrm/views/admin/celery_status.py
+++ b/bookwyrm/views/admin/celery_status.py
@@ -110,20 +110,20 @@
queues = forms.MultipleChoiceField(
label="Queues",
choices=[
- (LOW, "Low prioirty"),
+ (LOW, "Low priority"),
(MEDIUM, "Medium priority"),
(HIGH, "High priority"),
- (STREAMS, "Streams"),
- (IMAGES, "Images"),
- (SUGGESTED_USERS, "Suggested users"),
- (EMAIL, "Email"),
+ (BROADCAST, "Broadcast"),
(CONNECTORS, "Connectors"),
- (LISTS, "Lists"),
- (INBOX, "Inbox"),
+ (EMAIL, "Email"),
+ (IMAGES, "Images"),
(IMPORTS, "Imports"),
(IMPORT_TRIGGERED, "Import triggered"),
- (BROADCAST, "Broadcasts"),
+ (INBOX, "Inbox"),
+ (LISTS, "Lists"),
(MISC, "Misc"),
+ (STREAMS, "Streams"),
+ (SUGGESTED_USERS, "Suggested users"),
],
widget=forms.CheckboxSelectMultiple,
)
| {"golden_diff": "diff --git a/bookwyrm/views/admin/celery_status.py b/bookwyrm/views/admin/celery_status.py\n--- a/bookwyrm/views/admin/celery_status.py\n+++ b/bookwyrm/views/admin/celery_status.py\n@@ -110,20 +110,20 @@\n queues = forms.MultipleChoiceField(\n label=\"Queues\",\n choices=[\n- (LOW, \"Low prioirty\"),\n+ (LOW, \"Low priority\"),\n (MEDIUM, \"Medium priority\"),\n (HIGH, \"High priority\"),\n- (STREAMS, \"Streams\"),\n- (IMAGES, \"Images\"),\n- (SUGGESTED_USERS, \"Suggested users\"),\n- (EMAIL, \"Email\"),\n+ (BROADCAST, \"Broadcast\"),\n (CONNECTORS, \"Connectors\"),\n- (LISTS, \"Lists\"),\n- (INBOX, \"Inbox\"),\n+ (EMAIL, \"Email\"),\n+ (IMAGES, \"Images\"),\n (IMPORTS, \"Imports\"),\n (IMPORT_TRIGGERED, \"Import triggered\"),\n- (BROADCAST, \"Broadcasts\"),\n+ (INBOX, \"Inbox\"),\n+ (LISTS, \"Lists\"),\n (MISC, \"Misc\"),\n+ (STREAMS, \"Streams\"),\n+ (SUGGESTED_USERS, \"Suggested users\"),\n ],\n widget=forms.CheckboxSelectMultiple,\n )\n", "issue": " Celery queue names inconsistent\n**Is your feature request related to a problem? Please describe.**\r\nIn the admin interface, the Celery Status tab shows all the current queues.\r\nThey seemed to be only ever-increasing recently, so I had to look into why that happened.\r\nAfter a lot of wasting of time, I figured out how to properly get into Flower, (and how to start it...)\r\nHere, I discovered that the Celery worker was ignoring all but four of the queues, so I had to manually add them.\r\nThis did not really seem to be a problem. However, when I entered `broadcasts`, as that is what the queue is named in the admin interface, nothing happened. An investigation later I found out that the queue was actually called `broadcast`, singular.\r\n\r\n**Describe the solution you'd like**\r\nSo, please fix that. Either change the name of the queue, or change it in the admin interface, so that someone can look at the admin interface to know what the queue names are...\r\n\r\n**Describe alternatives you've considered**\r\nN/A\r\n\r\n**Additional context**\r\nN/A\r\n\n", "before_files": [{"content": "\"\"\" celery status \"\"\"\nimport json\n\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.http import HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.decorators.http import require_GET\nfrom django import forms\nimport redis\n\nfrom celerywyrm import settings\nfrom bookwyrm.tasks import (\n app as celery,\n LOW,\n MEDIUM,\n HIGH,\n STREAMS,\n IMAGES,\n SUGGESTED_USERS,\n EMAIL,\n CONNECTORS,\n LISTS,\n INBOX,\n IMPORTS,\n IMPORT_TRIGGERED,\n BROADCAST,\n MISC,\n)\n\nr = redis.from_url(settings.REDIS_BROKER_URL)\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_instance_settings\", raise_exception=True),\n name=\"dispatch\",\n)\nclass CeleryStatus(View):\n \"\"\"Are your tasks running? 
Well you'd better go catch them\"\"\"\n\n def get(self, request):\n \"\"\"See workers and active tasks\"\"\"\n errors = []\n try:\n inspect = celery.control.inspect()\n stats = inspect.stats()\n active_tasks = inspect.active()\n # pylint: disable=broad-except\n except Exception as err:\n stats = active_tasks = None\n errors.append(err)\n\n try:\n queues = {\n LOW: r.llen(LOW),\n MEDIUM: r.llen(MEDIUM),\n HIGH: r.llen(HIGH),\n STREAMS: r.llen(STREAMS),\n IMAGES: r.llen(IMAGES),\n SUGGESTED_USERS: r.llen(SUGGESTED_USERS),\n EMAIL: r.llen(EMAIL),\n CONNECTORS: r.llen(CONNECTORS),\n LISTS: r.llen(LISTS),\n INBOX: r.llen(INBOX),\n IMPORTS: r.llen(IMPORTS),\n IMPORT_TRIGGERED: r.llen(IMPORT_TRIGGERED),\n BROADCAST: r.llen(BROADCAST),\n MISC: r.llen(MISC),\n }\n # pylint: disable=broad-except\n except Exception as err:\n queues = None\n errors.append(err)\n\n form = ClearCeleryForm()\n\n data = {\n \"stats\": stats,\n \"active_tasks\": active_tasks,\n \"queues\": queues,\n \"form\": form,\n \"errors\": errors,\n }\n return TemplateResponse(request, \"settings/celery.html\", data)\n\n def post(self, request):\n \"\"\"Submit form to clear queues\"\"\"\n form = ClearCeleryForm(request.POST)\n if form.is_valid():\n if len(celery.control.ping()) != 0:\n return HttpResponse(\n \"Refusing to delete tasks while Celery worker is active\"\n )\n pipeline = r.pipeline()\n for queue in form.cleaned_data[\"queues\"]:\n for task in r.lrange(queue, 0, -1):\n task_json = json.loads(task)\n if task_json[\"headers\"][\"task\"] in form.cleaned_data[\"tasks\"]:\n pipeline.lrem(queue, 0, task)\n results = pipeline.execute()\n\n return HttpResponse(f\"Deleted {sum(results)} tasks\")\n\n\nclass ClearCeleryForm(forms.Form):\n \"\"\"Form to clear queues\"\"\"\n\n queues = forms.MultipleChoiceField(\n label=\"Queues\",\n choices=[\n (LOW, \"Low prioirty\"),\n (MEDIUM, \"Medium priority\"),\n (HIGH, \"High priority\"),\n (STREAMS, \"Streams\"),\n (IMAGES, \"Images\"),\n (SUGGESTED_USERS, \"Suggested users\"),\n (EMAIL, \"Email\"),\n (CONNECTORS, \"Connectors\"),\n (LISTS, \"Lists\"),\n (INBOX, \"Inbox\"),\n (IMPORTS, \"Imports\"),\n (IMPORT_TRIGGERED, \"Import triggered\"),\n (BROADCAST, \"Broadcasts\"),\n (MISC, \"Misc\"),\n ],\n widget=forms.CheckboxSelectMultiple,\n )\n tasks = forms.MultipleChoiceField(\n label=\"Tasks\", choices=[], widget=forms.CheckboxSelectMultiple\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n celery.loader.import_default_modules()\n self.fields[\"tasks\"].choices = sorted(\n [(k, k) for k in celery.tasks.keys() if not k.startswith(\"celery.\")]\n )\n\n\n@require_GET\n# pylint: disable=unused-argument\ndef celery_ping(request):\n \"\"\"Just tells you if Celery is on or not\"\"\"\n try:\n ping = celery.control.inspect().ping()\n if ping:\n return HttpResponse()\n # pylint: disable=broad-except\n except Exception:\n pass\n\n return HttpResponse(status=500)\n", "path": "bookwyrm/views/admin/celery_status.py"}], "after_files": [{"content": "\"\"\" celery status \"\"\"\nimport json\n\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.http import HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.decorators.http import require_GET\nfrom django import forms\nimport redis\n\nfrom celerywyrm import settings\nfrom bookwyrm.tasks import (\n app as celery,\n LOW,\n MEDIUM,\n HIGH,\n STREAMS,\n IMAGES,\n 
SUGGESTED_USERS,\n EMAIL,\n CONNECTORS,\n LISTS,\n INBOX,\n IMPORTS,\n IMPORT_TRIGGERED,\n BROADCAST,\n MISC,\n)\n\nr = redis.from_url(settings.REDIS_BROKER_URL)\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"bookwyrm.edit_instance_settings\", raise_exception=True),\n name=\"dispatch\",\n)\nclass CeleryStatus(View):\n \"\"\"Are your tasks running? Well you'd better go catch them\"\"\"\n\n def get(self, request):\n \"\"\"See workers and active tasks\"\"\"\n errors = []\n try:\n inspect = celery.control.inspect()\n stats = inspect.stats()\n active_tasks = inspect.active()\n # pylint: disable=broad-except\n except Exception as err:\n stats = active_tasks = None\n errors.append(err)\n\n try:\n queues = {\n LOW: r.llen(LOW),\n MEDIUM: r.llen(MEDIUM),\n HIGH: r.llen(HIGH),\n STREAMS: r.llen(STREAMS),\n IMAGES: r.llen(IMAGES),\n SUGGESTED_USERS: r.llen(SUGGESTED_USERS),\n EMAIL: r.llen(EMAIL),\n CONNECTORS: r.llen(CONNECTORS),\n LISTS: r.llen(LISTS),\n INBOX: r.llen(INBOX),\n IMPORTS: r.llen(IMPORTS),\n IMPORT_TRIGGERED: r.llen(IMPORT_TRIGGERED),\n BROADCAST: r.llen(BROADCAST),\n MISC: r.llen(MISC),\n }\n # pylint: disable=broad-except\n except Exception as err:\n queues = None\n errors.append(err)\n\n form = ClearCeleryForm()\n\n data = {\n \"stats\": stats,\n \"active_tasks\": active_tasks,\n \"queues\": queues,\n \"form\": form,\n \"errors\": errors,\n }\n return TemplateResponse(request, \"settings/celery.html\", data)\n\n def post(self, request):\n \"\"\"Submit form to clear queues\"\"\"\n form = ClearCeleryForm(request.POST)\n if form.is_valid():\n if len(celery.control.ping()) != 0:\n return HttpResponse(\n \"Refusing to delete tasks while Celery worker is active\"\n )\n pipeline = r.pipeline()\n for queue in form.cleaned_data[\"queues\"]:\n for task in r.lrange(queue, 0, -1):\n task_json = json.loads(task)\n if task_json[\"headers\"][\"task\"] in form.cleaned_data[\"tasks\"]:\n pipeline.lrem(queue, 0, task)\n results = pipeline.execute()\n\n return HttpResponse(f\"Deleted {sum(results)} tasks\")\n\n\nclass ClearCeleryForm(forms.Form):\n \"\"\"Form to clear queues\"\"\"\n\n queues = forms.MultipleChoiceField(\n label=\"Queues\",\n choices=[\n (LOW, \"Low priority\"),\n (MEDIUM, \"Medium priority\"),\n (HIGH, \"High priority\"),\n (BROADCAST, \"Broadcast\"),\n (CONNECTORS, \"Connectors\"),\n (EMAIL, \"Email\"),\n (IMAGES, \"Images\"),\n (IMPORTS, \"Imports\"),\n (IMPORT_TRIGGERED, \"Import triggered\"),\n (INBOX, \"Inbox\"),\n (LISTS, \"Lists\"),\n (MISC, \"Misc\"),\n (STREAMS, \"Streams\"),\n (SUGGESTED_USERS, \"Suggested users\"),\n ],\n widget=forms.CheckboxSelectMultiple,\n )\n tasks = forms.MultipleChoiceField(\n label=\"Tasks\", choices=[], widget=forms.CheckboxSelectMultiple\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n celery.loader.import_default_modules()\n self.fields[\"tasks\"].choices = sorted(\n [(k, k) for k in celery.tasks.keys() if not k.startswith(\"celery.\")]\n )\n\n\n@require_GET\n# pylint: disable=unused-argument\ndef celery_ping(request):\n \"\"\"Just tells you if Celery is on or not\"\"\"\n try:\n ping = celery.control.inspect().ping()\n if ping:\n return HttpResponse()\n # pylint: disable=broad-except\n except Exception:\n pass\n\n return HttpResponse(status=500)\n", "path": "bookwyrm/views/admin/celery_status.py"}]} | 1,878 | 306 |
gh_patches_debug_26026 | rasdani/github-patches | git_diff | python-discord__site-513 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to parse the GitHub repository metadata from response!
Sentry Issue: [SITE-P](https://sentry.io/organizations/python-discord/issues/2093966668/?referrer=github_integration)
```
Unable to parse the GitHub repository metadata from response!
```
This is caused by us hitting GitHub rate limits, as we're not authenticating with the API.
--- END ISSUE ---
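For orientation before the code below: the unauthenticated call is what runs into the rate limit, and the usual remedy is to send a token with the request. A minimal, illustrative sketch (reading the token from an environment variable is an assumption here; the project may wire it differently):
```python
import os

import requests

# Hypothetical illustration: unauthenticated requests to api.github.com are
# limited to 60/hour per IP, while sending a token raises the limit substantially.
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
headers = {"Authorization": f"token {GITHUB_TOKEN}"} if GITHUB_TOKEN else {}

api_data = requests.get(
    "https://api.github.com/users/python-discord/repos?per_page=100",
    headers=headers,
).json()
```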
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydis_site/constants.py`
Content:
```
1 import os
2
3 GIT_SHA = os.environ.get("GIT_SHA", "development")
4
```
Path: `pydis_site/apps/home/views/home.py`
Content:
```
1 import logging
2 from typing import Dict, List
3
4 import requests
5 from django.core.handlers.wsgi import WSGIRequest
6 from django.http import HttpResponse
7 from django.shortcuts import render
8 from django.utils import timezone
9 from django.views import View
10
11 from pydis_site.apps.home.models import RepositoryMetadata
12
13 log = logging.getLogger(__name__)
14
15
16 class HomeView(View):
17 """The main landing page for the website."""
18
19 github_api = "https://api.github.com/users/python-discord/repos?per_page=100"
20 repository_cache_ttl = 3600
21
22 # Which of our GitHub repos should be displayed on the front page, and in which order?
23 repos = [
24 "python-discord/site",
25 "python-discord/bot",
26 "python-discord/snekbox",
27 "python-discord/sir-lancebot",
28 "python-discord/metricity",
29 "python-discord/django-simple-bulma",
30 ]
31
32 def __init__(self):
33 """Clean up stale RepositoryMetadata."""
34 RepositoryMetadata.objects.exclude(repo_name__in=self.repos).delete()
35
36 def _get_api_data(self) -> Dict[str, Dict[str, str]]:
37 """
38 Call the GitHub API and get information about our repos.
39
40 If we're unable to get that info for any reason, return an empty dict.
41 """
42 repo_dict = {}
43
44 # Fetch the data from the GitHub API
45 api_data: List[dict] = requests.get(self.github_api).json()
46
47 # Process the API data into our dict
48 for repo in api_data:
49 try:
50 full_name = repo["full_name"]
51
52 if full_name in self.repos:
53 repo_dict[full_name] = {
54 "full_name": repo["full_name"],
55 "description": repo["description"],
56 "language": repo["language"],
57 "forks_count": repo["forks_count"],
58 "stargazers_count": repo["stargazers_count"],
59 }
60 # Something is not right about the API data we got back from GitHub.
61 except (TypeError, ConnectionError, KeyError) as e:
62 log.error(
63 "Unable to parse the GitHub repository metadata from response!",
64 extra={
65 'api_data': api_data,
66 'error': e
67 }
68 )
69 continue
70
71 return repo_dict
72
73 def _get_repo_data(self) -> List[RepositoryMetadata]:
74 """Build a list of RepositoryMetadata objects that we can use to populate the front page."""
75 database_repositories = []
76
77 # First, let's see if we have any metadata cached.
78 cached_data = RepositoryMetadata.objects.all()
79
80 # If we don't, we have to create some!
81 if not cached_data:
82
83 # Try to get new data from the API. If it fails, we'll return an empty list.
84 # In this case, we simply don't display our projects on the site.
85 api_repositories = self._get_api_data()
86
87 # Create all the repodata records in the database.
88 for api_data in api_repositories.values():
89 repo_data = RepositoryMetadata(
90 repo_name=api_data["full_name"],
91 description=api_data["description"],
92 forks=api_data["forks_count"],
93 stargazers=api_data["stargazers_count"],
94 language=api_data["language"],
95 )
96
97 repo_data.save()
98 database_repositories.append(repo_data)
99
100 return database_repositories
101
102 # If the data is stale, we should refresh it.
103 if (timezone.now() - cached_data[0].last_updated).seconds > self.repository_cache_ttl:
104 # Try to get new data from the API. If it fails, return the cached data.
105 api_repositories = self._get_api_data()
106
107 if not api_repositories:
108 return RepositoryMetadata.objects.all()
109
110 # Update or create all RepoData objects in self.repos
111 for repo_name, api_data in api_repositories.items():
112 try:
113 repo_data = RepositoryMetadata.objects.get(repo_name=repo_name)
114 repo_data.description = api_data["description"]
115 repo_data.language = api_data["language"]
116 repo_data.forks = api_data["forks_count"]
117 repo_data.stargazers = api_data["stargazers_count"]
118 except RepositoryMetadata.DoesNotExist:
119 repo_data = RepositoryMetadata(
120 repo_name=api_data["full_name"],
121 description=api_data["description"],
122 forks=api_data["forks_count"],
123 stargazers=api_data["stargazers_count"],
124 language=api_data["language"],
125 )
126 repo_data.save()
127 database_repositories.append(repo_data)
128 return database_repositories
129
130 # Otherwise, if the data is fresher than 2 minutes old, we should just return it.
131 else:
132 return RepositoryMetadata.objects.all()
133
134 def get(self, request: WSGIRequest) -> HttpResponse:
135 """Collect repo data and render the homepage view."""
136 repo_data = self._get_repo_data()
137 return render(request, "home/index.html", {"repo_data": repo_data})
138
139
140 def timeline(request: WSGIRequest) -> HttpResponse:
141 """Render timeline view."""
142 return render(request, 'home/timeline.html')
143
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pydis_site/apps/home/views/home.py b/pydis_site/apps/home/views/home.py
--- a/pydis_site/apps/home/views/home.py
+++ b/pydis_site/apps/home/views/home.py
@@ -9,6 +9,7 @@
from django.views import View
from pydis_site.apps.home.models import RepositoryMetadata
+from pydis_site.constants import GITHUB_TOKEN
log = logging.getLogger(__name__)
@@ -18,6 +19,7 @@
github_api = "https://api.github.com/users/python-discord/repos?per_page=100"
repository_cache_ttl = 3600
+ headers = {"Authorization": f"token {GITHUB_TOKEN}"}
# Which of our GitHub repos should be displayed on the front page, and in which order?
repos = [
@@ -42,7 +44,7 @@
repo_dict = {}
# Fetch the data from the GitHub API
- api_data: List[dict] = requests.get(self.github_api).json()
+ api_data: List[dict] = requests.get(self.github_api, headers=self.headers).json()
# Process the API data into our dict
for repo in api_data:
diff --git a/pydis_site/constants.py b/pydis_site/constants.py
--- a/pydis_site/constants.py
+++ b/pydis_site/constants.py
@@ -1,3 +1,4 @@
import os
GIT_SHA = os.environ.get("GIT_SHA", "development")
+GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
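A quick way to confirm the token is actually being picked up (hypothetical interactive check, separate from the patch itself; assumes `GITHUB_TOKEN` is exported):
```python
import os

import requests

resp = requests.get(
    "https://api.github.com/rate_limit",
    headers={"Authorization": f"token {os.environ['GITHUB_TOKEN']}"},
)
# core limit is typically 5000 when authenticated, 60 otherwise
print(resp.json()["resources"]["core"]["limit"])
```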
| {"golden_diff": "diff --git a/pydis_site/apps/home/views/home.py b/pydis_site/apps/home/views/home.py\n--- a/pydis_site/apps/home/views/home.py\n+++ b/pydis_site/apps/home/views/home.py\n@@ -9,6 +9,7 @@\n from django.views import View\n \n from pydis_site.apps.home.models import RepositoryMetadata\n+from pydis_site.constants import GITHUB_TOKEN\n \n log = logging.getLogger(__name__)\n \n@@ -18,6 +19,7 @@\n \n github_api = \"https://api.github.com/users/python-discord/repos?per_page=100\"\n repository_cache_ttl = 3600\n+ headers = {\"Authorization\": f\"token {GITHUB_TOKEN}\"}\n \n # Which of our GitHub repos should be displayed on the front page, and in which order?\n repos = [\n@@ -42,7 +44,7 @@\n repo_dict = {}\n \n # Fetch the data from the GitHub API\n- api_data: List[dict] = requests.get(self.github_api).json()\n+ api_data: List[dict] = requests.get(self.github_api, headers=self.headers).json()\n \n # Process the API data into our dict\n for repo in api_data:\ndiff --git a/pydis_site/constants.py b/pydis_site/constants.py\n--- a/pydis_site/constants.py\n+++ b/pydis_site/constants.py\n@@ -1,3 +1,4 @@\n import os\n \n GIT_SHA = os.environ.get(\"GIT_SHA\", \"development\")\n+GITHUB_TOKEN = os.environ.get(\"GITHUB_TOKEN\")\n", "issue": "Unable to parse the GitHub repository metadata from response!\nSentry Issue: [SITE-P](https://sentry.io/organizations/python-discord/issues/2093966668/?referrer=github_integration)\n\n```\nUnable to parse the GitHub repository metadata from response!\n```\n\nThis is caused by us hitting github ratelimits, as we're not authenticating with the API.\n", "before_files": [{"content": "import os\n\nGIT_SHA = os.environ.get(\"GIT_SHA\", \"development\")\n", "path": "pydis_site/constants.py"}, {"content": "import logging\nfrom typing import Dict, List\n\nimport requests\nfrom django.core.handlers.wsgi import WSGIRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.views import View\n\nfrom pydis_site.apps.home.models import RepositoryMetadata\n\nlog = logging.getLogger(__name__)\n\n\nclass HomeView(View):\n \"\"\"The main landing page for the website.\"\"\"\n\n github_api = \"https://api.github.com/users/python-discord/repos?per_page=100\"\n repository_cache_ttl = 3600\n\n # Which of our GitHub repos should be displayed on the front page, and in which order?\n repos = [\n \"python-discord/site\",\n \"python-discord/bot\",\n \"python-discord/snekbox\",\n \"python-discord/sir-lancebot\",\n \"python-discord/metricity\",\n \"python-discord/django-simple-bulma\",\n ]\n\n def __init__(self):\n \"\"\"Clean up stale RepositoryMetadata.\"\"\"\n RepositoryMetadata.objects.exclude(repo_name__in=self.repos).delete()\n\n def _get_api_data(self) -> Dict[str, Dict[str, str]]:\n \"\"\"\n Call the GitHub API and get information about our repos.\n\n If we're unable to get that info for any reason, return an empty dict.\n \"\"\"\n repo_dict = {}\n\n # Fetch the data from the GitHub API\n api_data: List[dict] = requests.get(self.github_api).json()\n\n # Process the API data into our dict\n for repo in api_data:\n try:\n full_name = repo[\"full_name\"]\n\n if full_name in self.repos:\n repo_dict[full_name] = {\n \"full_name\": repo[\"full_name\"],\n \"description\": repo[\"description\"],\n \"language\": repo[\"language\"],\n \"forks_count\": repo[\"forks_count\"],\n \"stargazers_count\": repo[\"stargazers_count\"],\n }\n # Something is not right about the API data we got back from GitHub.\n except 
(TypeError, ConnectionError, KeyError) as e:\n log.error(\n \"Unable to parse the GitHub repository metadata from response!\",\n extra={\n 'api_data': api_data,\n 'error': e\n }\n )\n continue\n\n return repo_dict\n\n def _get_repo_data(self) -> List[RepositoryMetadata]:\n \"\"\"Build a list of RepositoryMetadata objects that we can use to populate the front page.\"\"\"\n database_repositories = []\n\n # First, let's see if we have any metadata cached.\n cached_data = RepositoryMetadata.objects.all()\n\n # If we don't, we have to create some!\n if not cached_data:\n\n # Try to get new data from the API. If it fails, we'll return an empty list.\n # In this case, we simply don't display our projects on the site.\n api_repositories = self._get_api_data()\n\n # Create all the repodata records in the database.\n for api_data in api_repositories.values():\n repo_data = RepositoryMetadata(\n repo_name=api_data[\"full_name\"],\n description=api_data[\"description\"],\n forks=api_data[\"forks_count\"],\n stargazers=api_data[\"stargazers_count\"],\n language=api_data[\"language\"],\n )\n\n repo_data.save()\n database_repositories.append(repo_data)\n\n return database_repositories\n\n # If the data is stale, we should refresh it.\n if (timezone.now() - cached_data[0].last_updated).seconds > self.repository_cache_ttl:\n # Try to get new data from the API. If it fails, return the cached data.\n api_repositories = self._get_api_data()\n\n if not api_repositories:\n return RepositoryMetadata.objects.all()\n\n # Update or create all RepoData objects in self.repos\n for repo_name, api_data in api_repositories.items():\n try:\n repo_data = RepositoryMetadata.objects.get(repo_name=repo_name)\n repo_data.description = api_data[\"description\"]\n repo_data.language = api_data[\"language\"]\n repo_data.forks = api_data[\"forks_count\"]\n repo_data.stargazers = api_data[\"stargazers_count\"]\n except RepositoryMetadata.DoesNotExist:\n repo_data = RepositoryMetadata(\n repo_name=api_data[\"full_name\"],\n description=api_data[\"description\"],\n forks=api_data[\"forks_count\"],\n stargazers=api_data[\"stargazers_count\"],\n language=api_data[\"language\"],\n )\n repo_data.save()\n database_repositories.append(repo_data)\n return database_repositories\n\n # Otherwise, if the data is fresher than 2 minutes old, we should just return it.\n else:\n return RepositoryMetadata.objects.all()\n\n def get(self, request: WSGIRequest) -> HttpResponse:\n \"\"\"Collect repo data and render the homepage view.\"\"\"\n repo_data = self._get_repo_data()\n return render(request, \"home/index.html\", {\"repo_data\": repo_data})\n\n\ndef timeline(request: WSGIRequest) -> HttpResponse:\n \"\"\"Render timeline view.\"\"\"\n return render(request, 'home/timeline.html')\n", "path": "pydis_site/apps/home/views/home.py"}], "after_files": [{"content": "import os\n\nGIT_SHA = os.environ.get(\"GIT_SHA\", \"development\")\nGITHUB_TOKEN = os.environ.get(\"GITHUB_TOKEN\")\n", "path": "pydis_site/constants.py"}, {"content": "import logging\nfrom typing import Dict, List\n\nimport requests\nfrom django.core.handlers.wsgi import WSGIRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.views import View\n\nfrom pydis_site.apps.home.models import RepositoryMetadata\nfrom pydis_site.constants import GITHUB_TOKEN\n\nlog = logging.getLogger(__name__)\n\n\nclass HomeView(View):\n \"\"\"The main landing page for the website.\"\"\"\n\n github_api = 
\"https://api.github.com/users/python-discord/repos?per_page=100\"\n repository_cache_ttl = 3600\n headers = {\"Authorization\": f\"token {GITHUB_TOKEN}\"}\n\n # Which of our GitHub repos should be displayed on the front page, and in which order?\n repos = [\n \"python-discord/site\",\n \"python-discord/bot\",\n \"python-discord/snekbox\",\n \"python-discord/sir-lancebot\",\n \"python-discord/metricity\",\n \"python-discord/django-simple-bulma\",\n ]\n\n def __init__(self):\n \"\"\"Clean up stale RepositoryMetadata.\"\"\"\n RepositoryMetadata.objects.exclude(repo_name__in=self.repos).delete()\n\n def _get_api_data(self) -> Dict[str, Dict[str, str]]:\n \"\"\"\n Call the GitHub API and get information about our repos.\n\n If we're unable to get that info for any reason, return an empty dict.\n \"\"\"\n repo_dict = {}\n\n # Fetch the data from the GitHub API\n api_data: List[dict] = requests.get(self.github_api, headers=self.headers).json()\n\n # Process the API data into our dict\n for repo in api_data:\n try:\n full_name = repo[\"full_name\"]\n\n if full_name in self.repos:\n repo_dict[full_name] = {\n \"full_name\": repo[\"full_name\"],\n \"description\": repo[\"description\"],\n \"language\": repo[\"language\"],\n \"forks_count\": repo[\"forks_count\"],\n \"stargazers_count\": repo[\"stargazers_count\"],\n }\n # Something is not right about the API data we got back from GitHub.\n except (TypeError, ConnectionError, KeyError) as e:\n log.error(\n \"Unable to parse the GitHub repository metadata from response!\",\n extra={\n 'api_data': api_data,\n 'error': e\n }\n )\n continue\n\n return repo_dict\n\n def _get_repo_data(self) -> List[RepositoryMetadata]:\n \"\"\"Build a list of RepositoryMetadata objects that we can use to populate the front page.\"\"\"\n database_repositories = []\n\n # First, let's see if we have any metadata cached.\n cached_data = RepositoryMetadata.objects.all()\n\n # If we don't, we have to create some!\n if not cached_data:\n\n # Try to get new data from the API. If it fails, we'll return an empty list.\n # In this case, we simply don't display our projects on the site.\n api_repositories = self._get_api_data()\n\n # Create all the repodata records in the database.\n for api_data in api_repositories.values():\n repo_data = RepositoryMetadata(\n repo_name=api_data[\"full_name\"],\n description=api_data[\"description\"],\n forks=api_data[\"forks_count\"],\n stargazers=api_data[\"stargazers_count\"],\n language=api_data[\"language\"],\n )\n\n repo_data.save()\n database_repositories.append(repo_data)\n\n return database_repositories\n\n # If the data is stale, we should refresh it.\n if (timezone.now() - cached_data[0].last_updated).seconds > self.repository_cache_ttl:\n # Try to get new data from the API. 
If it fails, return the cached data.\n api_repositories = self._get_api_data()\n\n if not api_repositories:\n return RepositoryMetadata.objects.all()\n\n # Update or create all RepoData objects in self.repos\n for repo_name, api_data in api_repositories.items():\n try:\n repo_data = RepositoryMetadata.objects.get(repo_name=repo_name)\n repo_data.description = api_data[\"description\"]\n repo_data.language = api_data[\"language\"]\n repo_data.forks = api_data[\"forks_count\"]\n repo_data.stargazers = api_data[\"stargazers_count\"]\n except RepositoryMetadata.DoesNotExist:\n repo_data = RepositoryMetadata(\n repo_name=api_data[\"full_name\"],\n description=api_data[\"description\"],\n forks=api_data[\"forks_count\"],\n stargazers=api_data[\"stargazers_count\"],\n language=api_data[\"language\"],\n )\n repo_data.save()\n database_repositories.append(repo_data)\n return database_repositories\n\n # Otherwise, if the data is fresher than 2 minutes old, we should just return it.\n else:\n return RepositoryMetadata.objects.all()\n\n def get(self, request: WSGIRequest) -> HttpResponse:\n \"\"\"Collect repo data and render the homepage view.\"\"\"\n repo_data = self._get_repo_data()\n return render(request, \"home/index.html\", {\"repo_data\": repo_data})\n\n\ndef timeline(request: WSGIRequest) -> HttpResponse:\n \"\"\"Render timeline view.\"\"\"\n return render(request, 'home/timeline.html')\n", "path": "pydis_site/apps/home/views/home.py"}]} | 1,814 | 335 |
gh_patches_debug_37959 | rasdani/github-patches | git_diff | openvinotoolkit__datumaro-371 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Import ImageNet dataset
### Steps to reproduce problem
1. Download and extract the ImageNet dataset for image classification: [link](https://www.kaggle.com/c/imagenet-object-localization-challenge/data);
2. Add the loaded dataset into a Datumaro project;
3. Run `datum info`.
### Current behaviour
The ImageNet dataset has ~1.2M images, but the `info` output shows that the imported dataset has only 69647 items, and these images also have wrong labels.
### Expected behaviour
Correct import.
### Environment
`git log -1`: 7e35c8
--- END ISSUE ---
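For orientation before the code below: the undercount and the wrong labels are consistent with item IDs being derived from the image basename alone, so files whose basenames coincide across different class directories overwrite each other. A tiny illustration (file names are made up but follow the ImageNet `<synset>_<number>` pattern):
```python
import os.path as osp

items = {}
for image_path in ["n01440764/n01440764_18.JPEG", "n01443537/n01443537_18.JPEG"]:
    label = osp.basename(osp.dirname(image_path))
    name = osp.splitext(osp.basename(image_path))[0]
    if name.startswith(label + "_"):
        name = name[len(label) + 1:]   # both files reduce to "18"
    items[name] = label                # the second entry overwrites the first

print(items)  # {'18': 'n01443537'} -- one item left, carrying the wrong label
```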
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datumaro/plugins/imagenet_format.py`
Content:
```
1 # Copyright (C) 2020 Intel Corporation
2 #
3 # SPDX-License-Identifier: MIT
4
5 import logging as log
6 import os
7 import os.path as osp
8
9 from datumaro.components.converter import Converter
10 from datumaro.components.extractor import (
11 AnnotationType, DatasetItem, Importer, Label, LabelCategories,
12 SourceExtractor,
13 )
14 from datumaro.util.image import find_images
15
16
17 class ImagenetPath:
18 IMAGE_DIR_NO_LABEL = 'no_label'
19
20
21 class ImagenetExtractor(SourceExtractor):
22 def __init__(self, path, subset=None):
23 assert osp.isdir(path), path
24 super().__init__(subset=subset)
25
26 self._categories = self._load_categories(path)
27 self._items = list(self._load_items(path).values())
28
29 def _load_categories(self, path):
30 label_cat = LabelCategories()
31 for dirname in sorted(os.listdir(path)):
32 if dirname != ImagenetPath.IMAGE_DIR_NO_LABEL:
33 label_cat.add(dirname)
34 return { AnnotationType.label: label_cat }
35
36 def _load_items(self, path):
37 items = {}
38
39 for image_path in find_images(path, recursive=True, max_depth=1):
40 label = osp.basename(osp.dirname(image_path))
41 image_name = osp.splitext(osp.basename(image_path))[0]
42 if image_name.startswith(label + '_'):
43 image_name = image_name[len(label) + 1:]
44
45 item = items.get(image_name)
46 if item is None:
47 item = DatasetItem(id=image_name, subset=self._subset,
48 image=image_path)
49 items[image_name] = item
50 annotations = item.annotations
51
52 if label != ImagenetPath.IMAGE_DIR_NO_LABEL:
53 label = self._categories[AnnotationType.label].find(label)[0]
54 annotations.append(Label(label=label))
55
56 return items
57
58
59 class ImagenetImporter(Importer):
60 @classmethod
61 def find_sources(cls, path):
62 if not osp.isdir(path):
63 return []
64 return [{ 'url': path, 'format': 'imagenet' }]
65
66
67 class ImagenetConverter(Converter):
68 DEFAULT_IMAGE_EXT = '.jpg'
69
70 def apply(self):
71 if 1 < len(self._extractor.subsets()):
72 log.warning("ImageNet format only supports exporting a single "
73 "subset, subset information will not be used.")
74
75 subset_dir = self._save_dir
76 extractor = self._extractor
77 labels = {}
78 for item in self._extractor:
79 labels = set(p.label for p in item.annotations
80 if p.type == AnnotationType.label)
81
82 for label in labels:
83 label_name = extractor.categories()[AnnotationType.label][label].name
84 self._save_image(item, osp.join(subset_dir, label_name,
85 '%s_%s' % (label_name, self._make_image_filename(item))))
86
87 if not labels:
88 self._save_image(item, osp.join(subset_dir,
89 ImagenetPath.IMAGE_DIR_NO_LABEL,
90 ImagenetPath.IMAGE_DIR_NO_LABEL + '_' + \
91 self._make_image_filename(item)))
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/datumaro/plugins/imagenet_format.py b/datumaro/plugins/imagenet_format.py
--- a/datumaro/plugins/imagenet_format.py
+++ b/datumaro/plugins/imagenet_format.py
@@ -39,14 +39,13 @@
for image_path in find_images(path, recursive=True, max_depth=1):
label = osp.basename(osp.dirname(image_path))
image_name = osp.splitext(osp.basename(image_path))[0]
- if image_name.startswith(label + '_'):
- image_name = image_name[len(label) + 1:]
- item = items.get(image_name)
+ item_id = osp.join(label, image_name)
+ item = items.get(item_id)
if item is None:
- item = DatasetItem(id=image_name, subset=self._subset,
+ item = DatasetItem(id=item_id, subset=self._subset,
image=image_path)
- items[image_name] = item
+ items[item_id] = item
annotations = item.annotations
if label != ImagenetPath.IMAGE_DIR_NO_LABEL:
@@ -68,6 +67,13 @@
DEFAULT_IMAGE_EXT = '.jpg'
def apply(self):
+
+ def _get_dir_name(id_parts, label_name):
+ if 1 < len(id_parts) and id_parts[0] == label_name:
+ return ''
+ else:
+ return label_name
+
if 1 < len(self._extractor.subsets()):
log.warning("ImageNet format only supports exporting a single "
"subset, subset information will not be used.")
@@ -76,16 +82,15 @@
extractor = self._extractor
labels = {}
for item in self._extractor:
+ id_parts = item.id.split('/')
labels = set(p.label for p in item.annotations
if p.type == AnnotationType.label)
for label in labels:
label_name = extractor.categories()[AnnotationType.label][label].name
- self._save_image(item, osp.join(subset_dir, label_name,
- '%s_%s' % (label_name, self._make_image_filename(item))))
+ self._save_image(item, subdir=osp.join(subset_dir,
+ _get_dir_name(id_parts, label_name)))
if not labels:
- self._save_image(item, osp.join(subset_dir,
- ImagenetPath.IMAGE_DIR_NO_LABEL,
- ImagenetPath.IMAGE_DIR_NO_LABEL + '_' + \
- self._make_image_filename(item)))
+ self._save_image(item, subdir=osp.join(subset_dir,
+ _get_dir_name(id_parts, ImagenetPath.IMAGE_DIR_NO_LABEL)))
| {"golden_diff": "diff --git a/datumaro/plugins/imagenet_format.py b/datumaro/plugins/imagenet_format.py\n--- a/datumaro/plugins/imagenet_format.py\n+++ b/datumaro/plugins/imagenet_format.py\n@@ -39,14 +39,13 @@\n for image_path in find_images(path, recursive=True, max_depth=1):\n label = osp.basename(osp.dirname(image_path))\n image_name = osp.splitext(osp.basename(image_path))[0]\n- if image_name.startswith(label + '_'):\n- image_name = image_name[len(label) + 1:]\n \n- item = items.get(image_name)\n+ item_id = osp.join(label, image_name)\n+ item = items.get(item_id)\n if item is None:\n- item = DatasetItem(id=image_name, subset=self._subset,\n+ item = DatasetItem(id=item_id, subset=self._subset,\n image=image_path)\n- items[image_name] = item\n+ items[item_id] = item\n annotations = item.annotations\n \n if label != ImagenetPath.IMAGE_DIR_NO_LABEL:\n@@ -68,6 +67,13 @@\n DEFAULT_IMAGE_EXT = '.jpg'\n \n def apply(self):\n+\n+ def _get_dir_name(id_parts, label_name):\n+ if 1 < len(id_parts) and id_parts[0] == label_name:\n+ return ''\n+ else:\n+ return label_name\n+\n if 1 < len(self._extractor.subsets()):\n log.warning(\"ImageNet format only supports exporting a single \"\n \"subset, subset information will not be used.\")\n@@ -76,16 +82,15 @@\n extractor = self._extractor\n labels = {}\n for item in self._extractor:\n+ id_parts = item.id.split('/')\n labels = set(p.label for p in item.annotations\n if p.type == AnnotationType.label)\n \n for label in labels:\n label_name = extractor.categories()[AnnotationType.label][label].name\n- self._save_image(item, osp.join(subset_dir, label_name,\n- '%s_%s' % (label_name, self._make_image_filename(item))))\n+ self._save_image(item, subdir=osp.join(subset_dir,\n+ _get_dir_name(id_parts, label_name)))\n \n if not labels:\n- self._save_image(item, osp.join(subset_dir,\n- ImagenetPath.IMAGE_DIR_NO_LABEL,\n- ImagenetPath.IMAGE_DIR_NO_LABEL + '_' + \\\n- self._make_image_filename(item)))\n+ self._save_image(item, subdir=osp.join(subset_dir,\n+ _get_dir_name(id_parts, ImagenetPath.IMAGE_DIR_NO_LABEL)))\n", "issue": "Import ImageNet dataset\n### Steps to reproduce problem\r\n1. Download and extract ImageNet dataset for image classification: [link](https://www.kaggle.com/c/imagenet-object-localization-challenge/data);\r\n2. Add the loaded dataset into a Datumaro project;\r\n3. 
Run `datum info`.\r\n\r\n### Current behaviour\r\nImageNet dataset has ~1.2m images, but in the `info` output we can see that imported dataset has only 69647, and also these images have wrong labels.\r\n\r\n### Expected behaviour\r\nCorrect import.\r\n\r\n### Environment\r\n`git log -1`: 7e35c8\n", "before_files": [{"content": "# Copyright (C) 2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport logging as log\nimport os\nimport os.path as osp\n\nfrom datumaro.components.converter import Converter\nfrom datumaro.components.extractor import (\n AnnotationType, DatasetItem, Importer, Label, LabelCategories,\n SourceExtractor,\n)\nfrom datumaro.util.image import find_images\n\n\nclass ImagenetPath:\n IMAGE_DIR_NO_LABEL = 'no_label'\n\n\nclass ImagenetExtractor(SourceExtractor):\n def __init__(self, path, subset=None):\n assert osp.isdir(path), path\n super().__init__(subset=subset)\n\n self._categories = self._load_categories(path)\n self._items = list(self._load_items(path).values())\n\n def _load_categories(self, path):\n label_cat = LabelCategories()\n for dirname in sorted(os.listdir(path)):\n if dirname != ImagenetPath.IMAGE_DIR_NO_LABEL:\n label_cat.add(dirname)\n return { AnnotationType.label: label_cat }\n\n def _load_items(self, path):\n items = {}\n\n for image_path in find_images(path, recursive=True, max_depth=1):\n label = osp.basename(osp.dirname(image_path))\n image_name = osp.splitext(osp.basename(image_path))[0]\n if image_name.startswith(label + '_'):\n image_name = image_name[len(label) + 1:]\n\n item = items.get(image_name)\n if item is None:\n item = DatasetItem(id=image_name, subset=self._subset,\n image=image_path)\n items[image_name] = item\n annotations = item.annotations\n\n if label != ImagenetPath.IMAGE_DIR_NO_LABEL:\n label = self._categories[AnnotationType.label].find(label)[0]\n annotations.append(Label(label=label))\n\n return items\n\n\nclass ImagenetImporter(Importer):\n @classmethod\n def find_sources(cls, path):\n if not osp.isdir(path):\n return []\n return [{ 'url': path, 'format': 'imagenet' }]\n\n\nclass ImagenetConverter(Converter):\n DEFAULT_IMAGE_EXT = '.jpg'\n\n def apply(self):\n if 1 < len(self._extractor.subsets()):\n log.warning(\"ImageNet format only supports exporting a single \"\n \"subset, subset information will not be used.\")\n\n subset_dir = self._save_dir\n extractor = self._extractor\n labels = {}\n for item in self._extractor:\n labels = set(p.label for p in item.annotations\n if p.type == AnnotationType.label)\n\n for label in labels:\n label_name = extractor.categories()[AnnotationType.label][label].name\n self._save_image(item, osp.join(subset_dir, label_name,\n '%s_%s' % (label_name, self._make_image_filename(item))))\n\n if not labels:\n self._save_image(item, osp.join(subset_dir,\n ImagenetPath.IMAGE_DIR_NO_LABEL,\n ImagenetPath.IMAGE_DIR_NO_LABEL + '_' + \\\n self._make_image_filename(item)))\n", "path": "datumaro/plugins/imagenet_format.py"}], "after_files": [{"content": "# Copyright (C) 2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport logging as log\nimport os\nimport os.path as osp\n\nfrom datumaro.components.converter import Converter\nfrom datumaro.components.extractor import (\n AnnotationType, DatasetItem, Importer, Label, LabelCategories,\n SourceExtractor,\n)\nfrom datumaro.util.image import find_images\n\n\nclass ImagenetPath:\n IMAGE_DIR_NO_LABEL = 'no_label'\n\n\nclass ImagenetExtractor(SourceExtractor):\n def __init__(self, path, subset=None):\n assert osp.isdir(path), path\n 
super().__init__(subset=subset)\n\n self._categories = self._load_categories(path)\n self._items = list(self._load_items(path).values())\n\n def _load_categories(self, path):\n label_cat = LabelCategories()\n for dirname in sorted(os.listdir(path)):\n if dirname != ImagenetPath.IMAGE_DIR_NO_LABEL:\n label_cat.add(dirname)\n return { AnnotationType.label: label_cat }\n\n def _load_items(self, path):\n items = {}\n\n for image_path in find_images(path, recursive=True, max_depth=1):\n label = osp.basename(osp.dirname(image_path))\n image_name = osp.splitext(osp.basename(image_path))[0]\n\n item_id = osp.join(label, image_name)\n item = items.get(item_id)\n if item is None:\n item = DatasetItem(id=item_id, subset=self._subset,\n image=image_path)\n items[item_id] = item\n annotations = item.annotations\n\n if label != ImagenetPath.IMAGE_DIR_NO_LABEL:\n label = self._categories[AnnotationType.label].find(label)[0]\n annotations.append(Label(label=label))\n\n return items\n\n\nclass ImagenetImporter(Importer):\n @classmethod\n def find_sources(cls, path):\n if not osp.isdir(path):\n return []\n return [{ 'url': path, 'format': 'imagenet' }]\n\n\nclass ImagenetConverter(Converter):\n DEFAULT_IMAGE_EXT = '.jpg'\n\n def apply(self):\n\n def _get_dir_name(id_parts, label_name):\n if 1 < len(id_parts) and id_parts[0] == label_name:\n return ''\n else:\n return label_name\n\n if 1 < len(self._extractor.subsets()):\n log.warning(\"ImageNet format only supports exporting a single \"\n \"subset, subset information will not be used.\")\n\n subset_dir = self._save_dir\n extractor = self._extractor\n labels = {}\n for item in self._extractor:\n id_parts = item.id.split('/')\n labels = set(p.label for p in item.annotations\n if p.type == AnnotationType.label)\n\n for label in labels:\n label_name = extractor.categories()[AnnotationType.label][label].name\n self._save_image(item, subdir=osp.join(subset_dir,\n _get_dir_name(id_parts, label_name)))\n\n if not labels:\n self._save_image(item, subdir=osp.join(subset_dir,\n _get_dir_name(id_parts, ImagenetPath.IMAGE_DIR_NO_LABEL)))\n", "path": "datumaro/plugins/imagenet_format.py"}]} | 1,227 | 590 |
gh_patches_debug_4923 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-2852 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crash with web-history-max-items and no items in the history
When running `qutebrowser --debug --temp-basedir --backend webengine -s completion web-history-max-items 1000` and immediately pressing `o`, this happens:
```
12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: "SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)"
12:21:36 DEBUG sql sql:run:99 Running SQL query: "SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)"
12:21:36 DEBUG sql sql:run:102 query bindings: {':limit': 1000}
12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: "SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\' or title LIKE :pat escape '\') AND last_atime >= ORDER BY last_atime DESC"
12:21:36 DEBUG completion debug:__exit__:264 Starting url completion took 0.003652 seconds.
12:21:36 ERROR misc crashsignal:exception_hook:205 Uncaught exception
Traceback (most recent call last):
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/completer.py", line 236, in _update_completion
model = func(*args)
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/urlmodel.py", line 70, in url
hist_cat = histcategory.HistoryCategory(delete_func=_delete_history)
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/histcategory.py", line 54, in __init__
]), forward_only=False)
File "/home/florian/proj/qutebrowser/git/qutebrowser/misc/sql.py", line 83, in __init__
querystr, self.lastError().text()))
qutebrowser.misc.sql.SqlException: Failed to prepare query "SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\' or title LIKE :pat escape '\') AND last_atime >= ORDER BY last_atime DESC": "near "ORDER": syntax error Unable to execute statement"
```
cc @rcorre
--- END ISSUE ---
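For orientation before the code below: the failing query in the log ends with `AND last_atime >= ORDER BY ...`, i.e. the `min(last_atime)` subquery returned an empty value because the history table has no rows yet, and that empty value was interpolated straight into the SQL. A minimal sketch of the kind of guard that avoids emitting the clause at all (written as a standalone function for illustration; the real code builds this inside a method):
```python
def atime_expr(min_atime):
    """Skip the time filter entirely when the subquery found no rows."""
    if not min_atime:   # empty history -> min(last_atime) comes back as '' / None
        return ''
    return "AND last_atime >= {}".format(min_atime)


assert atime_expr('') == ''
assert atime_expr(1499000000) == "AND last_atime >= 1499000000"
```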
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qutebrowser/completion/models/histcategory.py`
Content:
```
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2017 Ryan Roden-Corrent (rcorre) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """A completion category that queries the SQL History store."""
21
22 import re
23
24 from PyQt5.QtSql import QSqlQueryModel
25
26 from qutebrowser.misc import sql
27 from qutebrowser.utils import debug
28 from qutebrowser.commands import cmdexc
29 from qutebrowser.config import config
30
31
32 class HistoryCategory(QSqlQueryModel):
33
34 """A completion category that queries the SQL History store."""
35
36 def __init__(self, *, delete_func=None, parent=None):
37 """Create a new History completion category."""
38 super().__init__(parent=parent)
39 self.name = "History"
40
41 # replace ' in timestamp-format to avoid breaking the query
42 timefmt = ("strftime('{}', last_atime, 'unixepoch', 'localtime')"
43 .format(config.get('completion', 'timestamp-format')
44 .replace("'", "`")))
45
46 self._query = sql.Query(' '.join([
47 "SELECT url, title, {}".format(timefmt),
48 "FROM CompletionHistory",
49 # the incoming pattern will have literal % and _ escaped with '\'
50 # we need to tell sql to treat '\' as an escape character
51 "WHERE (url LIKE :pat escape '\\' or title LIKE :pat escape '\\')",
52 self._atime_expr(),
53 "ORDER BY last_atime DESC",
54 ]), forward_only=False)
55
56 # advertise that this model filters by URL and title
57 self.columns_to_filter = [0, 1]
58 self.delete_func = delete_func
59
60 def _atime_expr(self):
61 """If max_items is set, return an expression to limit the query."""
62 max_items = config.get('completion', 'web-history-max-items')
63 # HistoryCategory should not be added to the completion in that case.
64 assert max_items != 0
65
66 if max_items < 0:
67 return ''
68
69 min_atime = sql.Query(' '.join([
70 'SELECT min(last_atime) FROM',
71 '(SELECT last_atime FROM CompletionHistory',
72 'ORDER BY last_atime DESC LIMIT :limit)',
73 ])).run(limit=max_items).value()
74
75 return "AND last_atime >= {}".format(min_atime)
76
77 def set_pattern(self, pattern):
78 """Set the pattern used to filter results.
79
80 Args:
81 pattern: string pattern to filter by.
82 """
83 # escape to treat a user input % or _ as a literal, not a wildcard
84 pattern = pattern.replace('%', '\\%')
85 pattern = pattern.replace('_', '\\_')
86 # treat spaces as wildcards to match any of the typed words
87 pattern = re.sub(r' +', '%', pattern)
88 pattern = '%{}%'.format(pattern)
89 with debug.log_time('sql', 'Running completion query'):
90 self._query.run(pat=pattern)
91 self.setQuery(self._query)
92
93 def delete_cur_item(self, index):
94 """Delete the row at the given index."""
95 if not self.delete_func:
96 raise cmdexc.CommandError("Cannot delete this item.")
97 data = [self.data(index.sibling(index.row(), i))
98 for i in range(self.columnCount())]
99 self.delete_func(data)
100 # re-run query to reload updated table
101 with debug.log_time('sql', 'Re-running completion query post-delete'):
102 self._query.run()
103 self.setQuery(self._query)
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qutebrowser/completion/models/histcategory.py b/qutebrowser/completion/models/histcategory.py
--- a/qutebrowser/completion/models/histcategory.py
+++ b/qutebrowser/completion/models/histcategory.py
@@ -72,6 +72,10 @@
'ORDER BY last_atime DESC LIMIT :limit)',
])).run(limit=max_items).value()
+ if not min_atime:
+ # if there are no history items, min_atime may be '' (issue #2849)
+ return ''
+
return "AND last_atime >= {}".format(min_atime)
def set_pattern(self, pattern):
| {"golden_diff": "diff --git a/qutebrowser/completion/models/histcategory.py b/qutebrowser/completion/models/histcategory.py\n--- a/qutebrowser/completion/models/histcategory.py\n+++ b/qutebrowser/completion/models/histcategory.py\n@@ -72,6 +72,10 @@\n 'ORDER BY last_atime DESC LIMIT :limit)',\n ])).run(limit=max_items).value()\n \n+ if not min_atime:\n+ # if there are no history items, min_atime may be '' (issue #2849)\n+ return ''\n+\n return \"AND last_atime >= {}\".format(min_atime)\n \n def set_pattern(self, pattern):\n", "issue": "Crash with web-history-max-items and no items in the history\nWhen running `qutebrowser --debug --temp-basedir --backend webengine -s completion web-history-max-items 1000` and immediately pressing `o`, this happens:\r\n\r\n```\r\n12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: \"SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)\"\r\n12:21:36 DEBUG sql sql:run:99 Running SQL query: \"SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)\"\r\n12:21:36 DEBUG sql sql:run:102 query bindings: {':limit': 1000}\r\n12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: \"SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\\' or title LIKE :pat escape '\\') AND last_atime >= ORDER BY last_atime DESC\"\r\n12:21:36 DEBUG completion debug:__exit__:264 Starting url completion took 0.003652 seconds.\r\n12:21:36 ERROR misc crashsignal:exception_hook:205 Uncaught exception\r\nTraceback (most recent call last):\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/completer.py\", line 236, in _update_completion\r\n model = func(*args)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/urlmodel.py\", line 70, in url\r\n hist_cat = histcategory.HistoryCategory(delete_func=_delete_history)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/histcategory.py\", line 54, in __init__\r\n ]), forward_only=False)\r\n File \"/home/florian/proj/qutebrowser/git/qutebrowser/misc/sql.py\", line 83, in __init__\r\n querystr, self.lastError().text()))\r\nqutebrowser.misc.sql.SqlException: Failed to prepare query \"SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\\' or title LIKE :pat escape '\\') AND last_atime >= ORDER BY last_atime DESC\": \"near \"ORDER\": syntax error Unable to execute statement\"\r\n```\r\n\r\ncc @rcorre \n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2017 Ryan Roden-Corrent (rcorre) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"A completion category that queries the SQL History store.\"\"\"\n\nimport re\n\nfrom PyQt5.QtSql import QSqlQueryModel\n\nfrom qutebrowser.misc import sql\nfrom qutebrowser.utils import debug\nfrom qutebrowser.commands import cmdexc\nfrom qutebrowser.config import config\n\n\nclass HistoryCategory(QSqlQueryModel):\n\n \"\"\"A completion category that queries the SQL History store.\"\"\"\n\n def __init__(self, *, delete_func=None, parent=None):\n \"\"\"Create a new History completion category.\"\"\"\n super().__init__(parent=parent)\n self.name = \"History\"\n\n # replace ' in timestamp-format to avoid breaking the query\n timefmt = (\"strftime('{}', last_atime, 'unixepoch', 'localtime')\"\n .format(config.get('completion', 'timestamp-format')\n .replace(\"'\", \"`\")))\n\n self._query = sql.Query(' '.join([\n \"SELECT url, title, {}\".format(timefmt),\n \"FROM CompletionHistory\",\n # the incoming pattern will have literal % and _ escaped with '\\'\n # we need to tell sql to treat '\\' as an escape character\n \"WHERE (url LIKE :pat escape '\\\\' or title LIKE :pat escape '\\\\')\",\n self._atime_expr(),\n \"ORDER BY last_atime DESC\",\n ]), forward_only=False)\n\n # advertise that this model filters by URL and title\n self.columns_to_filter = [0, 1]\n self.delete_func = delete_func\n\n def _atime_expr(self):\n \"\"\"If max_items is set, return an expression to limit the query.\"\"\"\n max_items = config.get('completion', 'web-history-max-items')\n # HistoryCategory should not be added to the completion in that case.\n assert max_items != 0\n\n if max_items < 0:\n return ''\n\n min_atime = sql.Query(' '.join([\n 'SELECT min(last_atime) FROM',\n '(SELECT last_atime FROM CompletionHistory',\n 'ORDER BY last_atime DESC LIMIT :limit)',\n ])).run(limit=max_items).value()\n\n return \"AND last_atime >= {}\".format(min_atime)\n\n def set_pattern(self, pattern):\n \"\"\"Set the pattern used to filter results.\n\n Args:\n pattern: string pattern to filter by.\n \"\"\"\n # escape to treat a user input % or _ as a literal, not a wildcard\n pattern = pattern.replace('%', '\\\\%')\n pattern = pattern.replace('_', '\\\\_')\n # treat spaces as wildcards to match any of the typed words\n pattern = re.sub(r' +', '%', pattern)\n pattern = '%{}%'.format(pattern)\n with debug.log_time('sql', 'Running completion query'):\n self._query.run(pat=pattern)\n self.setQuery(self._query)\n\n def delete_cur_item(self, index):\n \"\"\"Delete the row at the given index.\"\"\"\n if not self.delete_func:\n raise cmdexc.CommandError(\"Cannot delete this item.\")\n data = [self.data(index.sibling(index.row(), i))\n for i in range(self.columnCount())]\n self.delete_func(data)\n # re-run query to reload updated table\n with debug.log_time('sql', 'Re-running completion query post-delete'):\n self._query.run()\n self.setQuery(self._query)\n", "path": "qutebrowser/completion/models/histcategory.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2017 Ryan Roden-Corrent (rcorre) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# 
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"A completion category that queries the SQL History store.\"\"\"\n\nimport re\n\nfrom PyQt5.QtSql import QSqlQueryModel\n\nfrom qutebrowser.misc import sql\nfrom qutebrowser.utils import debug\nfrom qutebrowser.commands import cmdexc\nfrom qutebrowser.config import config\n\n\nclass HistoryCategory(QSqlQueryModel):\n\n \"\"\"A completion category that queries the SQL History store.\"\"\"\n\n def __init__(self, *, delete_func=None, parent=None):\n \"\"\"Create a new History completion category.\"\"\"\n super().__init__(parent=parent)\n self.name = \"History\"\n\n # replace ' in timestamp-format to avoid breaking the query\n timefmt = (\"strftime('{}', last_atime, 'unixepoch', 'localtime')\"\n .format(config.get('completion', 'timestamp-format')\n .replace(\"'\", \"`\")))\n\n self._query = sql.Query(' '.join([\n \"SELECT url, title, {}\".format(timefmt),\n \"FROM CompletionHistory\",\n # the incoming pattern will have literal % and _ escaped with '\\'\n # we need to tell sql to treat '\\' as an escape character\n \"WHERE (url LIKE :pat escape '\\\\' or title LIKE :pat escape '\\\\')\",\n self._atime_expr(),\n \"ORDER BY last_atime DESC\",\n ]), forward_only=False)\n\n # advertise that this model filters by URL and title\n self.columns_to_filter = [0, 1]\n self.delete_func = delete_func\n\n def _atime_expr(self):\n \"\"\"If max_items is set, return an expression to limit the query.\"\"\"\n max_items = config.get('completion', 'web-history-max-items')\n # HistoryCategory should not be added to the completion in that case.\n assert max_items != 0\n\n if max_items < 0:\n return ''\n\n min_atime = sql.Query(' '.join([\n 'SELECT min(last_atime) FROM',\n '(SELECT last_atime FROM CompletionHistory',\n 'ORDER BY last_atime DESC LIMIT :limit)',\n ])).run(limit=max_items).value()\n\n if not min_atime:\n # if there are no history items, min_atime may be '' (issue #2849)\n return ''\n\n return \"AND last_atime >= {}\".format(min_atime)\n\n def set_pattern(self, pattern):\n \"\"\"Set the pattern used to filter results.\n\n Args:\n pattern: string pattern to filter by.\n \"\"\"\n # escape to treat a user input % or _ as a literal, not a wildcard\n pattern = pattern.replace('%', '\\\\%')\n pattern = pattern.replace('_', '\\\\_')\n # treat spaces as wildcards to match any of the typed words\n pattern = re.sub(r' +', '%', pattern)\n pattern = '%{}%'.format(pattern)\n with debug.log_time('sql', 'Running completion query'):\n self._query.run(pat=pattern)\n self.setQuery(self._query)\n\n def delete_cur_item(self, index):\n \"\"\"Delete the row at the given index.\"\"\"\n if not self.delete_func:\n raise cmdexc.CommandError(\"Cannot delete this item.\")\n data = [self.data(index.sibling(index.row(), i))\n for i in range(self.columnCount())]\n self.delete_func(data)\n # re-run query to reload updated table\n with debug.log_time('sql', 'Re-running completion query post-delete'):\n self._query.run()\n self.setQuery(self._query)\n", "path": "qutebrowser/completion/models/histcategory.py"}]} | 1,957 | 148 |
gh_patches_debug_38680 | rasdani/github-patches | git_diff | zigpy__zha-device-handlers-528 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] IKEA FYRTUR blind and remote reporting wrong battery or no battery
First, the blind:
Never updates the battery level automatically.
However, if you do a Zigbee "get cluster attribute" read for battery percentage remaining, it does return a correct value.
The remote:
Does update its battery level on its own.
Both:
Both values are displayed correctly in the get-attribute box, but the battery sensor always shows half of that figure.


--- END ISSUE ---
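Background before the code below: in the ZCL, `battery_percentage_remaining` is defined in half-percent steps (0-200 maps to 0-100%), but these IKEA devices appear to report a plain 0-100 value, which is why the displayed sensor ends up at half of the attribute you read back. The quirks in this repository compensate with a `DoublingPowerConfigurationCluster`; a rough sketch of what such a cluster does (the actual upstream helper also mixes in `CustomCluster` and may differ in detail):
```python
from zigpy.zcl.clusters.general import PowerConfiguration


class DoublingPowerConfigurationCluster(PowerConfiguration):
    """Double the reported value so a 0-100 report reads as 0-100%."""

    BATTERY_PERCENTAGE_REMAINING = 0x0021

    def _update_attribute(self, attrid, value):
        if attrid == self.BATTERY_PERCENTAGE_REMAINING and value is not None:
            value = value * 2  # device sends 0-100; ZCL consumers expect 0-200
        super()._update_attribute(attrid, value)
```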
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zhaquirks/ikea/blinds.py`
Content:
```
1 """Device handler for IKEA of Sweden TRADFRI Fyrtur blinds."""
2 from zigpy.profiles import zha
3 from zigpy.quirks import CustomDevice
4 from zigpy.zcl.clusters.closures import WindowCovering
5 from zigpy.zcl.clusters.general import (
6 Basic,
7 Groups,
8 Identify,
9 Ota,
10 PollControl,
11 PowerConfiguration,
12 Scenes,
13 )
14 from zigpy.zcl.clusters.lightlink import LightLink
15
16 from . import IKEA
17 from .. import DoublingPowerConfigurationCluster
18 from ..const import (
19 DEVICE_TYPE,
20 ENDPOINTS,
21 INPUT_CLUSTERS,
22 MODELS_INFO,
23 OUTPUT_CLUSTERS,
24 PROFILE_ID,
25 )
26
27 IKEA_CLUSTER_ID = 0xFC7C # decimal = 64636
28
29
30 class IkeaTradfriRollerBlinds(CustomDevice):
31 """Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds."""
32
33 signature = {
34 # <SimpleDescriptor endpoint=1 profile=260 device_type=2080
35 # device_version=1
36 # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096]
37 # output_clusters=[25, 4096]>
38 MODELS_INFO: [
39 (IKEA, "FYRTUR block-out roller blind"),
40 (IKEA, "KADRILJ roller blind"),
41 ],
42 ENDPOINTS: {
43 1: {
44 PROFILE_ID: zha.PROFILE_ID,
45 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,
46 INPUT_CLUSTERS: [
47 Basic.cluster_id,
48 PowerConfiguration.cluster_id,
49 Identify.cluster_id,
50 Groups.cluster_id,
51 Scenes.cluster_id,
52 PollControl.cluster_id,
53 WindowCovering.cluster_id,
54 LightLink.cluster_id,
55 ],
56 OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],
57 }
58 },
59 }
60
61 replacement = {
62 "endpoints": {
63 1: {
64 "profile_id": zha.PROFILE_ID,
65 "device_type": zha.DeviceType.WINDOW_COVERING_DEVICE,
66 "input_clusters": [
67 Basic.cluster_id,
68 DoublingPowerConfigurationCluster,
69 Identify.cluster_id,
70 Groups.cluster_id,
71 Scenes.cluster_id,
72 PollControl.cluster_id,
73 WindowCovering.cluster_id,
74 LightLink.cluster_id,
75 ],
76 "output_clusters": [Ota.cluster_id, LightLink.cluster_id],
77 }
78 }
79 }
80
```
Path: `zhaquirks/ikea/opencloseremote.py`
Content:
```
1 """Device handler for IKEA of Sweden TRADFRI remote control."""
2 from zigpy.profiles import zha
3 from zigpy.quirks import CustomDevice
4 from zigpy.zcl.clusters.closures import WindowCovering
5 from zigpy.zcl.clusters.general import (
6 Alarms,
7 Basic,
8 Groups,
9 Identify,
10 LevelControl,
11 OnOff,
12 Ota,
13 PollControl,
14 PowerConfiguration,
15 )
16 from zigpy.zcl.clusters.lightlink import LightLink
17
18 from . import IKEA
19 from .. import DoublingPowerConfigurationCluster
20 from ..const import (
21 DEVICE_TYPE,
22 ENDPOINTS,
23 INPUT_CLUSTERS,
24 MODELS_INFO,
25 OUTPUT_CLUSTERS,
26 PROFILE_ID,
27 )
28
29 IKEA_CLUSTER_ID = 0xFC7C # decimal = 64636
30
31
32 class IkeaTradfriOpenCloseRemote(CustomDevice):
33 """Custom device representing IKEA of Sweden TRADFRI remote control."""
34
35 signature = {
36 MODELS_INFO: [("\x02KE", "TRADFRI open/close remote")],
37 ENDPOINTS: {
38 1: {
39 PROFILE_ID: zha.PROFILE_ID,
40 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,
41 INPUT_CLUSTERS: [
42 Basic.cluster_id,
43 PowerConfiguration.cluster_id,
44 Identify.cluster_id,
45 Alarms.cluster_id,
46 PollControl.cluster_id,
47 LightLink.cluster_id,
48 IKEA_CLUSTER_ID,
49 ],
50 OUTPUT_CLUSTERS: [
51 Identify.cluster_id,
52 Groups.cluster_id,
53 OnOff.cluster_id,
54 LevelControl.cluster_id,
55 Ota.cluster_id,
56 WindowCovering.cluster_id,
57 LightLink.cluster_id,
58 ],
59 }
60 },
61 }
62
63 replacement = {
64 MODELS_INFO: [(IKEA, "TRADFRI open/close remote")],
65 ENDPOINTS: {
66 1: {
67 PROFILE_ID: zha.PROFILE_ID,
68 DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,
69 INPUT_CLUSTERS: [
70 Basic.cluster_id,
71 DoublingPowerConfigurationCluster,
72 Identify.cluster_id,
73 Alarms.cluster_id,
74 PollControl.cluster_id,
75 LightLink.cluster_id,
76 IKEA_CLUSTER_ID,
77 ],
78 OUTPUT_CLUSTERS: [
79 Identify.cluster_id,
80 Groups.cluster_id,
81 OnOff.cluster_id,
82 LevelControl.cluster_id,
83 Ota.cluster_id,
84 WindowCovering.cluster_id,
85 LightLink.cluster_id,
86 ],
87 }
88 },
89 }
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zhaquirks/ikea/blinds.py b/zhaquirks/ikea/blinds.py
--- a/zhaquirks/ikea/blinds.py
+++ b/zhaquirks/ikea/blinds.py
@@ -31,9 +31,9 @@
"""Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds."""
signature = {
- # <SimpleDescriptor endpoint=1 profile=260 device_type=2080
+ # <SimpleDescriptor endpoint=1 profile=260 device_type=514
# device_version=1
- # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096]
+ # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096, 64636]
# output_clusters=[25, 4096]>
MODELS_INFO: [
(IKEA, "FYRTUR block-out roller blind"),
@@ -52,6 +52,7 @@
PollControl.cluster_id,
WindowCovering.cluster_id,
LightLink.cluster_id,
+ IKEA_CLUSTER_ID,
],
OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],
}
@@ -59,11 +60,11 @@
}
replacement = {
- "endpoints": {
+ ENDPOINTS: {
1: {
- "profile_id": zha.PROFILE_ID,
- "device_type": zha.DeviceType.WINDOW_COVERING_DEVICE,
- "input_clusters": [
+ PROFILE_ID: zha.PROFILE_ID,
+ DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,
+ INPUT_CLUSTERS: [
Basic.cluster_id,
DoublingPowerConfigurationCluster,
Identify.cluster_id,
@@ -72,8 +73,9 @@
PollControl.cluster_id,
WindowCovering.cluster_id,
LightLink.cluster_id,
+ IKEA_CLUSTER_ID,
],
- "output_clusters": [Ota.cluster_id, LightLink.cluster_id],
+ OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],
}
}
}
diff --git a/zhaquirks/ikea/opencloseremote.py b/zhaquirks/ikea/opencloseremote.py
--- a/zhaquirks/ikea/opencloseremote.py
+++ b/zhaquirks/ikea/opencloseremote.py
@@ -33,7 +33,14 @@
"""Custom device representing IKEA of Sweden TRADFRI remote control."""
signature = {
- MODELS_INFO: [("\x02KE", "TRADFRI open/close remote")],
+ # <SimpleDescriptor endpoint=1 profile=260 device_type=515
+ # device_version=1
+ # input_clusters=[0, 1, 3, 9, 32, 4096, 64636]
+ # output_clusters=[3, 4, 6, 8, 25, 258, 4096]>
+ MODELS_INFO: [
+ ("\x02KE", "TRADFRI open/close remote"),
+ (IKEA, "TRADFRI open/close remote"),
+ ],
ENDPOINTS: {
1: {
PROFILE_ID: zha.PROFILE_ID,
| {"golden_diff": "diff --git a/zhaquirks/ikea/blinds.py b/zhaquirks/ikea/blinds.py\n--- a/zhaquirks/ikea/blinds.py\n+++ b/zhaquirks/ikea/blinds.py\n@@ -31,9 +31,9 @@\n \"\"\"Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds.\"\"\"\n \n signature = {\n- # <SimpleDescriptor endpoint=1 profile=260 device_type=2080\n+ # <SimpleDescriptor endpoint=1 profile=260 device_type=514\n # device_version=1\n- # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096]\n+ # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096, 64636]\n # output_clusters=[25, 4096]>\n MODELS_INFO: [\n (IKEA, \"FYRTUR block-out roller blind\"),\n@@ -52,6 +52,7 @@\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n+ IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\n }\n@@ -59,11 +60,11 @@\n }\n \n replacement = {\n- \"endpoints\": {\n+ ENDPOINTS: {\n 1: {\n- \"profile_id\": zha.PROFILE_ID,\n- \"device_type\": zha.DeviceType.WINDOW_COVERING_DEVICE,\n- \"input_clusters\": [\n+ PROFILE_ID: zha.PROFILE_ID,\n+ DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n+ INPUT_CLUSTERS: [\n Basic.cluster_id,\n DoublingPowerConfigurationCluster,\n Identify.cluster_id,\n@@ -72,8 +73,9 @@\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n+ IKEA_CLUSTER_ID,\n ],\n- \"output_clusters\": [Ota.cluster_id, LightLink.cluster_id],\n+ OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\n }\n }\n }\ndiff --git a/zhaquirks/ikea/opencloseremote.py b/zhaquirks/ikea/opencloseremote.py\n--- a/zhaquirks/ikea/opencloseremote.py\n+++ b/zhaquirks/ikea/opencloseremote.py\n@@ -33,7 +33,14 @@\n \"\"\"Custom device representing IKEA of Sweden TRADFRI remote control.\"\"\"\n \n signature = {\n- MODELS_INFO: [(\"\\x02KE\", \"TRADFRI open/close remote\")],\n+ # <SimpleDescriptor endpoint=1 profile=260 device_type=515\n+ # device_version=1\n+ # input_clusters=[0, 1, 3, 9, 32, 4096, 64636]\n+ # output_clusters=[3, 4, 6, 8, 25, 258, 4096]>\n+ MODELS_INFO: [\n+ (\"\\x02KE\", \"TRADFRI open/close remote\"),\n+ (IKEA, \"TRADFRI open/close remote\"),\n+ ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n", "issue": "[BUG]Ikea FYRTUR blind and remote reporting wrong battery or no battery\nFirst \r\n\r\nBlind:\r\n\r\nNever updates the battery automatically.\r\n\r\nHowever if you do a zigbee get cluster attribute for battery percentage remaining it does return a correct value.\r\n\r\nRemote:\r\n\r\ndoes update battery on its own.\r\n\r\nBoth:\r\n\r\nboth values are displayed correctly in the get attribute box but the sensor for the batteries figures are always half what it shows.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"Device handler for IKEA of Sweden TRADFRI Fyrtur blinds.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import (\n Basic,\n Groups,\n Identify,\n Ota,\n PollControl,\n PowerConfiguration,\n Scenes,\n)\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom . import IKEA\nfrom .. 
import DoublingPowerConfigurationCluster\nfrom ..const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\nIKEA_CLUSTER_ID = 0xFC7C # decimal = 64636\n\n\nclass IkeaTradfriRollerBlinds(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=2080\n # device_version=1\n # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096]\n # output_clusters=[25, 4096]>\n MODELS_INFO: [\n (IKEA, \"FYRTUR block-out roller blind\"),\n (IKEA, \"KADRILJ roller blind\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\n }\n },\n }\n\n replacement = {\n \"endpoints\": {\n 1: {\n \"profile_id\": zha.PROFILE_ID,\n \"device_type\": zha.DeviceType.WINDOW_COVERING_DEVICE,\n \"input_clusters\": [\n Basic.cluster_id,\n DoublingPowerConfigurationCluster,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n \"output_clusters\": [Ota.cluster_id, LightLink.cluster_id],\n }\n }\n }\n", "path": "zhaquirks/ikea/blinds.py"}, {"content": "\"\"\"Device handler for IKEA of Sweden TRADFRI remote control.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import (\n Alarms,\n Basic,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n PollControl,\n PowerConfiguration,\n)\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom . import IKEA\nfrom .. 
import DoublingPowerConfigurationCluster\nfrom ..const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\nIKEA_CLUSTER_ID = 0xFC7C # decimal = 64636\n\n\nclass IkeaTradfriOpenCloseRemote(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI remote control.\"\"\"\n\n signature = {\n MODELS_INFO: [(\"\\x02KE\", \"TRADFRI open/close remote\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n\n replacement = {\n MODELS_INFO: [(IKEA, \"TRADFRI open/close remote\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n DoublingPowerConfigurationCluster,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n", "path": "zhaquirks/ikea/opencloseremote.py"}], "after_files": [{"content": "\"\"\"Device handler for IKEA of Sweden TRADFRI Fyrtur blinds.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import (\n Basic,\n Groups,\n Identify,\n Ota,\n PollControl,\n PowerConfiguration,\n Scenes,\n)\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom . import IKEA\nfrom .. 
import DoublingPowerConfigurationCluster\nfrom ..const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\nIKEA_CLUSTER_ID = 0xFC7C # decimal = 64636\n\n\nclass IkeaTradfriRollerBlinds(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI Fyrtur blinds.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=514\n # device_version=1\n # input_clusters=[0, 1, 3, 4, 5, 32, 258, 4096, 64636]\n # output_clusters=[25, 4096]>\n MODELS_INFO: [\n (IKEA, \"FYRTUR block-out roller blind\"),\n (IKEA, \"KADRILJ roller blind\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\n }\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_DEVICE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n DoublingPowerConfigurationCluster,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n PollControl.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id, LightLink.cluster_id],\n }\n }\n }\n", "path": "zhaquirks/ikea/blinds.py"}, {"content": "\"\"\"Device handler for IKEA of Sweden TRADFRI remote control.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.closures import WindowCovering\nfrom zigpy.zcl.clusters.general import (\n Alarms,\n Basic,\n Groups,\n Identify,\n LevelControl,\n OnOff,\n Ota,\n PollControl,\n PowerConfiguration,\n)\nfrom zigpy.zcl.clusters.lightlink import LightLink\n\nfrom . import IKEA\nfrom .. 
import DoublingPowerConfigurationCluster\nfrom ..const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\nIKEA_CLUSTER_ID = 0xFC7C # decimal = 64636\n\n\nclass IkeaTradfriOpenCloseRemote(CustomDevice):\n \"\"\"Custom device representing IKEA of Sweden TRADFRI remote control.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=515\n # device_version=1\n # input_clusters=[0, 1, 3, 9, 32, 4096, 64636]\n # output_clusters=[3, 4, 6, 8, 25, 258, 4096]>\n MODELS_INFO: [\n (\"\\x02KE\", \"TRADFRI open/close remote\"),\n (IKEA, \"TRADFRI open/close remote\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfiguration.cluster_id,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n\n replacement = {\n MODELS_INFO: [(IKEA, \"TRADFRI open/close remote\")],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.WINDOW_COVERING_CONTROLLER,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n DoublingPowerConfigurationCluster,\n Identify.cluster_id,\n Alarms.cluster_id,\n PollControl.cluster_id,\n LightLink.cluster_id,\n IKEA_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [\n Identify.cluster_id,\n Groups.cluster_id,\n OnOff.cluster_id,\n LevelControl.cluster_id,\n Ota.cluster_id,\n WindowCovering.cluster_id,\n LightLink.cluster_id,\n ],\n }\n },\n }\n", "path": "zhaquirks/ikea/opencloseremote.py"}]} | 1,921 | 789 |
gh_patches_debug_12128 | rasdani/github-patches | git_diff | nf-core__tools-1357 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Linting should fail if pipeline module file is edited
In https://github.com/ewels/nf-core-methylseq/pull/2 I have edited a module file that was pulled from nf-core/modules. I shouldn't be allowed to do this.
The linting warns me that something is amiss:
```
│ bismark/align │ modules/nf-cor… │ Local copy of │
│ │ │ module outdated │
```
But it should be checking the git sha in `modules.json` and recognising that it doesn't match what it expects. Then throwing a hard error.
Possible that the code for this is lurking in a PR that's waiting to be merged.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nf_core/modules/lint/module_changes.py`
Content:
```
1 """
2 Check whether the content of a module has changed compared to the original repository
3 """
4 import os
5 import requests
6 import rich
7 from nf_core.modules.lint import LintResult
8
9
10 def module_changes(module_lint_object, module):
11 """
12 Checks whether installed nf-core modules have changed compared to the
13 original repository
14 Downloads the 'main.nf' and 'meta.yml' files for every module
15 and compares them to the local copies
16
17 If the module has a 'git_sha', the file content is checked against this sha
18 """
19 files_to_check = ["main.nf", "meta.yml"]
20
21 # Loop over nf-core modules
22 module_base_url = f"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module_lint_object.modules_repo.branch}/modules/{module.module_name}/"
23
24 # If module.git_sha specified, check specific commit version for changes
25 if module.git_sha:
26 module_base_url = f"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module.git_sha}/modules/{module.module_name}/"
27
28 for f in files_to_check:
29 # open local copy, continue if file not found (a failed message has already been issued in this case)
30 try:
31 local_copy = open(os.path.join(module.module_dir, f), "r").read()
32 except FileNotFoundError as e:
33 continue
34
35 # Download remote copy and compare
36 url = module_base_url + f
37 r = requests.get(url=url)
38
39 if r.status_code != 200:
40 module.warned.append(
41 (
42 "check_local_copy",
43 f"Could not fetch remote copy, skipping comparison.",
44 f"{os.path.join(module.module_dir, f)}",
45 )
46 )
47 else:
48 try:
49 remote_copy = r.content.decode("utf-8")
50
51 if local_copy != remote_copy:
52 module.warned.append(
53 (
54 "check_local_copy",
55 "Local copy of module outdated",
56 f"{os.path.join(module.module_dir, f)}",
57 )
58 )
59 else:
60 module.passed.append(
61 (
62 "check_local_copy",
63 "Local copy of module up to date",
64 f"{os.path.join(module.module_dir, f)}",
65 )
66 )
67 except UnicodeDecodeError as e:
68 module.warned.append(
69 (
70 "check_local_copy",
71 f"Could not decode file from {url}. Skipping comparison ({e})",
72 f"{os.path.join(module.module_dir, f)}",
73 )
74 )
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nf_core/modules/lint/module_changes.py b/nf_core/modules/lint/module_changes.py
--- a/nf_core/modules/lint/module_changes.py
+++ b/nf_core/modules/lint/module_changes.py
@@ -49,10 +49,10 @@
remote_copy = r.content.decode("utf-8")
if local_copy != remote_copy:
- module.warned.append(
+ module.failed.append(
(
"check_local_copy",
- "Local copy of module outdated",
+ "Local copy of module does not match remote",
f"{os.path.join(module.module_dir, f)}",
)
)
| {"golden_diff": "diff --git a/nf_core/modules/lint/module_changes.py b/nf_core/modules/lint/module_changes.py\n--- a/nf_core/modules/lint/module_changes.py\n+++ b/nf_core/modules/lint/module_changes.py\n@@ -49,10 +49,10 @@\n remote_copy = r.content.decode(\"utf-8\")\n \n if local_copy != remote_copy:\n- module.warned.append(\n+ module.failed.append(\n (\n \"check_local_copy\",\n- \"Local copy of module outdated\",\n+ \"Local copy of module does not match remote\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n", "issue": "Linting should fail if pipeline module file is edited\nIn https://github.com/ewels/nf-core-methylseq/pull/2 I have edited a module file that was pulled from nf-core/modules. I shouldn't be allowed to do this.\r\n\r\nThe linting warns me that something is amiss:\r\n\r\n```\r\n\u2502 bismark/align \u2502 modules/nf-cor\u2026 \u2502 Local copy of \u2502\r\n\u2502 \u2502 \u2502 module outdated \u2502\r\n```\r\n\r\nBut it should be checking the git sha in `modules.json` and recognising that it doesn't match what it expects. Then throwing a hard error.\r\n\r\nPossible that the code for this is lurking in a PR that's waiting to be merged..\n", "before_files": [{"content": "\"\"\"\nCheck whether the content of a module has changed compared to the original repository\n\"\"\"\nimport os\nimport requests\nimport rich\nfrom nf_core.modules.lint import LintResult\n\n\ndef module_changes(module_lint_object, module):\n \"\"\"\n Checks whether installed nf-core modules have changed compared to the\n original repository\n Downloads the 'main.nf' and 'meta.yml' files for every module\n and compares them to the local copies\n\n If the module has a 'git_sha', the file content is checked against this sha\n \"\"\"\n files_to_check = [\"main.nf\", \"meta.yml\"]\n\n # Loop over nf-core modules\n module_base_url = f\"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module_lint_object.modules_repo.branch}/modules/{module.module_name}/\"\n\n # If module.git_sha specified, check specific commit version for changes\n if module.git_sha:\n module_base_url = f\"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module.git_sha}/modules/{module.module_name}/\"\n\n for f in files_to_check:\n # open local copy, continue if file not found (a failed message has already been issued in this case)\n try:\n local_copy = open(os.path.join(module.module_dir, f), \"r\").read()\n except FileNotFoundError as e:\n continue\n\n # Download remote copy and compare\n url = module_base_url + f\n r = requests.get(url=url)\n\n if r.status_code != 200:\n module.warned.append(\n (\n \"check_local_copy\",\n f\"Could not fetch remote copy, skipping comparison.\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n else:\n try:\n remote_copy = r.content.decode(\"utf-8\")\n\n if local_copy != remote_copy:\n module.warned.append(\n (\n \"check_local_copy\",\n \"Local copy of module outdated\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n else:\n module.passed.append(\n (\n \"check_local_copy\",\n \"Local copy of module up to date\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n except UnicodeDecodeError as e:\n module.warned.append(\n (\n \"check_local_copy\",\n f\"Could not decode file from {url}. 
Skipping comparison ({e})\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n", "path": "nf_core/modules/lint/module_changes.py"}], "after_files": [{"content": "\"\"\"\nCheck whether the content of a module has changed compared to the original repository\n\"\"\"\nimport os\nimport requests\nimport rich\nfrom nf_core.modules.lint import LintResult\n\n\ndef module_changes(module_lint_object, module):\n \"\"\"\n Checks whether installed nf-core modules have changed compared to the\n original repository\n Downloads the 'main.nf' and 'meta.yml' files for every module\n and compares them to the local copies\n\n If the module has a 'git_sha', the file content is checked against this sha\n \"\"\"\n files_to_check = [\"main.nf\", \"meta.yml\"]\n\n # Loop over nf-core modules\n module_base_url = f\"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module_lint_object.modules_repo.branch}/modules/{module.module_name}/\"\n\n # If module.git_sha specified, check specific commit version for changes\n if module.git_sha:\n module_base_url = f\"https://raw.githubusercontent.com/{module_lint_object.modules_repo.name}/{module.git_sha}/modules/{module.module_name}/\"\n\n for f in files_to_check:\n # open local copy, continue if file not found (a failed message has already been issued in this case)\n try:\n local_copy = open(os.path.join(module.module_dir, f), \"r\").read()\n except FileNotFoundError as e:\n continue\n\n # Download remote copy and compare\n url = module_base_url + f\n r = requests.get(url=url)\n\n if r.status_code != 200:\n module.warned.append(\n (\n \"check_local_copy\",\n f\"Could not fetch remote copy, skipping comparison.\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n else:\n try:\n remote_copy = r.content.decode(\"utf-8\")\n\n if local_copy != remote_copy:\n module.failed.append(\n (\n \"check_local_copy\",\n \"Local copy of module does not match remote\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n else:\n module.passed.append(\n (\n \"check_local_copy\",\n \"Local copy of module up to date\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n except UnicodeDecodeError as e:\n module.warned.append(\n (\n \"check_local_copy\",\n f\"Could not decode file from {url}. Skipping comparison ({e})\",\n f\"{os.path.join(module.module_dir, f)}\",\n )\n )\n", "path": "nf_core/modules/lint/module_changes.py"}]} | 1,086 | 142 |
gh_patches_debug_6286 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-284 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Include tailwind.css via npm instead of static file
At the moment, we include tailwind.css as a static file in our repo.
Instead, we should use [the recommended installation via npm](https://tailwindcss.com/docs/installation/) where we can configure exactly which parts we need and compile a minified css file in our deployment chain.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/cms/views/media/media_list_view.py`
Content:
```
1 from django.contrib.auth.decorators import login_required
2 from django.shortcuts import render
3 from django.utils.decorators import method_decorator
4 from django.views.generic import TemplateView
5
6 from ...decorators import region_permission_required
7 from ...models import Document
8
9
10 @method_decorator(login_required, name='dispatch')
11 @method_decorator(region_permission_required, name='dispatch')
12 class MediaListView(TemplateView):
13 template_name = 'media/list.html'
14 base_context = {'current_menu_item': 'media'}
15
16 def get(self, request, *args, **kwargs):
17 documents = Document.objects.all()
18
19 return render(
20 request,
21 self.template_name,
22 {
23 **self.base_context,
24 'documents': documents
25 }
26 )
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/cms/views/media/media_list_view.py b/backend/cms/views/media/media_list_view.py
--- a/backend/cms/views/media/media_list_view.py
+++ b/backend/cms/views/media/media_list_view.py
@@ -10,7 +10,7 @@
@method_decorator(login_required, name='dispatch')
@method_decorator(region_permission_required, name='dispatch')
class MediaListView(TemplateView):
- template_name = 'media/list.html'
+ template_name = 'media/media_list.html'
base_context = {'current_menu_item': 'media'}
def get(self, request, *args, **kwargs):
| {"golden_diff": "diff --git a/backend/cms/views/media/media_list_view.py b/backend/cms/views/media/media_list_view.py\n--- a/backend/cms/views/media/media_list_view.py\n+++ b/backend/cms/views/media/media_list_view.py\n@@ -10,7 +10,7 @@\n @method_decorator(login_required, name='dispatch')\n @method_decorator(region_permission_required, name='dispatch')\n class MediaListView(TemplateView):\n- template_name = 'media/list.html'\n+ template_name = 'media/media_list.html'\n base_context = {'current_menu_item': 'media'}\n \n def get(self, request, *args, **kwargs):\n", "issue": "Include tailwind.css via npm instead of static file\nAt the moment, we include tailwind.css as a static file in our repo.\r\nInstead, we should use [the recommended installation via npm](https://tailwindcss.com/docs/installation/) where we can configure exactly which parts we need and compile a minified css file in our deployment chain.\n", "before_files": [{"content": "from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import TemplateView\n\nfrom ...decorators import region_permission_required\nfrom ...models import Document\n\n\n@method_decorator(login_required, name='dispatch')\n@method_decorator(region_permission_required, name='dispatch')\nclass MediaListView(TemplateView):\n template_name = 'media/list.html'\n base_context = {'current_menu_item': 'media'}\n\n def get(self, request, *args, **kwargs):\n documents = Document.objects.all()\n\n return render(\n request,\n self.template_name,\n {\n **self.base_context,\n 'documents': documents\n }\n )\n", "path": "backend/cms/views/media/media_list_view.py"}], "after_files": [{"content": "from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import TemplateView\n\nfrom ...decorators import region_permission_required\nfrom ...models import Document\n\n\n@method_decorator(login_required, name='dispatch')\n@method_decorator(region_permission_required, name='dispatch')\nclass MediaListView(TemplateView):\n template_name = 'media/media_list.html'\n base_context = {'current_menu_item': 'media'}\n\n def get(self, request, *args, **kwargs):\n documents = Document.objects.all()\n\n return render(\n request,\n self.template_name,\n {\n **self.base_context,\n 'documents': documents\n }\n )\n", "path": "backend/cms/views/media/media_list_view.py"}]} | 527 | 131 |
gh_patches_debug_378 | rasdani/github-patches | git_diff | encode__uvicorn-1099 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping
### Checklist
<!-- Please make sure you check all these items before submitting your bug report. -->
- [X] The bug is reproducible against the latest release and/or `master`.
- [X] There are no similar issues or pull requests to fix it yet.
### Describe the bug
When adding uvicorn package with poetry the following warning is raised:
PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping
because the constraint `>=7.*` violates PEP440 syntax. It should either be `>=7.0` or `=7.*`.
Because of this, the `click` dependency is not installed and uvicorn may not work.
### To reproduce
Just execute `poetry add uvicorn` in any environment.
### Expected behavior
To install `uvicorn` correctly together with all its dependencies.
### Actual behavior
The `click` dependency is not installed and uvicorn may not work.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import os
5 import re
6
7 from setuptools import setup
8
9
10 def get_version(package):
11 """
12 Return package version as listed in `__version__` in `init.py`.
13 """
14 path = os.path.join(package, "__init__.py")
15 init_py = open(path, "r", encoding="utf8").read()
16 return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
17
18
19 def get_long_description():
20 """
21 Return the README.
22 """
23 return open("README.md", "r", encoding="utf8").read()
24
25
26 def get_packages(package):
27 """
28 Return root package and all sub-packages.
29 """
30 return [
31 dirpath
32 for dirpath, dirnames, filenames in os.walk(package)
33 if os.path.exists(os.path.join(dirpath, "__init__.py"))
34 ]
35
36
37 env_marker_cpython = (
38 "sys_platform != 'win32'"
39 " and (sys_platform != 'cygwin'"
40 " and platform_python_implementation != 'PyPy')"
41 )
42
43 env_marker_win = "sys_platform == 'win32'"
44 env_marker_below_38 = "python_version < '3.8'"
45
46 minimal_requirements = [
47 "asgiref>=3.4.0",
48 "click>=7.*",
49 "h11>=0.8",
50 "typing-extensions;" + env_marker_below_38,
51 ]
52
53
54 extra_requirements = [
55 "websockets>=9.1",
56 "httptools==0.2.*",
57 "uvloop>=0.14.0,!=0.15.0,!=0.15.1; " + env_marker_cpython,
58 "colorama>=0.4;" + env_marker_win,
59 "watchgod>=0.6",
60 "python-dotenv>=0.13",
61 "PyYAML>=5.1",
62 ]
63
64
65 setup(
66 name="uvicorn",
67 version=get_version("uvicorn"),
68 url="https://github.com/encode/uvicorn",
69 license="BSD",
70 description="The lightning-fast ASGI server.",
71 long_description=get_long_description(),
72 long_description_content_type="text/markdown",
73 author="Tom Christie",
74 author_email="[email protected]",
75 packages=get_packages("uvicorn"),
76 install_requires=minimal_requirements,
77 extras_require={"standard": extra_requirements},
78 include_package_data=True,
79 classifiers=[
80 "Development Status :: 4 - Beta",
81 "Environment :: Web Environment",
82 "Intended Audience :: Developers",
83 "License :: OSI Approved :: BSD License",
84 "Operating System :: OS Independent",
85 "Topic :: Internet :: WWW/HTTP",
86 "Programming Language :: Python :: 3",
87 "Programming Language :: Python :: 3.6",
88 "Programming Language :: Python :: 3.7",
89 "Programming Language :: Python :: 3.8",
90 "Programming Language :: Python :: 3.9",
91 "Programming Language :: Python :: Implementation :: CPython",
92 "Programming Language :: Python :: Implementation :: PyPy",
93 ],
94 entry_points="""
95 [console_scripts]
96 uvicorn=uvicorn.main:main
97 """,
98 )
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -45,7 +45,7 @@
minimal_requirements = [
"asgiref>=3.4.0",
- "click>=7.*",
+ "click>=7.0",
"h11>=0.8",
"typing-extensions;" + env_marker_below_38,
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -45,7 +45,7 @@\n \n minimal_requirements = [\n \"asgiref>=3.4.0\",\n- \"click>=7.*\",\n+ \"click>=7.0\",\n \"h11>=0.8\",\n \"typing-extensions;\" + env_marker_below_38,\n ]\n", "issue": "PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping\n### Checklist\r\n\r\n<!-- Please make sure you check all these items before submitting your bug report. -->\r\n\r\n- [X] The bug is reproducible against the latest release and/or `master`.\r\n- [X] There are no similar issues or pull requests to fix it yet.\r\n\r\n### Describe the bug\r\n\r\nWhen adding uvicorn package with poetry the following warning is raised:\r\n\r\n PackageInfo: Invalid constraint (click (>=7.*)) found in uvicorn-0.14.0 dependencies, skipping\r\n\r\nbecause the constraint `>=7.*` violates PEP440 syntax. It should either be `>=7.0` or `=7.*`.\r\n\r\nBecause of this, the `click` dependency is not installed and uvicorn may not work.\r\n\r\n### To reproduce\r\n\r\nJust execute `poetry add uvicorn` in any environment.\r\n\r\n### Expected behavior\r\n\r\nTo install `uvicorn` correctly together with all its dependencies.\r\n\r\n### Actual behavior\r\n\r\nThe `click` dependency is not installed and uvicorn may not work.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, \"__init__.py\")\n init_py = open(path, \"r\", encoding=\"utf8\").read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n return open(\"README.md\", \"r\", encoding=\"utf8\").read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nenv_marker_win = \"sys_platform == 'win32'\"\nenv_marker_below_38 = \"python_version < '3.8'\"\n\nminimal_requirements = [\n \"asgiref>=3.4.0\",\n \"click>=7.*\",\n \"h11>=0.8\",\n \"typing-extensions;\" + env_marker_below_38,\n]\n\n\nextra_requirements = [\n \"websockets>=9.1\",\n \"httptools==0.2.*\",\n \"uvloop>=0.14.0,!=0.15.0,!=0.15.1; \" + env_marker_cpython,\n \"colorama>=0.4;\" + env_marker_win,\n \"watchgod>=0.6\",\n \"python-dotenv>=0.13\",\n \"PyYAML>=5.1\",\n]\n\n\nsetup(\n name=\"uvicorn\",\n version=get_version(\"uvicorn\"),\n url=\"https://github.com/encode/uvicorn\",\n license=\"BSD\",\n description=\"The lightning-fast ASGI server.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n packages=get_packages(\"uvicorn\"),\n install_requires=minimal_requirements,\n extras_require={\"standard\": extra_requirements},\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 
3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n uvicorn=uvicorn.main:main\n \"\"\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, \"__init__.py\")\n init_py = open(path, \"r\", encoding=\"utf8\").read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n return open(\"README.md\", \"r\", encoding=\"utf8\").read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nenv_marker_win = \"sys_platform == 'win32'\"\nenv_marker_below_38 = \"python_version < '3.8'\"\n\nminimal_requirements = [\n \"asgiref>=3.4.0\",\n \"click>=7.0\",\n \"h11>=0.8\",\n \"typing-extensions;\" + env_marker_below_38,\n]\n\n\nextra_requirements = [\n \"websockets>=9.1\",\n \"httptools==0.2.*\",\n \"uvloop>=0.14.0,!=0.15.0,!=0.15.1; \" + env_marker_cpython,\n \"colorama>=0.4;\" + env_marker_win,\n \"watchgod>=0.6\",\n \"python-dotenv>=0.13\",\n \"PyYAML>=5.1\",\n]\n\n\nsetup(\n name=\"uvicorn\",\n version=get_version(\"uvicorn\"),\n url=\"https://github.com/encode/uvicorn\",\n license=\"BSD\",\n description=\"The lightning-fast ASGI server.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n packages=get_packages(\"uvicorn\"),\n install_requires=minimal_requirements,\n extras_require={\"standard\": extra_requirements},\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n uvicorn=uvicorn.main:main\n \"\"\",\n)\n", "path": "setup.py"}]} | 1,383 | 92 |
gh_patches_debug_31527 | rasdani/github-patches | git_diff | iterative__dvc-4739 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dvc dag --outs
In the `0.xx` version days, there was a `dvc pipeline show --outs` command that was able to show a DAG that included outputs.
I thought this was a really nice feature. For instance, I have a `train_test_split` stage that creates two outputs, `train` and `test`, and it would be nice to see them taking different paths in the DAG.
Can you maybe (re)implement this feature?
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/command/dag.py`
Content:
```
1 import argparse
2 import logging
3
4 from dvc.command.base import CmdBase, append_doc_link
5 from dvc.exceptions import DvcException
6
7 logger = logging.getLogger(__name__)
8
9
10 def _show_ascii(G):
11 from dvc.dagascii import draw
12 from dvc.repo.graph import get_pipelines
13
14 pipelines = get_pipelines(G)
15
16 ret = []
17 for pipeline in pipelines:
18 ret.append(draw(pipeline.nodes, pipeline.edges))
19
20 return "\n".join(ret)
21
22
23 def _show_dot(G):
24 import io
25
26 from networkx.drawing.nx_pydot import write_dot
27
28 dot_file = io.StringIO()
29 write_dot(G, dot_file)
30 return dot_file.getvalue()
31
32
33 def _build(G, target=None, full=False):
34 import networkx as nx
35
36 from dvc.repo.graph import get_pipeline, get_pipelines
37
38 if target:
39 H = get_pipeline(get_pipelines(G), target)
40 if not full:
41 descendants = nx.descendants(G, target)
42 descendants.add(target)
43 H.remove_nodes_from(set(G.nodes()) - descendants)
44 else:
45 H = G
46
47 def _relabel(stage):
48 return stage.addressing
49
50 return nx.relabel_nodes(H, _relabel, copy=False)
51
52
53 class CmdDAG(CmdBase):
54 def run(self):
55 try:
56 target = None
57 if self.args.target:
58 stages = self.repo.collect(self.args.target)
59 if len(stages) > 1:
60 logger.error(
61 f"'{self.args.target}' contains more than one stage "
62 "{stages}, please specify one stage"
63 )
64 return 1
65 target = stages[0]
66
67 G = _build(self.repo.graph, target=target, full=self.args.full,)
68
69 if self.args.dot:
70 logger.info(_show_dot(G))
71 else:
72 from dvc.utils.pager import pager
73
74 pager(_show_ascii(G))
75
76 return 0
77 except DvcException:
78 msg = "failed to show "
79 if self.args.target:
80 msg += f"a pipeline for '{target}'"
81 else:
82 msg += "pipelines"
83 logger.exception(msg)
84 return 1
85
86
87 def add_parser(subparsers, parent_parser):
88 DAG_HELP = "Visualize DVC project DAG."
89 dag_parser = subparsers.add_parser(
90 "dag",
91 parents=[parent_parser],
92 description=append_doc_link(DAG_HELP, "dag"),
93 help=DAG_HELP,
94 formatter_class=argparse.RawDescriptionHelpFormatter,
95 )
96 dag_parser.add_argument(
97 "--dot",
98 action="store_true",
99 default=False,
100 help="Print DAG with .dot format.",
101 )
102 dag_parser.add_argument(
103 "--full",
104 action="store_true",
105 default=False,
106 help=(
107 "Show full DAG that the target belongs too, instead of "
108 "showing DAG consisting only of ancestors."
109 ),
110 )
111 dag_parser.add_argument(
112 "target",
113 nargs="?",
114 help="Stage or output to show pipeline for (optional). "
115 "Finds all stages in the workspace by default.",
116 )
117 dag_parser.set_defaults(func=CmdDAG)
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/command/dag.py b/dvc/command/dag.py
--- a/dvc/command/dag.py
+++ b/dvc/command/dag.py
@@ -30,7 +30,7 @@
return dot_file.getvalue()
-def _build(G, target=None, full=False):
+def _build(G, target=None, full=False, outs=False):
import networkx as nx
from dvc.repo.graph import get_pipeline, get_pipelines
@@ -44,8 +44,25 @@
else:
H = G
- def _relabel(stage):
- return stage.addressing
+ if outs:
+ G = nx.DiGraph()
+ for stage in H.nodes:
+ G.add_nodes_from(stage.outs)
+
+ for from_stage, to_stage in nx.edge_dfs(H):
+ G.add_edges_from(
+ [
+ (from_out, to_out)
+ for from_out in from_stage.outs
+ for to_out in to_stage.outs
+ ]
+ )
+ H = G
+
+ def _relabel(node):
+ from dvc.stage import Stage
+
+ return node.addressing if isinstance(node, Stage) else str(node)
return nx.relabel_nodes(H, _relabel, copy=False)
@@ -64,7 +81,12 @@
return 1
target = stages[0]
- G = _build(self.repo.graph, target=target, full=self.args.full,)
+ G = _build(
+ self.repo.graph,
+ target=target,
+ full=self.args.full,
+ outs=self.args.outs,
+ )
if self.args.dot:
logger.info(_show_dot(G))
@@ -108,6 +130,13 @@
"showing DAG consisting only of ancestors."
),
)
+ dag_parser.add_argument(
+ "-o",
+ "--outs",
+ action="store_true",
+ default=False,
+ help="Print output files instead of stages.",
+ )
dag_parser.add_argument(
"target",
nargs="?",
| {"golden_diff": "diff --git a/dvc/command/dag.py b/dvc/command/dag.py\n--- a/dvc/command/dag.py\n+++ b/dvc/command/dag.py\n@@ -30,7 +30,7 @@\n return dot_file.getvalue()\n \n \n-def _build(G, target=None, full=False):\n+def _build(G, target=None, full=False, outs=False):\n import networkx as nx\n \n from dvc.repo.graph import get_pipeline, get_pipelines\n@@ -44,8 +44,25 @@\n else:\n H = G\n \n- def _relabel(stage):\n- return stage.addressing\n+ if outs:\n+ G = nx.DiGraph()\n+ for stage in H.nodes:\n+ G.add_nodes_from(stage.outs)\n+\n+ for from_stage, to_stage in nx.edge_dfs(H):\n+ G.add_edges_from(\n+ [\n+ (from_out, to_out)\n+ for from_out in from_stage.outs\n+ for to_out in to_stage.outs\n+ ]\n+ )\n+ H = G\n+\n+ def _relabel(node):\n+ from dvc.stage import Stage\n+\n+ return node.addressing if isinstance(node, Stage) else str(node)\n \n return nx.relabel_nodes(H, _relabel, copy=False)\n \n@@ -64,7 +81,12 @@\n return 1\n target = stages[0]\n \n- G = _build(self.repo.graph, target=target, full=self.args.full,)\n+ G = _build(\n+ self.repo.graph,\n+ target=target,\n+ full=self.args.full,\n+ outs=self.args.outs,\n+ )\n \n if self.args.dot:\n logger.info(_show_dot(G))\n@@ -108,6 +130,13 @@\n \"showing DAG consisting only of ancestors.\"\n ),\n )\n+ dag_parser.add_argument(\n+ \"-o\",\n+ \"--outs\",\n+ action=\"store_true\",\n+ default=False,\n+ help=\"Print output files instead of stages.\",\n+ )\n dag_parser.add_argument(\n \"target\",\n nargs=\"?\",\n", "issue": "dvc dag --outs\nIn the `0.xx` version days, there was a `dvc pipeline show --outs` command that was able to show a DAG that included outputs. \r\nI thought this was a really nice feature. For instance, I have a `train_test_split` stage that creates two outputs, `train` and `test`, and it would be nice to see them taking different paths in the DAG.\r\nCan you maybe (re)implement this feature?\r\nThanks!\ndvc dag --outs\nIn the `0.xx` version days, there was a `dvc pipeline show --outs` command that was able to show a DAG that included outputs. \r\nI thought this was a really nice feature. 
For instance, I have a `train_test_split` stage that creates two outputs, `train` and `test`, and it would be nice to see them taking different paths in the DAG.\r\nCan you maybe (re)implement this feature?\r\nThanks!\n", "before_files": [{"content": "import argparse\nimport logging\n\nfrom dvc.command.base import CmdBase, append_doc_link\nfrom dvc.exceptions import DvcException\n\nlogger = logging.getLogger(__name__)\n\n\ndef _show_ascii(G):\n from dvc.dagascii import draw\n from dvc.repo.graph import get_pipelines\n\n pipelines = get_pipelines(G)\n\n ret = []\n for pipeline in pipelines:\n ret.append(draw(pipeline.nodes, pipeline.edges))\n\n return \"\\n\".join(ret)\n\n\ndef _show_dot(G):\n import io\n\n from networkx.drawing.nx_pydot import write_dot\n\n dot_file = io.StringIO()\n write_dot(G, dot_file)\n return dot_file.getvalue()\n\n\ndef _build(G, target=None, full=False):\n import networkx as nx\n\n from dvc.repo.graph import get_pipeline, get_pipelines\n\n if target:\n H = get_pipeline(get_pipelines(G), target)\n if not full:\n descendants = nx.descendants(G, target)\n descendants.add(target)\n H.remove_nodes_from(set(G.nodes()) - descendants)\n else:\n H = G\n\n def _relabel(stage):\n return stage.addressing\n\n return nx.relabel_nodes(H, _relabel, copy=False)\n\n\nclass CmdDAG(CmdBase):\n def run(self):\n try:\n target = None\n if self.args.target:\n stages = self.repo.collect(self.args.target)\n if len(stages) > 1:\n logger.error(\n f\"'{self.args.target}' contains more than one stage \"\n \"{stages}, please specify one stage\"\n )\n return 1\n target = stages[0]\n\n G = _build(self.repo.graph, target=target, full=self.args.full,)\n\n if self.args.dot:\n logger.info(_show_dot(G))\n else:\n from dvc.utils.pager import pager\n\n pager(_show_ascii(G))\n\n return 0\n except DvcException:\n msg = \"failed to show \"\n if self.args.target:\n msg += f\"a pipeline for '{target}'\"\n else:\n msg += \"pipelines\"\n logger.exception(msg)\n return 1\n\n\ndef add_parser(subparsers, parent_parser):\n DAG_HELP = \"Visualize DVC project DAG.\"\n dag_parser = subparsers.add_parser(\n \"dag\",\n parents=[parent_parser],\n description=append_doc_link(DAG_HELP, \"dag\"),\n help=DAG_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n dag_parser.add_argument(\n \"--dot\",\n action=\"store_true\",\n default=False,\n help=\"Print DAG with .dot format.\",\n )\n dag_parser.add_argument(\n \"--full\",\n action=\"store_true\",\n default=False,\n help=(\n \"Show full DAG that the target belongs too, instead of \"\n \"showing DAG consisting only of ancestors.\"\n ),\n )\n dag_parser.add_argument(\n \"target\",\n nargs=\"?\",\n help=\"Stage or output to show pipeline for (optional). 
\"\n \"Finds all stages in the workspace by default.\",\n )\n dag_parser.set_defaults(func=CmdDAG)\n", "path": "dvc/command/dag.py"}], "after_files": [{"content": "import argparse\nimport logging\n\nfrom dvc.command.base import CmdBase, append_doc_link\nfrom dvc.exceptions import DvcException\n\nlogger = logging.getLogger(__name__)\n\n\ndef _show_ascii(G):\n from dvc.dagascii import draw\n from dvc.repo.graph import get_pipelines\n\n pipelines = get_pipelines(G)\n\n ret = []\n for pipeline in pipelines:\n ret.append(draw(pipeline.nodes, pipeline.edges))\n\n return \"\\n\".join(ret)\n\n\ndef _show_dot(G):\n import io\n\n from networkx.drawing.nx_pydot import write_dot\n\n dot_file = io.StringIO()\n write_dot(G, dot_file)\n return dot_file.getvalue()\n\n\ndef _build(G, target=None, full=False, outs=False):\n import networkx as nx\n\n from dvc.repo.graph import get_pipeline, get_pipelines\n\n if target:\n H = get_pipeline(get_pipelines(G), target)\n if not full:\n descendants = nx.descendants(G, target)\n descendants.add(target)\n H.remove_nodes_from(set(G.nodes()) - descendants)\n else:\n H = G\n\n if outs:\n G = nx.DiGraph()\n for stage in H.nodes:\n G.add_nodes_from(stage.outs)\n\n for from_stage, to_stage in nx.edge_dfs(H):\n G.add_edges_from(\n [\n (from_out, to_out)\n for from_out in from_stage.outs\n for to_out in to_stage.outs\n ]\n )\n H = G\n\n def _relabel(node):\n from dvc.stage import Stage\n\n return node.addressing if isinstance(node, Stage) else str(node)\n\n return nx.relabel_nodes(H, _relabel, copy=False)\n\n\nclass CmdDAG(CmdBase):\n def run(self):\n try:\n target = None\n if self.args.target:\n stages = self.repo.collect(self.args.target)\n if len(stages) > 1:\n logger.error(\n f\"'{self.args.target}' contains more than one stage \"\n \"{stages}, please specify one stage\"\n )\n return 1\n target = stages[0]\n\n G = _build(\n self.repo.graph,\n target=target,\n full=self.args.full,\n outs=self.args.outs,\n )\n\n if self.args.dot:\n logger.info(_show_dot(G))\n else:\n from dvc.utils.pager import pager\n\n pager(_show_ascii(G))\n\n return 0\n except DvcException:\n msg = \"failed to show \"\n if self.args.target:\n msg += f\"a pipeline for '{target}'\"\n else:\n msg += \"pipelines\"\n logger.exception(msg)\n return 1\n\n\ndef add_parser(subparsers, parent_parser):\n DAG_HELP = \"Visualize DVC project DAG.\"\n dag_parser = subparsers.add_parser(\n \"dag\",\n parents=[parent_parser],\n description=append_doc_link(DAG_HELP, \"dag\"),\n help=DAG_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n dag_parser.add_argument(\n \"--dot\",\n action=\"store_true\",\n default=False,\n help=\"Print DAG with .dot format.\",\n )\n dag_parser.add_argument(\n \"--full\",\n action=\"store_true\",\n default=False,\n help=(\n \"Show full DAG that the target belongs too, instead of \"\n \"showing DAG consisting only of ancestors.\"\n ),\n )\n dag_parser.add_argument(\n \"-o\",\n \"--outs\",\n action=\"store_true\",\n default=False,\n help=\"Print output files instead of stages.\",\n )\n dag_parser.add_argument(\n \"target\",\n nargs=\"?\",\n help=\"Stage or output to show pipeline for (optional). \"\n \"Finds all stages in the workspace by default.\",\n )\n dag_parser.set_defaults(func=CmdDAG)\n", "path": "dvc/command/dag.py"}]} | 1,394 | 471 |
gh_patches_debug_32659 | rasdani/github-patches | git_diff | pypi__warehouse-12343 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 3.1 classifier filtering is broken
When [filtering by the `Programming Language :: Python :: 3.1` classifier on pypi.org][1], the results include packages which don't have that classifier - any package with a classifier matching `Programming Language :: Python :: 3.1*` is included. That is, packages for 3.10, 3.11, 3.12, and so on are included in the results.
[1]: https://pypi.org/search/?q=&o=&c=Programming+Language+%3A%3A+Python+%3A%3A+3.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/search/queries.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import re
14
15 from elasticsearch_dsl import Q
16
17 SEARCH_FIELDS = [
18 "author",
19 "author_email",
20 "description",
21 "download_url",
22 "home_page",
23 "keywords",
24 "license",
25 "maintainer",
26 "maintainer_email",
27 "normalized_name",
28 "platform",
29 "summary",
30 ]
31 SEARCH_BOOSTS = {
32 "name": 10,
33 "normalized_name": 10,
34 "description": 5,
35 "keywords": 5,
36 "summary": 5,
37 }
38 SEARCH_FILTER_ORDER = (
39 "Framework",
40 "Topic",
41 "Development Status",
42 "License",
43 "Programming Language",
44 "Operating System",
45 "Environment",
46 "Intended Audience",
47 "Natural Language",
48 )
49
50
51 def get_es_query(es, terms, order, classifiers):
52 """
53 Returns an Elasticsearch query from data from the request.
54 """
55 if not terms:
56 query = es.query()
57 else:
58 bool_query = gather_es_queries(terms)
59 query = es.query(bool_query)
60 query = query.suggest("name_suggestion", terms, term={"field": "name"})
61
62 # Require match to all specified classifiers
63 for classifier in classifiers:
64 query = query.query("prefix", classifiers=classifier)
65
66 query = query_for_order(query, order)
67 return query
68
69
70 def gather_es_queries(q):
71 quoted_string, unquoted_string = filter_query(q)
72 must = [form_query("phrase", i) for i in quoted_string] + [
73 form_query("best_fields", i) for i in unquoted_string
74 ]
75
76 bool_query = Q("bool", must=must)
77
78 # Allow to optionally match on prefix
79 # if ``q`` is longer than one character.
80 if len(q) > 1:
81 bool_query = bool_query | Q("prefix", normalized_name=q)
82 return bool_query
83
84
85 def filter_query(s):
86 """
87 Filters given query with the below regex
88 and returns lists of quoted and unquoted strings
89 """
90 matches = re.findall(r'(?:"([^"]*)")|([^"]*)', s)
91 result_quoted = [t[0].strip() for t in matches if t[0]]
92 result_unquoted = [t[1].strip() for t in matches if t[1]]
93 return result_quoted, result_unquoted
94
95
96 def form_query(query_type, query):
97 """
98 Returns a multi match query
99 """
100 fields = [
101 field + "^" + str(SEARCH_BOOSTS[field]) if field in SEARCH_BOOSTS else field
102 for field in SEARCH_FIELDS
103 ]
104 return Q("multi_match", fields=fields, query=query, type=query_type)
105
106
107 def query_for_order(query, order):
108 """
109 Applies transformations on the ES query based on the search order.
110
111 Order is assumed to be a string with the name of a field with an optional
112 hyphen to indicate descending sort order.
113 """
114 if order == "": # relevance should not sort
115 return query
116
117 field = order[order.find("-") + 1 :]
118 sort_info = {
119 field: {
120 "order": "desc" if order.startswith("-") else "asc",
121 "unmapped_type": "long",
122 }
123 }
124 query = query.sort(sort_info)
125 return query
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/search/queries.py b/warehouse/search/queries.py
--- a/warehouse/search/queries.py
+++ b/warehouse/search/queries.py
@@ -52,36 +52,45 @@
"""
Returns an Elasticsearch query from data from the request.
"""
+ classifier_q = Q(
+ "bool",
+ # Theh results must have all selected classifiers
+ must=[
+ Q(
+ "bool",
+ should=[
+ # Term search for the exact classifier
+ Q("term", classifiers=classifier),
+ # Prefix search for potential children classifiers
+ Q("prefix", classifiers=classifier + " :: "),
+ ],
+ )
+ for classifier in classifiers
+ ],
+ )
if not terms:
- query = es.query()
+ query = es.query(classifier_q) if classifiers else es.query()
else:
- bool_query = gather_es_queries(terms)
+ quoted_string, unquoted_string = filter_query(terms)
+ bool_query = Q(
+ "bool",
+ must=[form_query("phrase", i) for i in quoted_string]
+ + [form_query("best_fields", i) for i in unquoted_string]
+ + ([classifier_q] if classifiers else []),
+ )
+
+ # Allow to optionally match on prefix
+ # if ``q`` is longer than one character.
+ if len(terms) > 1:
+ bool_query = bool_query | Q("prefix", normalized_name=terms)
+
query = es.query(bool_query)
query = query.suggest("name_suggestion", terms, term={"field": "name"})
- # Require match to all specified classifiers
- for classifier in classifiers:
- query = query.query("prefix", classifiers=classifier)
-
query = query_for_order(query, order)
return query
-def gather_es_queries(q):
- quoted_string, unquoted_string = filter_query(q)
- must = [form_query("phrase", i) for i in quoted_string] + [
- form_query("best_fields", i) for i in unquoted_string
- ]
-
- bool_query = Q("bool", must=must)
-
- # Allow to optionally match on prefix
- # if ``q`` is longer than one character.
- if len(q) > 1:
- bool_query = bool_query | Q("prefix", normalized_name=q)
- return bool_query
-
-
def filter_query(s):
"""
Filters given query with the below regex
| {"golden_diff": "diff --git a/warehouse/search/queries.py b/warehouse/search/queries.py\n--- a/warehouse/search/queries.py\n+++ b/warehouse/search/queries.py\n@@ -52,36 +52,45 @@\n \"\"\"\n Returns an Elasticsearch query from data from the request.\n \"\"\"\n+ classifier_q = Q(\n+ \"bool\",\n+ # Theh results must have all selected classifiers\n+ must=[\n+ Q(\n+ \"bool\",\n+ should=[\n+ # Term search for the exact classifier\n+ Q(\"term\", classifiers=classifier),\n+ # Prefix search for potential children classifiers\n+ Q(\"prefix\", classifiers=classifier + \" :: \"),\n+ ],\n+ )\n+ for classifier in classifiers\n+ ],\n+ )\n if not terms:\n- query = es.query()\n+ query = es.query(classifier_q) if classifiers else es.query()\n else:\n- bool_query = gather_es_queries(terms)\n+ quoted_string, unquoted_string = filter_query(terms)\n+ bool_query = Q(\n+ \"bool\",\n+ must=[form_query(\"phrase\", i) for i in quoted_string]\n+ + [form_query(\"best_fields\", i) for i in unquoted_string]\n+ + ([classifier_q] if classifiers else []),\n+ )\n+\n+ # Allow to optionally match on prefix\n+ # if ``q`` is longer than one character.\n+ if len(terms) > 1:\n+ bool_query = bool_query | Q(\"prefix\", normalized_name=terms)\n+\n query = es.query(bool_query)\n query = query.suggest(\"name_suggestion\", terms, term={\"field\": \"name\"})\n \n- # Require match to all specified classifiers\n- for classifier in classifiers:\n- query = query.query(\"prefix\", classifiers=classifier)\n-\n query = query_for_order(query, order)\n return query\n \n \n-def gather_es_queries(q):\n- quoted_string, unquoted_string = filter_query(q)\n- must = [form_query(\"phrase\", i) for i in quoted_string] + [\n- form_query(\"best_fields\", i) for i in unquoted_string\n- ]\n-\n- bool_query = Q(\"bool\", must=must)\n-\n- # Allow to optionally match on prefix\n- # if ``q`` is longer than one character.\n- if len(q) > 1:\n- bool_query = bool_query | Q(\"prefix\", normalized_name=q)\n- return bool_query\n-\n-\n def filter_query(s):\n \"\"\"\n Filters given query with the below regex\n", "issue": "Python 3.1 classifier filtering is broken\nWhen [filtering by the `Programming Language :: Python :: 3.1` classifier on pypi.org][1], the results include packages which don't have that classifier - any package with a classifier matching `Programming Language :: Python :: 3.1*` is included. 
That is, packages for 3.10, 3.11, 3.12, and so on are included in the results.\r\n\r\n[1]: https://pypi.org/search/?q=&o=&c=Programming+Language+%3A%3A+Python+%3A%3A+3.1\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\n\nfrom elasticsearch_dsl import Q\n\nSEARCH_FIELDS = [\n \"author\",\n \"author_email\",\n \"description\",\n \"download_url\",\n \"home_page\",\n \"keywords\",\n \"license\",\n \"maintainer\",\n \"maintainer_email\",\n \"normalized_name\",\n \"platform\",\n \"summary\",\n]\nSEARCH_BOOSTS = {\n \"name\": 10,\n \"normalized_name\": 10,\n \"description\": 5,\n \"keywords\": 5,\n \"summary\": 5,\n}\nSEARCH_FILTER_ORDER = (\n \"Framework\",\n \"Topic\",\n \"Development Status\",\n \"License\",\n \"Programming Language\",\n \"Operating System\",\n \"Environment\",\n \"Intended Audience\",\n \"Natural Language\",\n)\n\n\ndef get_es_query(es, terms, order, classifiers):\n \"\"\"\n Returns an Elasticsearch query from data from the request.\n \"\"\"\n if not terms:\n query = es.query()\n else:\n bool_query = gather_es_queries(terms)\n query = es.query(bool_query)\n query = query.suggest(\"name_suggestion\", terms, term={\"field\": \"name\"})\n\n # Require match to all specified classifiers\n for classifier in classifiers:\n query = query.query(\"prefix\", classifiers=classifier)\n\n query = query_for_order(query, order)\n return query\n\n\ndef gather_es_queries(q):\n quoted_string, unquoted_string = filter_query(q)\n must = [form_query(\"phrase\", i) for i in quoted_string] + [\n form_query(\"best_fields\", i) for i in unquoted_string\n ]\n\n bool_query = Q(\"bool\", must=must)\n\n # Allow to optionally match on prefix\n # if ``q`` is longer than one character.\n if len(q) > 1:\n bool_query = bool_query | Q(\"prefix\", normalized_name=q)\n return bool_query\n\n\ndef filter_query(s):\n \"\"\"\n Filters given query with the below regex\n and returns lists of quoted and unquoted strings\n \"\"\"\n matches = re.findall(r'(?:\"([^\"]*)\")|([^\"]*)', s)\n result_quoted = [t[0].strip() for t in matches if t[0]]\n result_unquoted = [t[1].strip() for t in matches if t[1]]\n return result_quoted, result_unquoted\n\n\ndef form_query(query_type, query):\n \"\"\"\n Returns a multi match query\n \"\"\"\n fields = [\n field + \"^\" + str(SEARCH_BOOSTS[field]) if field in SEARCH_BOOSTS else field\n for field in SEARCH_FIELDS\n ]\n return Q(\"multi_match\", fields=fields, query=query, type=query_type)\n\n\ndef query_for_order(query, order):\n \"\"\"\n Applies transformations on the ES query based on the search order.\n\n Order is assumed to be a string with the name of a field with an optional\n hyphen to indicate descending sort order.\n \"\"\"\n if order == \"\": # relevance should not sort\n return query\n\n field = order[order.find(\"-\") + 1 :]\n sort_info = {\n field: {\n \"order\": \"desc\" if order.startswith(\"-\") else \"asc\",\n \"unmapped_type\": \"long\",\n }\n }\n query = query.sort(sort_info)\n return query\n", "path": 
"warehouse/search/queries.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\n\nfrom elasticsearch_dsl import Q\n\nSEARCH_FIELDS = [\n \"author\",\n \"author_email\",\n \"description\",\n \"download_url\",\n \"home_page\",\n \"keywords\",\n \"license\",\n \"maintainer\",\n \"maintainer_email\",\n \"normalized_name\",\n \"platform\",\n \"summary\",\n]\nSEARCH_BOOSTS = {\n \"name\": 10,\n \"normalized_name\": 10,\n \"description\": 5,\n \"keywords\": 5,\n \"summary\": 5,\n}\nSEARCH_FILTER_ORDER = (\n \"Framework\",\n \"Topic\",\n \"Development Status\",\n \"License\",\n \"Programming Language\",\n \"Operating System\",\n \"Environment\",\n \"Intended Audience\",\n \"Natural Language\",\n)\n\n\ndef get_es_query(es, terms, order, classifiers):\n \"\"\"\n Returns an Elasticsearch query from data from the request.\n \"\"\"\n classifier_q = Q(\n \"bool\",\n # Theh results must have all selected classifiers\n must=[\n Q(\n \"bool\",\n should=[\n # Term search for the exact classifier\n Q(\"term\", classifiers=classifier),\n # Prefix search for potential children classifiers\n Q(\"prefix\", classifiers=classifier + \" :: \"),\n ],\n )\n for classifier in classifiers\n ],\n )\n if not terms:\n query = es.query(classifier_q) if classifiers else es.query()\n else:\n quoted_string, unquoted_string = filter_query(terms)\n bool_query = Q(\n \"bool\",\n must=[form_query(\"phrase\", i) for i in quoted_string]\n + [form_query(\"best_fields\", i) for i in unquoted_string]\n + ([classifier_q] if classifiers else []),\n )\n\n # Allow to optionally match on prefix\n # if ``q`` is longer than one character.\n if len(terms) > 1:\n bool_query = bool_query | Q(\"prefix\", normalized_name=terms)\n\n query = es.query(bool_query)\n query = query.suggest(\"name_suggestion\", terms, term={\"field\": \"name\"})\n\n query = query_for_order(query, order)\n return query\n\n\ndef filter_query(s):\n \"\"\"\n Filters given query with the below regex\n and returns lists of quoted and unquoted strings\n \"\"\"\n matches = re.findall(r'(?:\"([^\"]*)\")|([^\"]*)', s)\n result_quoted = [t[0].strip() for t in matches if t[0]]\n result_unquoted = [t[1].strip() for t in matches if t[1]]\n return result_quoted, result_unquoted\n\n\ndef form_query(query_type, query):\n \"\"\"\n Returns a multi match query\n \"\"\"\n fields = [\n field + \"^\" + str(SEARCH_BOOSTS[field]) if field in SEARCH_BOOSTS else field\n for field in SEARCH_FIELDS\n ]\n return Q(\"multi_match\", fields=fields, query=query, type=query_type)\n\n\ndef query_for_order(query, order):\n \"\"\"\n Applies transformations on the ES query based on the search order.\n\n Order is assumed to be a string with the name of a field with an optional\n hyphen to indicate descending sort order.\n \"\"\"\n if order == \"\": # relevance should not sort\n return query\n\n field = order[order.find(\"-\") + 1 :]\n sort_info = {\n field: {\n \"order\": \"desc\" if order.startswith(\"-\") else \"asc\",\n \"unmapped_type\": \"long\",\n }\n }\n query = 
query.sort(sort_info)\n return query\n", "path": "warehouse/search/queries.py"}]} | 1,512 | 561 |
gh_patches_debug_29422 | rasdani/github-patches | git_diff | freedomofpress__securedrop-7035 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
determine post-upgrade failure-mode for a SHA-1-signed submission key
## Description
After #6948 (for #6399), redwood will refuse to encrypt to a submission key with a SHA-1 signature.
After #6928, `securedrop-admin sdconfig` will reject a submission key with a SHA-1 signature. This check guarantees that new and reconfigured instances will comply with #6948.
What will happen to an instance with a SHA-1-signed signature after upgrading to v2.7.0?
## Possible approaches
| Option | Documentation changes | Code changes | Implication |
| --- | --- | --- | --- |
| Fail open, but log | optional | ✓ | Admin must monitor logs and/or OSSEC alerts. |
| Fail open, but document | ✓ | ✗ | Admin must monitor release notes or check documentation. |
| Fail closed | optional | ✓[1] | Admin can contact us for help. |
**Notes:**
1. @legoktm observes that, without a code change to handle this case, Apache will come back up after reboot even if the `postinst` script fails under `unattended-upgrades`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/journalist.py`
Content:
```
1 from encryption import EncryptionManager, GpgKeyNotFoundError
2 from execution import asynchronous
3 from journalist_app import create_app
4 from models import Source
5 from sdconfig import SecureDropConfig
6
7 config = SecureDropConfig.get_current()
8 # app is imported by journalist.wsgi
9 app = create_app(config)
10
11
12 @asynchronous
13 def prime_keycache() -> None:
14 """Pre-load the source public keys into Redis."""
15 with app.app_context():
16 encryption_mgr = EncryptionManager.get_default()
17 for source in Source.query.filter_by(pending=False, deleted_at=None).all():
18 try:
19 encryption_mgr.get_source_public_key(source.filesystem_id)
20 except GpgKeyNotFoundError:
21 pass
22
23
24 prime_keycache()
25
26
27 if __name__ == "__main__": # pragma: no cover
28 debug = getattr(config, "env", "prod") != "prod"
29 # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host
30 app.run(debug=debug, host="0.0.0.0", port=8081)
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/journalist.py b/securedrop/journalist.py
--- a/securedrop/journalist.py
+++ b/securedrop/journalist.py
@@ -1,9 +1,13 @@
+import sys
+
from encryption import EncryptionManager, GpgKeyNotFoundError
from execution import asynchronous
from journalist_app import create_app
from models import Source
from sdconfig import SecureDropConfig
+import redwood
+
config = SecureDropConfig.get_current()
# app is imported by journalist.wsgi
app = create_app(config)
@@ -21,10 +25,28 @@
pass
-prime_keycache()
+def validate_journalist_key() -> None:
+ """Verify the journalist PGP key is valid"""
+ encryption_mgr = EncryptionManager.get_default()
+ # First check that we can read it
+ try:
+ journalist_key = encryption_mgr.get_journalist_public_key()
+ except Exception as e:
+ print(f"ERROR: Unable to read journalist public key: {e}", file=sys.stderr)
+ app.logger.error(f"ERROR: Unable to read journalist public key: {e}")
+ sys.exit(1)
+ # And then what we read is valid
+ try:
+ redwood.is_valid_public_key(journalist_key)
+ except redwood.RedwoodError as e:
+ print(f"ERROR: Journalist public key is not valid: {e}", file=sys.stderr)
+ app.logger.error(f"ERROR: Journalist public key is not valid: {e}")
+ sys.exit(1)
if __name__ == "__main__": # pragma: no cover
+ validate_journalist_key()
+ prime_keycache()
debug = getattr(config, "env", "prod") != "prod"
# nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host
app.run(debug=debug, host="0.0.0.0", port=8081)
| {"golden_diff": "diff --git a/securedrop/journalist.py b/securedrop/journalist.py\n--- a/securedrop/journalist.py\n+++ b/securedrop/journalist.py\n@@ -1,9 +1,13 @@\n+import sys\n+\n from encryption import EncryptionManager, GpgKeyNotFoundError\n from execution import asynchronous\n from journalist_app import create_app\n from models import Source\n from sdconfig import SecureDropConfig\n \n+import redwood\n+\n config = SecureDropConfig.get_current()\n # app is imported by journalist.wsgi\n app = create_app(config)\n@@ -21,10 +25,28 @@\n pass\n \n \n-prime_keycache()\n+def validate_journalist_key() -> None:\n+ \"\"\"Verify the journalist PGP key is valid\"\"\"\n+ encryption_mgr = EncryptionManager.get_default()\n+ # First check that we can read it\n+ try:\n+ journalist_key = encryption_mgr.get_journalist_public_key()\n+ except Exception as e:\n+ print(f\"ERROR: Unable to read journalist public key: {e}\", file=sys.stderr)\n+ app.logger.error(f\"ERROR: Unable to read journalist public key: {e}\")\n+ sys.exit(1)\n+ # And then what we read is valid\n+ try:\n+ redwood.is_valid_public_key(journalist_key)\n+ except redwood.RedwoodError as e:\n+ print(f\"ERROR: Journalist public key is not valid: {e}\", file=sys.stderr)\n+ app.logger.error(f\"ERROR: Journalist public key is not valid: {e}\")\n+ sys.exit(1)\n \n \n if __name__ == \"__main__\": # pragma: no cover\n+ validate_journalist_key()\n+ prime_keycache()\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8081)\n", "issue": "determine post-upgrade failure-mode for a SHA-1-signed submission key\n## Description\r\n\r\nAfter #6948 (for #6399), redwood will refuse to encrypt to a submission key with a SHA-1 signature.\r\n\r\nAfter #6928, `securedrop-admin sdconfig` will reject a submission key with a SHA-1 signature. This check guarantees that new and reconfigured instances will comply with #6948.\r\n\r\nWhat will happen to an instance with a SHA-1-signed signature after upgrading to v2.7.0?\r\n\r\n## Possible approaches\r\n\r\n| Option | Documentation changes | Code changes | Implication |\r\n| --- | --- | --- | --- |\r\n| Fail open, but log | optional | \u2713 | Admin must monitor logs and/or OSSEC alerts. |\r\n| Fail open, but document | \u2713 | \u2717 | Admin must monitor release notes or check documentation. |\r\n| Fail closed | optional | \u2713[1] | Admin can contact us for help. |\r\n\r\n**Notes:**\r\n1. 
@legoktm observes that, without a code change to handle this case, Apache will come back up after reboot even if the `postinst` script fails under `unattended-upgrades`.\n", "before_files": [{"content": "from encryption import EncryptionManager, GpgKeyNotFoundError\nfrom execution import asynchronous\nfrom journalist_app import create_app\nfrom models import Source\nfrom sdconfig import SecureDropConfig\n\nconfig = SecureDropConfig.get_current()\n# app is imported by journalist.wsgi\napp = create_app(config)\n\n\n@asynchronous\ndef prime_keycache() -> None:\n \"\"\"Pre-load the source public keys into Redis.\"\"\"\n with app.app_context():\n encryption_mgr = EncryptionManager.get_default()\n for source in Source.query.filter_by(pending=False, deleted_at=None).all():\n try:\n encryption_mgr.get_source_public_key(source.filesystem_id)\n except GpgKeyNotFoundError:\n pass\n\n\nprime_keycache()\n\n\nif __name__ == \"__main__\": # pragma: no cover\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8081)\n", "path": "securedrop/journalist.py"}], "after_files": [{"content": "import sys\n\nfrom encryption import EncryptionManager, GpgKeyNotFoundError\nfrom execution import asynchronous\nfrom journalist_app import create_app\nfrom models import Source\nfrom sdconfig import SecureDropConfig\n\nimport redwood\n\nconfig = SecureDropConfig.get_current()\n# app is imported by journalist.wsgi\napp = create_app(config)\n\n\n@asynchronous\ndef prime_keycache() -> None:\n \"\"\"Pre-load the source public keys into Redis.\"\"\"\n with app.app_context():\n encryption_mgr = EncryptionManager.get_default()\n for source in Source.query.filter_by(pending=False, deleted_at=None).all():\n try:\n encryption_mgr.get_source_public_key(source.filesystem_id)\n except GpgKeyNotFoundError:\n pass\n\n\ndef validate_journalist_key() -> None:\n \"\"\"Verify the journalist PGP key is valid\"\"\"\n encryption_mgr = EncryptionManager.get_default()\n # First check that we can read it\n try:\n journalist_key = encryption_mgr.get_journalist_public_key()\n except Exception as e:\n print(f\"ERROR: Unable to read journalist public key: {e}\", file=sys.stderr)\n app.logger.error(f\"ERROR: Unable to read journalist public key: {e}\")\n sys.exit(1)\n # And then what we read is valid\n try:\n redwood.is_valid_public_key(journalist_key)\n except redwood.RedwoodError as e:\n print(f\"ERROR: Journalist public key is not valid: {e}\", file=sys.stderr)\n app.logger.error(f\"ERROR: Journalist public key is not valid: {e}\")\n sys.exit(1)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n validate_journalist_key()\n prime_keycache()\n debug = getattr(config, \"env\", \"prod\") != \"prod\"\n # nosemgrep: python.flask.security.audit.app-run-param-config.avoid_app_run_with_bad_host\n app.run(debug=debug, host=\"0.0.0.0\", port=8081)\n", "path": "securedrop/journalist.py"}]} | 797 | 440 |
gh_patches_debug_12398 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1590 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
improve `healthy()` check for node
See pre-commit/actions#45
for `language_version: system` hooks this looks like:
```
eslint...................................................................Failed
- hook id: eslint
- exit code: 127
/home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node: line 5: /opt/hostedtoolcache/node/14.8.0/x64/bin/node: No such file or directory
/home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node: line 5: /opt/hostedtoolcache/node/14.8.0/x64/bin/node: No such file or directory
##[error]The process '/opt/hostedtoolcache/Python/3.8.5/x64/bin/pre-commit' failed with exit code 1
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/node.py`
Content:
```
1 import contextlib
2 import functools
3 import os
4 import sys
5 from typing import Generator
6 from typing import Sequence
7 from typing import Tuple
8
9 import pre_commit.constants as C
10 from pre_commit import parse_shebang
11 from pre_commit.envcontext import envcontext
12 from pre_commit.envcontext import PatchesT
13 from pre_commit.envcontext import UNSET
14 from pre_commit.envcontext import Var
15 from pre_commit.hook import Hook
16 from pre_commit.languages import helpers
17 from pre_commit.languages.python import bin_dir
18 from pre_commit.prefix import Prefix
19 from pre_commit.util import clean_path_on_failure
20 from pre_commit.util import cmd_output
21 from pre_commit.util import cmd_output_b
22
23 ENVIRONMENT_DIR = 'node_env'
24 healthy = helpers.basic_healthy
25
26
27 @functools.lru_cache(maxsize=1)
28 def get_default_version() -> str:
29 # nodeenv does not yet support `-n system` on windows
30 if sys.platform == 'win32':
31 return C.DEFAULT
32 # if node is already installed, we can save a bunch of setup time by
33 # using the installed version
34 elif all(parse_shebang.find_executable(exe) for exe in ('node', 'npm')):
35 return 'system'
36 else:
37 return C.DEFAULT
38
39
40 def _envdir(prefix: Prefix, version: str) -> str:
41 directory = helpers.environment_dir(ENVIRONMENT_DIR, version)
42 return prefix.path(directory)
43
44
45 def get_env_patch(venv: str) -> PatchesT:
46 if sys.platform == 'cygwin': # pragma: no cover
47 _, win_venv, _ = cmd_output('cygpath', '-w', venv)
48 install_prefix = fr'{win_venv.strip()}\bin'
49 lib_dir = 'lib'
50 elif sys.platform == 'win32': # pragma: no cover
51 install_prefix = bin_dir(venv)
52 lib_dir = 'Scripts'
53 else: # pragma: win32 no cover
54 install_prefix = venv
55 lib_dir = 'lib'
56 return (
57 ('NODE_VIRTUAL_ENV', venv),
58 ('NPM_CONFIG_PREFIX', install_prefix),
59 ('npm_config_prefix', install_prefix),
60 ('NPM_CONFIG_USERCONFIG', UNSET),
61 ('npm_config_userconfig', UNSET),
62 ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')),
63 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),
64 )
65
66
67 @contextlib.contextmanager
68 def in_env(
69 prefix: Prefix,
70 language_version: str,
71 ) -> Generator[None, None, None]:
72 with envcontext(get_env_patch(_envdir(prefix, language_version))):
73 yield
74
75
76 def install_environment(
77 prefix: Prefix, version: str, additional_dependencies: Sequence[str],
78 ) -> None:
79 additional_dependencies = tuple(additional_dependencies)
80 assert prefix.exists('package.json')
81 envdir = _envdir(prefix, version)
82
83 # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath
84 if sys.platform == 'win32': # pragma: no cover
85 envdir = fr'\\?\{os.path.normpath(envdir)}'
86 with clean_path_on_failure(envdir):
87 cmd = [
88 sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir,
89 ]
90 if version != C.DEFAULT:
91 cmd.extend(['-n', version])
92 cmd_output_b(*cmd)
93
94 with in_env(prefix, version):
95 # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449
96 # install as if we installed from git
97 helpers.run_setup_cmd(prefix, ('npm', 'install'))
98 helpers.run_setup_cmd(
99 prefix,
100 ('npm', 'install', '-g', '.', *additional_dependencies),
101 )
102
103
104 def run_hook(
105 hook: Hook,
106 file_args: Sequence[str],
107 color: bool,
108 ) -> Tuple[int, bytes]:
109 with in_env(hook.prefix, hook.language_version):
110 return helpers.run_xargs(hook, hook.cmd, file_args, color=color)
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py
--- a/pre_commit/languages/node.py
+++ b/pre_commit/languages/node.py
@@ -21,7 +21,6 @@
from pre_commit.util import cmd_output_b
ENVIRONMENT_DIR = 'node_env'
-healthy = helpers.basic_healthy
@functools.lru_cache(maxsize=1)
@@ -73,6 +72,12 @@
yield
+def healthy(prefix: Prefix, language_version: str) -> bool:
+ with in_env(prefix, language_version):
+ retcode, _, _ = cmd_output_b('node', '--version', retcode=None)
+ return retcode == 0
+
+
def install_environment(
prefix: Prefix, version: str, additional_dependencies: Sequence[str],
) -> None:
| {"golden_diff": "diff --git a/pre_commit/languages/node.py b/pre_commit/languages/node.py\n--- a/pre_commit/languages/node.py\n+++ b/pre_commit/languages/node.py\n@@ -21,7 +21,6 @@\n from pre_commit.util import cmd_output_b\n \n ENVIRONMENT_DIR = 'node_env'\n-healthy = helpers.basic_healthy\n \n \n @functools.lru_cache(maxsize=1)\n@@ -73,6 +72,12 @@\n yield\n \n \n+def healthy(prefix: Prefix, language_version: str) -> bool:\n+ with in_env(prefix, language_version):\n+ retcode, _, _ = cmd_output_b('node', '--version', retcode=None)\n+ return retcode == 0\n+\n+\n def install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n ) -> None:\n", "issue": "improve `healthy()` check for node\nSee pre-commit/actions#45\r\n\r\nfor `language_version: system` hooks this looks like:\r\n\r\n```\r\neslint...................................................................Failed\r\n- hook id: eslint\r\n- exit code: 127\r\n\r\n/home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node: line 5: /opt/hostedtoolcache/node/14.8.0/x64/bin/node: No such file or directory\r\n/home/runner/.cache/pre-commit/repoibq27hfw/node_env-system/bin/node: line 5: /opt/hostedtoolcache/node/14.8.0/x64/bin/node: No such file or directory\r\n\r\n##[error]The process '/opt/hostedtoolcache/Python/3.8.5/x64/bin/pre-commit' failed with exit code 1\r\n```\r\n\r\n\n", "before_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import parse_shebang\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.languages.python import bin_dir\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'node_env'\nhealthy = helpers.basic_healthy\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n # nodeenv does not yet support `-n system` on windows\n if sys.platform == 'win32':\n return C.DEFAULT\n # if node is already installed, we can save a bunch of setup time by\n # using the installed version\n elif all(parse_shebang.find_executable(exe) for exe in ('node', 'npm')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef _envdir(prefix: Prefix, version: str) -> str:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n return prefix.path(directory)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n if sys.platform == 'cygwin': # pragma: no cover\n _, win_venv, _ = cmd_output('cygpath', '-w', venv)\n install_prefix = fr'{win_venv.strip()}\\bin'\n lib_dir = 'lib'\n elif sys.platform == 'win32': # pragma: no cover\n install_prefix = bin_dir(venv)\n lib_dir = 'Scripts'\n else: # pragma: win32 no cover\n install_prefix = venv\n lib_dir = 'lib'\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', install_prefix),\n ('npm_config_prefix', install_prefix),\n ('NPM_CONFIG_USERCONFIG', UNSET),\n ('npm_config_userconfig', UNSET),\n ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n with 
envcontext(get_env_patch(_envdir(prefix, language_version))):\n yield\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n additional_dependencies = tuple(additional_dependencies)\n assert prefix.exists('package.json')\n envdir = _envdir(prefix, version)\n\n # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath\n if sys.platform == 'win32': # pragma: no cover\n envdir = fr'\\\\?\\{os.path.normpath(envdir)}'\n with clean_path_on_failure(envdir):\n cmd = [\n sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir,\n ]\n if version != C.DEFAULT:\n cmd.extend(['-n', version])\n cmd_output_b(*cmd)\n\n with in_env(prefix, version):\n # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449\n # install as if we installed from git\n helpers.run_setup_cmd(prefix, ('npm', 'install'))\n helpers.run_setup_cmd(\n prefix,\n ('npm', 'install', '-g', '.', *additional_dependencies),\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/node.py"}], "after_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import parse_shebang\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.languages.python import bin_dir\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'node_env'\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n # nodeenv does not yet support `-n system` on windows\n if sys.platform == 'win32':\n return C.DEFAULT\n # if node is already installed, we can save a bunch of setup time by\n # using the installed version\n elif all(parse_shebang.find_executable(exe) for exe in ('node', 'npm')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef _envdir(prefix: Prefix, version: str) -> str:\n directory = helpers.environment_dir(ENVIRONMENT_DIR, version)\n return prefix.path(directory)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n if sys.platform == 'cygwin': # pragma: no cover\n _, win_venv, _ = cmd_output('cygpath', '-w', venv)\n install_prefix = fr'{win_venv.strip()}\\bin'\n lib_dir = 'lib'\n elif sys.platform == 'win32': # pragma: no cover\n install_prefix = bin_dir(venv)\n lib_dir = 'Scripts'\n else: # pragma: win32 no cover\n install_prefix = venv\n lib_dir = 'lib'\n return (\n ('NODE_VIRTUAL_ENV', venv),\n ('NPM_CONFIG_PREFIX', install_prefix),\n ('npm_config_prefix', install_prefix),\n ('NPM_CONFIG_USERCONFIG', UNSET),\n ('npm_config_userconfig', UNSET),\n ('NODE_PATH', os.path.join(venv, lib_dir, 'node_modules')),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(\n prefix: Prefix,\n language_version: str,\n) -> Generator[None, None, None]:\n with envcontext(get_env_patch(_envdir(prefix, language_version))):\n yield\n\n\ndef healthy(prefix: Prefix, 
language_version: str) -> bool:\n with in_env(prefix, language_version):\n retcode, _, _ = cmd_output_b('node', '--version', retcode=None)\n return retcode == 0\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n additional_dependencies = tuple(additional_dependencies)\n assert prefix.exists('package.json')\n envdir = _envdir(prefix, version)\n\n # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx?f=255&MSPPError=-2147217396#maxpath\n if sys.platform == 'win32': # pragma: no cover\n envdir = fr'\\\\?\\{os.path.normpath(envdir)}'\n with clean_path_on_failure(envdir):\n cmd = [\n sys.executable, '-mnodeenv', '--prebuilt', '--clean-src', envdir,\n ]\n if version != C.DEFAULT:\n cmd.extend(['-n', version])\n cmd_output_b(*cmd)\n\n with in_env(prefix, version):\n # https://npm.community/t/npm-install-g-git-vs-git-clone-cd-npm-install-g/5449\n # install as if we installed from git\n helpers.run_setup_cmd(prefix, ('npm', 'install'))\n helpers.run_setup_cmd(\n prefix,\n ('npm', 'install', '-g', '.', *additional_dependencies),\n )\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/node.py"}]} | 1,608 | 183 |
gh_patches_debug_16984 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-1872 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider sallybeauty is broken
During the global build at 2021-05-26-14-42-23, spider **sallybeauty** failed with **2712 features** and **5 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/sallybeauty.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/sallybeauty.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/sallybeauty.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/sallybeauty.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 from locations.items import GeojsonPointItem
4 from urllib.parse import urlencode
5 import json
6 import csv
7 from locations.hours import OpeningHours
8 from scrapy.selector import Selector
9
10
11 class SallySpider(scrapy.Spider):
12 name = "sallybeauty"
13 item_attributes = { 'brand': "Sally Beauty" }
14 allowed_domains = ["sallybeauty.com"]
15
16 def start_requests(self):
17 base_url = "https://www.sallybeauty.com/on/demandware.store/Sites-SA-Site/default/Stores-FindStores?"
18
19 point_files = [
20 './locations/searchable_points/us_centroids_100mile_radius.csv',
21 './locations/searchable_points/ca_centroids_100mile_radius.csv'
22 ]
23
24 params = {
25 "showmap": "true",
26 "radius": "100",
27 }
28
29 for point_file in point_files:
30 with open(point_file) as points:
31 next(points)
32 for point in points:
33 _, lat, lon = point.strip().split(',')
34 params.update({"lat": lat, "long": lon})
35 yield scrapy.Request(url=base_url + urlencode(params))
36
37 def parse_hours(self, hours):
38 hrs = Selector(text=hours)
39 days = hrs.xpath('//div[@class="store-hours-day"]/text()').extract()
40 hours = hrs.xpath('//div[@class="store-hours-day"]/span/text()').extract()
41
42 opening_hours = OpeningHours()
43
44 for d, h in zip(days, hours):
45 try:
46 day = d.strip(': ')
47 open_time, close_time = h.split(' - ')
48 open_time = open_time.lstrip('0')
49 opening_hours.add_range(day=day[:2],
50 open_time=open_time,
51 close_time=close_time,
52 time_format="%I:%M %p")
53 except:
54 continue
55
56 return opening_hours.as_opening_hours()
57
58 def parse(self, response):
59 jdata = json.loads(response.body_as_unicode())
60
61 for row in jdata.get('stores',[]):
62
63 properties = {
64 'ref': row["ID"],
65 'name': row["name"],
66 'addr_full': " ".join([row["address1"], row.get("address2", "") or ""]).strip(),
67 'city': row["city"],
68 'postcode': row["postalCode"],
69 'lat': row["latitude"],
70 'lon': row["longitude"],
71 'phone': row["phone"],
72 'state': row["stateCode"],
73 }
74
75 hours = self.parse_hours(row["storeHours"])
76 if hours:
77 properties['opening_hours'] = hours
78
79 yield GeojsonPointItem(**properties)
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/sallybeauty.py b/locations/spiders/sallybeauty.py
--- a/locations/spiders/sallybeauty.py
+++ b/locations/spiders/sallybeauty.py
@@ -58,7 +58,7 @@
def parse(self, response):
jdata = json.loads(response.body_as_unicode())
- for row in jdata.get('stores',[]):
+ for row in jdata.get('stores', []):
properties = {
'ref': row["ID"],
@@ -72,8 +72,11 @@
'state': row["stateCode"],
}
- hours = self.parse_hours(row["storeHours"])
- if hours:
- properties['opening_hours'] = hours
+ store_hours = row.get("storeHours")
+ if store_hours:
+ hours = self.parse_hours(store_hours)
+
+ if hours:
+ properties['opening_hours'] = hours
yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/sallybeauty.py b/locations/spiders/sallybeauty.py\n--- a/locations/spiders/sallybeauty.py\n+++ b/locations/spiders/sallybeauty.py\n@@ -58,7 +58,7 @@\n def parse(self, response):\n jdata = json.loads(response.body_as_unicode())\n \n- for row in jdata.get('stores',[]):\n+ for row in jdata.get('stores', []):\n \n properties = {\n 'ref': row[\"ID\"],\n@@ -72,8 +72,11 @@\n 'state': row[\"stateCode\"],\n }\n \n- hours = self.parse_hours(row[\"storeHours\"])\n- if hours:\n- properties['opening_hours'] = hours\n+ store_hours = row.get(\"storeHours\")\n+ if store_hours:\n+ hours = self.parse_hours(store_hours)\n+\n+ if hours:\n+ properties['opening_hours'] = hours\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider sallybeauty is broken\nDuring the global build at 2021-05-26-14-42-23, spider **sallybeauty** failed with **2712 features** and **5 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/sallybeauty.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/sallybeauty.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/sallybeauty.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom urllib.parse import urlencode\nimport json\nimport csv\nfrom locations.hours import OpeningHours\nfrom scrapy.selector import Selector\n\n\nclass SallySpider(scrapy.Spider):\n name = \"sallybeauty\"\n item_attributes = { 'brand': \"Sally Beauty\" }\n allowed_domains = [\"sallybeauty.com\"]\n\n def start_requests(self):\n base_url = \"https://www.sallybeauty.com/on/demandware.store/Sites-SA-Site/default/Stores-FindStores?\"\n\n point_files = [\n './locations/searchable_points/us_centroids_100mile_radius.csv',\n './locations/searchable_points/ca_centroids_100mile_radius.csv'\n ]\n\n params = {\n \"showmap\": \"true\",\n \"radius\": \"100\",\n }\n\n for point_file in point_files:\n with open(point_file) as points:\n next(points)\n for point in points:\n _, lat, lon = point.strip().split(',')\n params.update({\"lat\": lat, \"long\": lon})\n yield scrapy.Request(url=base_url + urlencode(params))\n\n def parse_hours(self, hours):\n hrs = Selector(text=hours)\n days = hrs.xpath('//div[@class=\"store-hours-day\"]/text()').extract()\n hours = hrs.xpath('//div[@class=\"store-hours-day\"]/span/text()').extract()\n\n opening_hours = OpeningHours()\n\n for d, h in zip(days, hours):\n try:\n day = d.strip(': ')\n open_time, close_time = h.split(' - ')\n open_time = open_time.lstrip('0')\n opening_hours.add_range(day=day[:2],\n open_time=open_time,\n close_time=close_time,\n time_format=\"%I:%M %p\")\n except:\n continue\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n jdata = json.loads(response.body_as_unicode())\n\n for row in jdata.get('stores',[]):\n\n properties = {\n 'ref': row[\"ID\"],\n 'name': row[\"name\"],\n 'addr_full': \" \".join([row[\"address1\"], row.get(\"address2\", \"\") or \"\"]).strip(),\n 'city': row[\"city\"],\n 'postcode': row[\"postalCode\"],\n 'lat': row[\"latitude\"],\n 'lon': row[\"longitude\"],\n 'phone': row[\"phone\"],\n 'state': row[\"stateCode\"],\n }\n\n hours = self.parse_hours(row[\"storeHours\"])\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/sallybeauty.py"}], "after_files": [{"content": "# -*- coding: utf-8 
-*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom urllib.parse import urlencode\nimport json\nimport csv\nfrom locations.hours import OpeningHours\nfrom scrapy.selector import Selector\n\n\nclass SallySpider(scrapy.Spider):\n name = \"sallybeauty\"\n item_attributes = { 'brand': \"Sally Beauty\" }\n allowed_domains = [\"sallybeauty.com\"]\n\n def start_requests(self):\n base_url = \"https://www.sallybeauty.com/on/demandware.store/Sites-SA-Site/default/Stores-FindStores?\"\n\n point_files = [\n './locations/searchable_points/us_centroids_100mile_radius.csv',\n './locations/searchable_points/ca_centroids_100mile_radius.csv'\n ]\n\n params = {\n \"showmap\": \"true\",\n \"radius\": \"100\",\n }\n\n for point_file in point_files:\n with open(point_file) as points:\n next(points)\n for point in points:\n _, lat, lon = point.strip().split(',')\n params.update({\"lat\": lat, \"long\": lon})\n yield scrapy.Request(url=base_url + urlencode(params))\n\n def parse_hours(self, hours):\n hrs = Selector(text=hours)\n days = hrs.xpath('//div[@class=\"store-hours-day\"]/text()').extract()\n hours = hrs.xpath('//div[@class=\"store-hours-day\"]/span/text()').extract()\n\n opening_hours = OpeningHours()\n\n for d, h in zip(days, hours):\n try:\n day = d.strip(': ')\n open_time, close_time = h.split(' - ')\n open_time = open_time.lstrip('0')\n opening_hours.add_range(day=day[:2],\n open_time=open_time,\n close_time=close_time,\n time_format=\"%I:%M %p\")\n except:\n continue\n\n return opening_hours.as_opening_hours()\n\n def parse(self, response):\n jdata = json.loads(response.body_as_unicode())\n\n for row in jdata.get('stores', []):\n\n properties = {\n 'ref': row[\"ID\"],\n 'name': row[\"name\"],\n 'addr_full': \" \".join([row[\"address1\"], row.get(\"address2\", \"\") or \"\"]).strip(),\n 'city': row[\"city\"],\n 'postcode': row[\"postalCode\"],\n 'lat': row[\"latitude\"],\n 'lon': row[\"longitude\"],\n 'phone': row[\"phone\"],\n 'state': row[\"stateCode\"],\n }\n\n store_hours = row.get(\"storeHours\")\n if store_hours:\n hours = self.parse_hours(store_hours)\n\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/sallybeauty.py"}]} | 1,187 | 222 |
gh_patches_debug_55584 | rasdani/github-patches | git_diff | wagtail__wagtail-1873 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Migrating to 1.1 Migration File Errors
I am attempting to migrate to 1.1 and I am getting an error involving the migration files.
```
Migration wagtailcore.0017_change_edit_page_permission_description dependencies reference nonexistent parent node (u'wagtailcore', u'0001_squashed_0016_change_page_url_path_to_text_field')
```
The last migration for wagtail core in my migrations table is 0015. Since 0017 refers to 0001_squashed_0016 as a dependency and since I have not applied that migration, it's turning up as an error.
I tried manually applying 0016, but the error is preventing that from happening.
I know the issue queue is not intended for support questions, but I was speaking in the #django irc channel and they told me to check and see if the migrations were autogenerated. They said that normally migrations refer to the one before it and not the squashed ones.
Migrating to 1.1 Migration File Errors
I am attempting to migrate to 1.1 and I am getting an error involving the migration files.
```
Migration wagtailcore.0017_change_edit_page_permission_description dependencies reference nonexistent parent node (u'wagtailcore', u'0001_squashed_0016_change_page_url_path_to_text_field')
```
The last migration for wagtail core in my migrations table is 0015. Since 0017 refers to 0001_squashed_0016 as a dependency and since I have not applied that migration, it's turning up as an error.
I tried manually applying 0016, but the error is preventing that from happening.
I know the issue queue is not intended for support questions, but I was speaking in the #django irc channel and they told me to check and see if the migrations were autogenerated. They said that normally migrations refer to the one before it and not the squashed ones.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from django.db import models, migrations
5
6
7 class Migration(migrations.Migration):
8
9 dependencies = [
10 ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),
11 ]
12
13 operations = [
14 migrations.AlterField(
15 model_name='grouppagepermission',
16 name='permission_type',
17 field=models.CharField(choices=[('add', 'Add/edit pages you own'), ('edit', 'Edit any page'), ('publish', 'Publish any page'), ('lock', 'Lock/unlock any page')], max_length=20, verbose_name='Permission type'),
18 preserve_default=True,
19 ),
20 ]
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py b/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py
--- a/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py
+++ b/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py
@@ -7,7 +7,7 @@
class Migration(migrations.Migration):
dependencies = [
- ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),
+ ('wagtailcore', '0016_change_page_url_path_to_text_field'),
]
operations = [
| {"golden_diff": "diff --git a/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py b/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py\n--- a/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py\n+++ b/wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py\n@@ -7,7 +7,7 @@\n class Migration(migrations.Migration):\n \n dependencies = [\n- ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),\n+ ('wagtailcore', '0016_change_page_url_path_to_text_field'),\n ]\n \n operations = [\n", "issue": "Migrating to 1.1 Migration File Errors\nI am attempting to migrate to 1.1 and I am getting an error involving the migration files.\n\n```\nMigration wagtailcore.0017_change_edit_page_permission_description dependencies reference nonexistent parent node (u'wagtailcore', u'0001_squashed_0016_change_page_url_path_to_text_field')\n```\n\nThe last migration for wagtail core in my migrations table is 0015. Since 0017 refers to 0001_squashed_0016 as a dependency and since I have not applied that migration, it's turn up as an error.\n\nI tried manually applying 0016, but the error is preventing that from happening.\n\nI know the issue queue is not intended for support questions, but I was speaking in the #django irc channel and they told me to check and see if the migrations were autogenerated. They said that normally migrations refer to the one before it and not the squashed ones.\n\nMigrating to 1.1 Migration File Errors\nI am attempting to migrate to 1.1 and I am getting an error involving the migration files.\n\n```\nMigration wagtailcore.0017_change_edit_page_permission_description dependencies reference nonexistent parent node (u'wagtailcore', u'0001_squashed_0016_change_page_url_path_to_text_field')\n```\n\nThe last migration for wagtail core in my migrations table is 0015. Since 0017 refers to 0001_squashed_0016 as a dependency and since I have not applied that migration, it's turn up as an error.\n\nI tried manually applying 0016, but the error is preventing that from happening.\n\nI know the issue queue is not intended for support questions, but I was speaking in the #django irc channel and they told me to check and see if the migrations were autogenerated. 
They said that normally migrations refer to the one before it and not the squashed ones.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='grouppagepermission',\n name='permission_type',\n field=models.CharField(choices=[('add', 'Add/edit pages you own'), ('edit', 'Edit any page'), ('publish', 'Publish any page'), ('lock', 'Lock/unlock any page')], max_length=20, verbose_name='Permission type'),\n preserve_default=True,\n ),\n ]\n", "path": "wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('wagtailcore', '0016_change_page_url_path_to_text_field'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='grouppagepermission',\n name='permission_type',\n field=models.CharField(choices=[('add', 'Add/edit pages you own'), ('edit', 'Edit any page'), ('publish', 'Publish any page'), ('lock', 'Lock/unlock any page')], max_length=20, verbose_name='Permission type'),\n preserve_default=True,\n ),\n ]\n", "path": "wagtail/wagtailcore/migrations/0017_change_edit_page_permission_description.py"}]} | 902 | 171 |
gh_patches_debug_33817 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-530 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
opentelemetry-instrument command fails if incompatible instrumentation is found
If an instrumentation is installed for a library that is not found in the environment, the instrument command raises the following exception:
```
❯ opentelemetry-instrument python main.py
Instrumenting of flask failed
Traceback (most recent call last):
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py", line 71, in _load_instrumentors
conflict = get_dist_dependency_conflicts(entry_point.dist)
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py", line 33, in get_dist_dependency_conflicts
return get_dependency_conflicts(deps)
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py", line 41, in get_dependency_conflicts
get_distribution(str(dep))
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py", line 482, in get_distribution
dist = get_provider(dist)
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py", line 358, in get_provider
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
IndexError: list index out of range
Failed to auto initialize opentelemetry
Traceback (most recent call last):
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py", line 111, in initialize
_load_instrumentors(distro)
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py", line 85, in _load_instrumentors
raise exc
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py", line 71, in _load_instrumentors
conflict = get_dist_dependency_conflicts(entry_point.dist)
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py", line 33, in get_dist_dependency_conflicts
return get_dependency_conflicts(deps)
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py", line 41, in get_dependency_conflicts
get_distribution(str(dep))
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py", line 482, in get_distribution
dist = get_provider(dist)
File "/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py", line 358, in get_provider
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
IndexError: list index out of range
```
bootstrap command does not install any instrumentations for libraries that are not present in the environment so this would only happen if someone manually installed an instrumentation package for a library they're not using. So this is not a deal breaker and doesn't require an immediate hotfix. That said, this IS a bug as the intended behavior of instrument command is to silently ignore such instrumentations.
--- END ISSUE ---
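For context, the crash is reproducible outside the instrument command. Requirement strings taken from `dist.requires(("instruments",))` keep their `extra == "instruments"` environment marker, and `pkg_resources.get_distribution()` does not handle that case cleanly when the referenced package is absent. The sketch below is illustrative only: `some_uninstalled_lib` is a placeholder for any package missing from the environment, and the exact exception (IndexError as in the traceback above, or RequirementParseError) depends on the setuptools version.

```python
from pkg_resources import get_distribution

# A requirement string as produced by dist.requires(("instruments",)):
# the '; extra == "instruments"' marker is still attached.
dep = 'some_uninstalled_lib>=1.0; extra == "instruments"'

try:
    get_distribution(dep)
except Exception as exc:
    # working_set.find() returns None, require() skips the requirement because
    # its marker evaluates to False, and the [0] lookup raises IndexError,
    # which is neither VersionConflict nor DistributionNotFound, so
    # dependencies.py never turns it into a DependencyConflict.
    print(type(exc).__name__, exc)
```

So a robust dependency check cannot assume that only `VersionConflict` and `DistributionNotFound` can escape `get_distribution()` for these requirement strings.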
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py`
Content:
```
1 from typing import Collection, Optional
2
3 from pkg_resources import (
4 Distribution,
5 DistributionNotFound,
6 VersionConflict,
7 get_distribution,
8 )
9
10
11 class DependencyConflict:
12 required: str = None
13 found: Optional[str] = None
14
15 def __init__(self, required, found=None):
16 self.required = required
17 self.found = found
18
19 def __str__(self):
20 return 'DependencyConflict: requested: "{0}" but found: "{1}"'.format(
21 self.required, self.found
22 )
23
24
25 def get_dist_dependency_conflicts(
26 dist: Distribution,
27 ) -> Optional[DependencyConflict]:
28 deps = [
29 dep
30 for dep in dist.requires(("instruments",))
31 if dep not in dist.requires()
32 ]
33 return get_dependency_conflicts(deps)
34
35
36 def get_dependency_conflicts(
37 deps: Collection[str],
38 ) -> Optional[DependencyConflict]:
39 for dep in deps:
40 try:
41 get_distribution(str(dep))
42 except VersionConflict as exc:
43 return DependencyConflict(dep, exc.dist)
44 except DistributionNotFound:
45 return DependencyConflict(dep)
46 return None
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py
--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py
+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py
@@ -1,12 +1,16 @@
+from logging import getLogger
from typing import Collection, Optional
from pkg_resources import (
Distribution,
DistributionNotFound,
+ RequirementParseError,
VersionConflict,
get_distribution,
)
+logger = getLogger(__file__)
+
class DependencyConflict:
required: str = None
@@ -25,12 +29,19 @@
def get_dist_dependency_conflicts(
dist: Distribution,
) -> Optional[DependencyConflict]:
- deps = [
- dep
- for dep in dist.requires(("instruments",))
- if dep not in dist.requires()
- ]
- return get_dependency_conflicts(deps)
+ main_deps = dist.requires()
+ instrumentation_deps = []
+ for dep in dist.requires(("instruments",)):
+ if dep not in main_deps:
+ # we set marker to none so string representation of the dependency looks like
+ # requests ~= 1.0
+ # instead of
+ # requests ~= 1.0; extra = "instruments"
+ # which does not work with `get_distribution()`
+ dep.marker = None
+ instrumentation_deps.append(str(dep))
+
+ return get_dependency_conflicts(instrumentation_deps)
def get_dependency_conflicts(
@@ -38,9 +49,16 @@
) -> Optional[DependencyConflict]:
for dep in deps:
try:
- get_distribution(str(dep))
+ get_distribution(dep)
except VersionConflict as exc:
return DependencyConflict(dep, exc.dist)
except DistributionNotFound:
return DependencyConflict(dep)
+ except RequirementParseError as exc:
+ logger.warning(
+ 'error parsing dependency, reporting as a conflict: "%s" - %s',
+ dep,
+ exc,
+ )
+ return DependencyConflict(dep)
return None
| {"golden_diff": "diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py\n@@ -1,12 +1,16 @@\n+from logging import getLogger\n from typing import Collection, Optional\n \n from pkg_resources import (\n Distribution,\n DistributionNotFound,\n+ RequirementParseError,\n VersionConflict,\n get_distribution,\n )\n \n+logger = getLogger(__file__)\n+\n \n class DependencyConflict:\n required: str = None\n@@ -25,12 +29,19 @@\n def get_dist_dependency_conflicts(\n dist: Distribution,\n ) -> Optional[DependencyConflict]:\n- deps = [\n- dep\n- for dep in dist.requires((\"instruments\",))\n- if dep not in dist.requires()\n- ]\n- return get_dependency_conflicts(deps)\n+ main_deps = dist.requires()\n+ instrumentation_deps = []\n+ for dep in dist.requires((\"instruments\",)):\n+ if dep not in main_deps:\n+ # we set marker to none so string representation of the dependency looks like\n+ # requests ~= 1.0\n+ # instead of\n+ # requests ~= 1.0; extra = \"instruments\"\n+ # which does not work with `get_distribution()`\n+ dep.marker = None\n+ instrumentation_deps.append(str(dep))\n+\n+ return get_dependency_conflicts(instrumentation_deps)\n \n \n def get_dependency_conflicts(\n@@ -38,9 +49,16 @@\n ) -> Optional[DependencyConflict]:\n for dep in deps:\n try:\n- get_distribution(str(dep))\n+ get_distribution(dep)\n except VersionConflict as exc:\n return DependencyConflict(dep, exc.dist)\n except DistributionNotFound:\n return DependencyConflict(dep)\n+ except RequirementParseError as exc:\n+ logger.warning(\n+ 'error parsing dependency, reporting as a conflict: \"%s\" - %s',\n+ dep,\n+ exc,\n+ )\n+ return DependencyConflict(dep)\n return None\n", "issue": "opentelemetry-instrument command fails if incompatible instrumentation is found\nIf an instrumentation is installed for a library that is not found in the environment, the instrument command raises the following exception:\r\n\r\n\r\n```\r\n\u276f opentelemetry-instrument python main.py\r\nInstrumenting of flask failed\r\nTraceback (most recent call last):\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 71, in _load_instrumentors\r\n conflict = get_dist_dependency_conflicts(entry_point.dist)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\", line 33, in get_dist_dependency_conflicts\r\n return get_dependency_conflicts(deps)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\", line 41, in get_dependency_conflicts\r\n get_distribution(str(dep))\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 482, in get_distribution\r\n dist = get_provider(dist)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 358, in get_provider\r\n return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]\r\nIndexError: list index out of range\r\nFailed to auto initialize opentelemetry\r\nTraceback (most recent call last):\r\n File 
\"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 111, in initialize\r\n _load_instrumentors(distro)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 85, in _load_instrumentors\r\n raise exc\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/auto_instrumentation/sitecustomize.py\", line 71, in _load_instrumentors\r\n conflict = get_dist_dependency_conflicts(entry_point.dist)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\", line 33, in get_dist_dependency_conflicts\r\n return get_dependency_conflicts(deps)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/opentelemetry/instrumentation/dependencies.py\", line 41, in get_dependency_conflicts\r\n get_distribution(str(dep))\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 482, in get_distribution\r\n dist = get_provider(dist)\r\n File \"/Users/olone/playground/splunk-otel-py/venv/lib/python3.8/site-packages/pkg_resources/__init__.py\", line 358, in get_provider\r\n return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]\r\nIndexError: list index out of range\r\n```\r\n\r\nbootstrap command does not install any instrumentations for libraries that are not present in the environment so this would only happen if someone manually installed an instrumentation package for a library they're not using. So this is not a deal breaker and doesn't require an immediate hotfix. 
That said, this IS a bug as the intended behavior of instrument command is to silently ignore such instrumentations.\n", "before_files": [{"content": "from typing import Collection, Optional\n\nfrom pkg_resources import (\n Distribution,\n DistributionNotFound,\n VersionConflict,\n get_distribution,\n)\n\n\nclass DependencyConflict:\n required: str = None\n found: Optional[str] = None\n\n def __init__(self, required, found=None):\n self.required = required\n self.found = found\n\n def __str__(self):\n return 'DependencyConflict: requested: \"{0}\" but found: \"{1}\"'.format(\n self.required, self.found\n )\n\n\ndef get_dist_dependency_conflicts(\n dist: Distribution,\n) -> Optional[DependencyConflict]:\n deps = [\n dep\n for dep in dist.requires((\"instruments\",))\n if dep not in dist.requires()\n ]\n return get_dependency_conflicts(deps)\n\n\ndef get_dependency_conflicts(\n deps: Collection[str],\n) -> Optional[DependencyConflict]:\n for dep in deps:\n try:\n get_distribution(str(dep))\n except VersionConflict as exc:\n return DependencyConflict(dep, exc.dist)\n except DistributionNotFound:\n return DependencyConflict(dep)\n return None\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py"}], "after_files": [{"content": "from logging import getLogger\nfrom typing import Collection, Optional\n\nfrom pkg_resources import (\n Distribution,\n DistributionNotFound,\n RequirementParseError,\n VersionConflict,\n get_distribution,\n)\n\nlogger = getLogger(__file__)\n\n\nclass DependencyConflict:\n required: str = None\n found: Optional[str] = None\n\n def __init__(self, required, found=None):\n self.required = required\n self.found = found\n\n def __str__(self):\n return 'DependencyConflict: requested: \"{0}\" but found: \"{1}\"'.format(\n self.required, self.found\n )\n\n\ndef get_dist_dependency_conflicts(\n dist: Distribution,\n) -> Optional[DependencyConflict]:\n main_deps = dist.requires()\n instrumentation_deps = []\n for dep in dist.requires((\"instruments\",)):\n if dep not in main_deps:\n # we set marker to none so string representation of the dependency looks like\n # requests ~= 1.0\n # instead of\n # requests ~= 1.0; extra = \"instruments\"\n # which does not work with `get_distribution()`\n dep.marker = None\n instrumentation_deps.append(str(dep))\n\n return get_dependency_conflicts(instrumentation_deps)\n\n\ndef get_dependency_conflicts(\n deps: Collection[str],\n) -> Optional[DependencyConflict]:\n for dep in deps:\n try:\n get_distribution(dep)\n except VersionConflict as exc:\n return DependencyConflict(dep, exc.dist)\n except DistributionNotFound:\n return DependencyConflict(dep)\n except RequirementParseError as exc:\n logger.warning(\n 'error parsing dependency, reporting as a conflict: \"%s\" - %s',\n dep,\n exc,\n )\n return DependencyConflict(dep)\n return None\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/dependencies.py"}]} | 1,454 | 483 |
gh_patches_debug_8973 | rasdani/github-patches | git_diff | spesmilo__electrum-2164 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HTTPS cert expired on LabelSync server
The cert on https://sync.bytesized-hosting.com:9090/ has expired and the LabelSync plugin stopped working
--- END ISSUE ---
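The report can be confirmed from a plain Python 3.7+ shell: once the certificate is past its notAfter date, the TLS handshake fails verification. Sketch only, assuming the host is still reachable on that port:

```python
import socket
import ssl

host, port = "sync.bytesized-hosting.com", 9090

try:
    with socket.create_connection((host, port), timeout=10) as sock:
        ctx = ssl.create_default_context()
        with ctx.wrap_socket(sock, server_hostname=host) as tls:
            print("handshake ok, certificate valid until", tls.getpeercert()["notAfter"])
except ssl.SSLCertVerificationError as exc:
    # An expired certificate surfaces here as "certificate has expired".
    print("verification failed:", exc.verify_message)
```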
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/labels/labels.py`
Content:
```
1 import hashlib
2 import requests
3 import threading
4 import json
5 import sys
6 import traceback
7
8 import aes
9 import base64
10
11 import electrum
12 from electrum.plugins import BasePlugin, hook
13 from electrum.i18n import _
14
15
16
17
18 class LabelsPlugin(BasePlugin):
19
20 def __init__(self, parent, config, name):
21 BasePlugin.__init__(self, parent, config, name)
22 self.target_host = 'sync.bytesized-hosting.com:9090'
23 self.wallets = {}
24
25 def encode(self, wallet, msg):
26 password, iv, wallet_id = self.wallets[wallet]
27 encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv,
28 msg.encode('utf8'))
29 return base64.b64encode(encrypted)
30
31 def decode(self, wallet, message):
32 password, iv, wallet_id = self.wallets[wallet]
33 decoded = base64.b64decode(message)
34 decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)
35 return decrypted.decode('utf8')
36
37 def get_nonce(self, wallet):
38 # nonce is the nonce to be used with the next change
39 nonce = wallet.storage.get('wallet_nonce')
40 if nonce is None:
41 nonce = 1
42 self.set_nonce(wallet, nonce)
43 return nonce
44
45 def set_nonce(self, wallet, nonce):
46 self.print_error("set", wallet.basename(), "nonce to", nonce)
47 wallet.storage.put("wallet_nonce", nonce)
48
49 @hook
50 def set_label(self, wallet, item, label):
51 if not wallet in self.wallets:
52 return
53 nonce = self.get_nonce(wallet)
54 wallet_id = self.wallets[wallet][2]
55 bundle = {"walletId": wallet_id,
56 "walletNonce": nonce,
57 "externalId": self.encode(wallet, item),
58 "encryptedLabel": self.encode(wallet, label)}
59 t = threading.Thread(target=self.do_request,
60 args=["POST", "/label", False, bundle])
61 t.setDaemon(True)
62 t.start()
63 # Caller will write the wallet
64 self.set_nonce(wallet, nonce + 1)
65
66 def do_request(self, method, url = "/labels", is_batch=False, data=None):
67 url = 'https://' + self.target_host + url
68 kwargs = {'headers': {}}
69 if method == 'GET' and data:
70 kwargs['params'] = data
71 elif method == 'POST' and data:
72 kwargs['data'] = json.dumps(data)
73 kwargs['headers']['Content-Type'] = 'application/json'
74 response = requests.request(method, url, **kwargs)
75 if response.status_code != 200:
76 raise BaseException(response.status_code, response.text)
77 response = response.json()
78 if "error" in response:
79 raise BaseException(response["error"])
80 return response
81
82 def push_thread(self, wallet):
83 wallet_id = self.wallets[wallet][2]
84 bundle = {"labels": [],
85 "walletId": wallet_id,
86 "walletNonce": self.get_nonce(wallet)}
87 for key, value in wallet.labels.iteritems():
88 try:
89 encoded_key = self.encode(wallet, key)
90 encoded_value = self.encode(wallet, value)
91 except:
92 self.print_error('cannot encode', repr(key), repr(value))
93 continue
94 bundle["labels"].append({'encryptedLabel': encoded_value,
95 'externalId': encoded_key})
96 self.do_request("POST", "/labels", True, bundle)
97
98 def pull_thread(self, wallet, force):
99 wallet_id = self.wallets[wallet][2]
100 nonce = 1 if force else self.get_nonce(wallet) - 1
101 self.print_error("asking for labels since nonce", nonce)
102 try:
103 response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id) ))
104 if response["labels"] is None:
105 self.print_error('no new labels')
106 return
107 result = {}
108 for label in response["labels"]:
109 try:
110 key = self.decode(wallet, label["externalId"])
111 value = self.decode(wallet, label["encryptedLabel"])
112 except:
113 continue
114 try:
115 json.dumps(key)
116 json.dumps(value)
117 except:
118 self.print_error('error: no json', key)
119 continue
120 result[key] = value
121
122 for key, value in result.items():
123 if force or not wallet.labels.get(key):
124 wallet.labels[key] = value
125
126 self.print_error("received %d labels" % len(response))
127 # do not write to disk because we're in a daemon thread
128 wallet.storage.put('labels', wallet.labels)
129 self.set_nonce(wallet, response["nonce"] + 1)
130 self.on_pulled(wallet)
131
132 except Exception as e:
133 traceback.print_exc(file=sys.stderr)
134 self.print_error("could not retrieve labels")
135
136 def start_wallet(self, wallet):
137 nonce = self.get_nonce(wallet)
138 self.print_error("wallet", wallet.basename(), "nonce is", nonce)
139 mpk = wallet.get_fingerprint()
140 if not mpk:
141 return
142 password = hashlib.sha1(mpk).digest().encode('hex')[:32]
143 iv = hashlib.sha256(password).digest()[:16]
144 wallet_id = hashlib.sha256(mpk).digest().encode('hex')
145 self.wallets[wallet] = (password, iv, wallet_id)
146 # If there is an auth token we can try to actually start syncing
147 t = threading.Thread(target=self.pull_thread, args=(wallet, False))
148 t.setDaemon(True)
149 t.start()
150
151 def stop_wallet(self, wallet):
152 self.wallets.pop(wallet, None)
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/labels/labels.py b/plugins/labels/labels.py
--- a/plugins/labels/labels.py
+++ b/plugins/labels/labels.py
@@ -5,7 +5,6 @@
import sys
import traceback
-import aes
import base64
import electrum
@@ -19,7 +18,7 @@
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
- self.target_host = 'sync.bytesized-hosting.com:9090'
+ self.target_host = 'labels.bauerj.eu'
self.wallets = {}
def encode(self, wallet, msg):
| {"golden_diff": "diff --git a/plugins/labels/labels.py b/plugins/labels/labels.py\n--- a/plugins/labels/labels.py\n+++ b/plugins/labels/labels.py\n@@ -5,7 +5,6 @@\n import sys\n import traceback\n \n-import aes\n import base64\n \n import electrum\n@@ -19,7 +18,7 @@\n \n def __init__(self, parent, config, name):\n BasePlugin.__init__(self, parent, config, name)\n- self.target_host = 'sync.bytesized-hosting.com:9090'\n+ self.target_host = 'labels.bauerj.eu'\n self.wallets = {}\n \n def encode(self, wallet, msg):\n", "issue": "HTTPS cert expired on LabelSync server\nThe cert on https://sync.bytesized-hosting.com:9090/ has expired and the LabelSync plugin stopped working \n", "before_files": [{"content": "import hashlib\nimport requests\nimport threading\nimport json\nimport sys\nimport traceback\n\nimport aes\nimport base64\n\nimport electrum\nfrom electrum.plugins import BasePlugin, hook\nfrom electrum.i18n import _\n\n\n\n\nclass LabelsPlugin(BasePlugin):\n\n def __init__(self, parent, config, name):\n BasePlugin.__init__(self, parent, config, name)\n self.target_host = 'sync.bytesized-hosting.com:9090'\n self.wallets = {}\n\n def encode(self, wallet, msg):\n password, iv, wallet_id = self.wallets[wallet]\n encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv,\n msg.encode('utf8'))\n return base64.b64encode(encrypted)\n\n def decode(self, wallet, message):\n password, iv, wallet_id = self.wallets[wallet]\n decoded = base64.b64decode(message)\n decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)\n return decrypted.decode('utf8')\n\n def get_nonce(self, wallet):\n # nonce is the nonce to be used with the next change\n nonce = wallet.storage.get('wallet_nonce')\n if nonce is None:\n nonce = 1\n self.set_nonce(wallet, nonce)\n return nonce\n\n def set_nonce(self, wallet, nonce):\n self.print_error(\"set\", wallet.basename(), \"nonce to\", nonce)\n wallet.storage.put(\"wallet_nonce\", nonce)\n\n @hook\n def set_label(self, wallet, item, label):\n if not wallet in self.wallets:\n return\n nonce = self.get_nonce(wallet)\n wallet_id = self.wallets[wallet][2]\n bundle = {\"walletId\": wallet_id,\n \"walletNonce\": nonce,\n \"externalId\": self.encode(wallet, item),\n \"encryptedLabel\": self.encode(wallet, label)}\n t = threading.Thread(target=self.do_request,\n args=[\"POST\", \"/label\", False, bundle])\n t.setDaemon(True)\n t.start()\n # Caller will write the wallet\n self.set_nonce(wallet, nonce + 1)\n\n def do_request(self, method, url = \"/labels\", is_batch=False, data=None):\n url = 'https://' + self.target_host + url\n kwargs = {'headers': {}}\n if method == 'GET' and data:\n kwargs['params'] = data\n elif method == 'POST' and data:\n kwargs['data'] = json.dumps(data)\n kwargs['headers']['Content-Type'] = 'application/json'\n response = requests.request(method, url, **kwargs)\n if response.status_code != 200:\n raise BaseException(response.status_code, response.text)\n response = response.json()\n if \"error\" in response:\n raise BaseException(response[\"error\"])\n return response\n\n def push_thread(self, wallet):\n wallet_id = self.wallets[wallet][2]\n bundle = {\"labels\": [],\n \"walletId\": wallet_id,\n \"walletNonce\": self.get_nonce(wallet)}\n for key, value in wallet.labels.iteritems():\n try:\n encoded_key = self.encode(wallet, key)\n encoded_value = self.encode(wallet, value)\n except:\n self.print_error('cannot encode', repr(key), repr(value))\n continue\n bundle[\"labels\"].append({'encryptedLabel': encoded_value,\n 'externalId': encoded_key})\n 
self.do_request(\"POST\", \"/labels\", True, bundle)\n\n def pull_thread(self, wallet, force):\n wallet_id = self.wallets[wallet][2]\n nonce = 1 if force else self.get_nonce(wallet) - 1\n self.print_error(\"asking for labels since nonce\", nonce)\n try:\n response = self.do_request(\"GET\", (\"/labels/since/%d/for/%s\" % (nonce, wallet_id) ))\n if response[\"labels\"] is None:\n self.print_error('no new labels')\n return\n result = {}\n for label in response[\"labels\"]:\n try:\n key = self.decode(wallet, label[\"externalId\"])\n value = self.decode(wallet, label[\"encryptedLabel\"])\n except:\n continue\n try:\n json.dumps(key)\n json.dumps(value)\n except:\n self.print_error('error: no json', key)\n continue\n result[key] = value\n\n for key, value in result.items():\n if force or not wallet.labels.get(key):\n wallet.labels[key] = value\n\n self.print_error(\"received %d labels\" % len(response))\n # do not write to disk because we're in a daemon thread\n wallet.storage.put('labels', wallet.labels)\n self.set_nonce(wallet, response[\"nonce\"] + 1)\n self.on_pulled(wallet)\n\n except Exception as e:\n traceback.print_exc(file=sys.stderr)\n self.print_error(\"could not retrieve labels\")\n\n def start_wallet(self, wallet):\n nonce = self.get_nonce(wallet)\n self.print_error(\"wallet\", wallet.basename(), \"nonce is\", nonce)\n mpk = wallet.get_fingerprint()\n if not mpk:\n return\n password = hashlib.sha1(mpk).digest().encode('hex')[:32]\n iv = hashlib.sha256(password).digest()[:16]\n wallet_id = hashlib.sha256(mpk).digest().encode('hex')\n self.wallets[wallet] = (password, iv, wallet_id)\n # If there is an auth token we can try to actually start syncing\n t = threading.Thread(target=self.pull_thread, args=(wallet, False))\n t.setDaemon(True)\n t.start()\n\n def stop_wallet(self, wallet):\n self.wallets.pop(wallet, None)\n", "path": "plugins/labels/labels.py"}], "after_files": [{"content": "import hashlib\nimport requests\nimport threading\nimport json\nimport sys\nimport traceback\n\nimport base64\n\nimport electrum\nfrom electrum.plugins import BasePlugin, hook\nfrom electrum.i18n import _\n\n\n\n\nclass LabelsPlugin(BasePlugin):\n\n def __init__(self, parent, config, name):\n BasePlugin.__init__(self, parent, config, name)\n self.target_host = 'labels.bauerj.eu'\n self.wallets = {}\n\n def encode(self, wallet, msg):\n password, iv, wallet_id = self.wallets[wallet]\n encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv,\n msg.encode('utf8'))\n return base64.b64encode(encrypted)\n\n def decode(self, wallet, message):\n password, iv, wallet_id = self.wallets[wallet]\n decoded = base64.b64decode(message)\n decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)\n return decrypted.decode('utf8')\n\n def get_nonce(self, wallet):\n # nonce is the nonce to be used with the next change\n nonce = wallet.storage.get('wallet_nonce')\n if nonce is None:\n nonce = 1\n self.set_nonce(wallet, nonce)\n return nonce\n\n def set_nonce(self, wallet, nonce):\n self.print_error(\"set\", wallet.basename(), \"nonce to\", nonce)\n wallet.storage.put(\"wallet_nonce\", nonce)\n\n @hook\n def set_label(self, wallet, item, label):\n if not wallet in self.wallets:\n return\n nonce = self.get_nonce(wallet)\n wallet_id = self.wallets[wallet][2]\n bundle = {\"walletId\": wallet_id,\n \"walletNonce\": nonce,\n \"externalId\": self.encode(wallet, item),\n \"encryptedLabel\": self.encode(wallet, label)}\n t = threading.Thread(target=self.do_request,\n args=[\"POST\", \"/label\", False, bundle])\n 
t.setDaemon(True)\n t.start()\n # Caller will write the wallet\n self.set_nonce(wallet, nonce + 1)\n\n def do_request(self, method, url = \"/labels\", is_batch=False, data=None):\n url = 'https://' + self.target_host + url\n kwargs = {'headers': {}}\n if method == 'GET' and data:\n kwargs['params'] = data\n elif method == 'POST' and data:\n kwargs['data'] = json.dumps(data)\n kwargs['headers']['Content-Type'] = 'application/json'\n response = requests.request(method, url, **kwargs)\n if response.status_code != 200:\n raise BaseException(response.status_code, response.text)\n response = response.json()\n if \"error\" in response:\n raise BaseException(response[\"error\"])\n return response\n\n def push_thread(self, wallet):\n wallet_id = self.wallets[wallet][2]\n bundle = {\"labels\": [],\n \"walletId\": wallet_id,\n \"walletNonce\": self.get_nonce(wallet)}\n for key, value in wallet.labels.iteritems():\n try:\n encoded_key = self.encode(wallet, key)\n encoded_value = self.encode(wallet, value)\n except:\n self.print_error('cannot encode', repr(key), repr(value))\n continue\n bundle[\"labels\"].append({'encryptedLabel': encoded_value,\n 'externalId': encoded_key})\n self.do_request(\"POST\", \"/labels\", True, bundle)\n\n def pull_thread(self, wallet, force):\n wallet_id = self.wallets[wallet][2]\n nonce = 1 if force else self.get_nonce(wallet) - 1\n self.print_error(\"asking for labels since nonce\", nonce)\n try:\n response = self.do_request(\"GET\", (\"/labels/since/%d/for/%s\" % (nonce, wallet_id) ))\n if response[\"labels\"] is None:\n self.print_error('no new labels')\n return\n result = {}\n for label in response[\"labels\"]:\n try:\n key = self.decode(wallet, label[\"externalId\"])\n value = self.decode(wallet, label[\"encryptedLabel\"])\n except:\n continue\n try:\n json.dumps(key)\n json.dumps(value)\n except:\n self.print_error('error: no json', key)\n continue\n result[key] = value\n\n for key, value in result.items():\n if force or not wallet.labels.get(key):\n wallet.labels[key] = value\n\n self.print_error(\"received %d labels\" % len(response))\n # do not write to disk because we're in a daemon thread\n wallet.storage.put('labels', wallet.labels)\n self.set_nonce(wallet, response[\"nonce\"] + 1)\n self.on_pulled(wallet)\n\n except Exception as e:\n traceback.print_exc(file=sys.stderr)\n self.print_error(\"could not retrieve labels\")\n\n def start_wallet(self, wallet):\n nonce = self.get_nonce(wallet)\n self.print_error(\"wallet\", wallet.basename(), \"nonce is\", nonce)\n mpk = wallet.get_fingerprint()\n if not mpk:\n return\n password = hashlib.sha1(mpk).digest().encode('hex')[:32]\n iv = hashlib.sha256(password).digest()[:16]\n wallet_id = hashlib.sha256(mpk).digest().encode('hex')\n self.wallets[wallet] = (password, iv, wallet_id)\n # If there is an auth token we can try to actually start syncing\n t = threading.Thread(target=self.pull_thread, args=(wallet, False))\n t.setDaemon(True)\n t.start()\n\n def stop_wallet(self, wallet):\n self.wallets.pop(wallet, None)\n", "path": "plugins/labels/labels.py"}]} | 1,882 | 152 |
gh_patches_debug_29287 | rasdani/github-patches | git_diff | weni-ai__bothub-engine-77 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Is possible translate example to same language
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bothub/api/serializers/translate.py`
Content:
```
1 from rest_framework import serializers
2
3 from django.utils.translation import gettext as _
4
5 from bothub.common.models import RepositoryTranslatedExampleEntity
6 from bothub.common.models import RepositoryTranslatedExample
7 from bothub.common.models import RepositoryExample
8
9 from ..validators import CanContributeInRepositoryTranslatedExampleValidator
10 from ..validators import CanContributeInRepositoryExampleValidator
11 from ..validators import TranslatedExampleEntitiesValidator
12
13
14 class RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):
15 class Meta:
16 model = RepositoryTranslatedExampleEntity
17 fields = [
18 'id',
19 'repository_translated_example',
20 'start',
21 'end',
22 'entity',
23 'created_at',
24 'value',
25 ]
26
27 repository_translated_example = serializers.PrimaryKeyRelatedField(
28 queryset=RepositoryTranslatedExample.objects,
29 validators=[
30 CanContributeInRepositoryTranslatedExampleValidator(),
31 ],
32 help_text='Example translation ID')
33 value = serializers.SerializerMethodField()
34
35 def get_value(self, obj):
36 return obj.value
37
38
39 class RepositoryTranslatedExampleSerializer(serializers.ModelSerializer):
40 class Meta:
41 model = RepositoryTranslatedExample
42 fields = [
43 'id',
44 'original_example',
45 'from_language',
46 'language',
47 'text',
48 'has_valid_entities',
49 'entities',
50 'created_at',
51 ]
52
53 original_example = serializers.PrimaryKeyRelatedField(
54 queryset=RepositoryExample.objects,
55 validators=[
56 CanContributeInRepositoryExampleValidator(),
57 ],
58 help_text=_('Example\'s ID'))
59 from_language = serializers.SerializerMethodField()
60 has_valid_entities = serializers.SerializerMethodField()
61 entities = RepositoryTranslatedExampleEntitySeralizer(
62 many=True,
63 read_only=True)
64
65 def get_from_language(self, obj):
66 return obj.original_example.repository_update.language
67
68 def get_has_valid_entities(self, obj):
69 return obj.has_valid_entities
70
71
72 class NewRepositoryTranslatedExampleEntitySeralizer(
73 serializers.ModelSerializer):
74 class Meta:
75 model = RepositoryTranslatedExampleEntity
76 fields = [
77 'start',
78 'end',
79 'entity',
80 ]
81
82
83 class NewRepositoryTranslatedExampleSerializer(serializers.ModelSerializer):
84 class Meta:
85 model = RepositoryTranslatedExample
86 fields = [
87 'id',
88 'original_example',
89 'language',
90 'text',
91 'has_valid_entities',
92 'entities',
93 ]
94
95 def __init__(self, *args, **kwargs):
96 super().__init__(*args, **kwargs)
97 self.validators.append(TranslatedExampleEntitiesValidator())
98
99 original_example = serializers.PrimaryKeyRelatedField(
100 queryset=RepositoryExample.objects,
101 validators=[
102 CanContributeInRepositoryExampleValidator(),
103 ],
104 help_text=_('Example\'s ID'))
105 has_valid_entities = serializers.SerializerMethodField()
106 entities = NewRepositoryTranslatedExampleEntitySeralizer(
107 many=True,
108 style={'text_field': 'text'})
109
110 def get_has_valid_entities(self, obj):
111 return obj.has_valid_entities
112
113 def create(self, validated_data):
114 entities_data = validated_data.pop('entities')
115
116 translated = self.Meta.model.objects.create(**validated_data)
117 for entity_data in entities_data:
118 RepositoryTranslatedExampleEntity.objects.create(
119 repository_translated_example=translated,
120 **entity_data)
121 return translated
122
```
Path: `bothub/api/validators.py`
Content:
```
1 from django.utils.translation import gettext as _
2 from rest_framework.exceptions import PermissionDenied
3 from rest_framework.exceptions import ValidationError
4
5 from bothub.common.models import RepositoryTranslatedExample
6
7
8 class CanContributeInRepositoryValidator(object):
9 def __call__(self, value):
10 user_authorization = value.get_user_authorization(
11 self.request.user)
12 if not user_authorization.can_contribute:
13 raise PermissionDenied(
14 _('You can\'t contribute in this repository'))
15
16 def set_context(self, serializer):
17 self.request = serializer.context.get('request')
18
19
20 class CanContributeInRepositoryExampleValidator(object):
21 def __call__(self, value):
22 repository = value.repository_update.repository
23 user_authorization = repository.get_user_authorization(
24 self.request.user)
25 if not user_authorization.can_contribute:
26 raise PermissionDenied(
27 _('You can\'t contribute in this repository'))
28
29 def set_context(self, serializer):
30 self.request = serializer.context.get('request')
31
32
33 class CanContributeInRepositoryTranslatedExampleValidator(object):
34 def __call__(self, value):
35 repository = value.original_example.repository_update.repository
36 user_authorization = repository.get_user_authorization(
37 self.request.user)
38 if not user_authorization.can_contribute:
39 raise PermissionDenied(
40 _('You can\'t contribute in this repository'))
41
42 def set_context(self, serializer):
43 self.request = serializer.context.get('request')
44
45
46 class TranslatedExampleEntitiesValidator(object):
47 def __call__(self, attrs):
48 original_example = attrs.get('original_example')
49 entities_valid = RepositoryTranslatedExample.same_entities_validator(
50 list(map(lambda x: dict(x), attrs.get('entities'))),
51 list(map(lambda x: x.to_dict, original_example.entities.all())))
52 if not entities_valid:
53 raise ValidationError({'entities': _('Invalid entities')})
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bothub/api/serializers/translate.py b/bothub/api/serializers/translate.py
--- a/bothub/api/serializers/translate.py
+++ b/bothub/api/serializers/translate.py
@@ -9,6 +9,7 @@
from ..validators import CanContributeInRepositoryTranslatedExampleValidator
from ..validators import CanContributeInRepositoryExampleValidator
from ..validators import TranslatedExampleEntitiesValidator
+from ..validators import TranslatedExampleLanguageValidator
class RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):
@@ -95,6 +96,7 @@
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.validators.append(TranslatedExampleEntitiesValidator())
+ self.validators.append(TranslatedExampleLanguageValidator())
original_example = serializers.PrimaryKeyRelatedField(
queryset=RepositoryExample.objects,
diff --git a/bothub/api/validators.py b/bothub/api/validators.py
--- a/bothub/api/validators.py
+++ b/bothub/api/validators.py
@@ -51,3 +51,11 @@
list(map(lambda x: x.to_dict, original_example.entities.all())))
if not entities_valid:
raise ValidationError({'entities': _('Invalid entities')})
+
+
+class TranslatedExampleLanguageValidator(object):
+ def __call__(self, attrs):
+ original_example = attrs.get('original_example')
+ language = attrs.get('language')
+ if original_example.repository_update.language == language:
+ raise ValidationError({'language': _('Can\'t translate to same language')})
| {"golden_diff": "diff --git a/bothub/api/serializers/translate.py b/bothub/api/serializers/translate.py\n--- a/bothub/api/serializers/translate.py\n+++ b/bothub/api/serializers/translate.py\n@@ -9,6 +9,7 @@\n from ..validators import CanContributeInRepositoryTranslatedExampleValidator\n from ..validators import CanContributeInRepositoryExampleValidator\n from ..validators import TranslatedExampleEntitiesValidator\n+from ..validators import TranslatedExampleLanguageValidator\n \n \n class RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):\n@@ -95,6 +96,7 @@\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.validators.append(TranslatedExampleEntitiesValidator())\n+ self.validators.append(TranslatedExampleLanguageValidator())\n \n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\ndiff --git a/bothub/api/validators.py b/bothub/api/validators.py\n--- a/bothub/api/validators.py\n+++ b/bothub/api/validators.py\n@@ -51,3 +51,11 @@\n list(map(lambda x: x.to_dict, original_example.entities.all())))\n if not entities_valid:\n raise ValidationError({'entities': _('Invalid entities')})\n+\n+\n+class TranslatedExampleLanguageValidator(object):\n+ def __call__(self, attrs):\n+ original_example = attrs.get('original_example')\n+ language = attrs.get('language')\n+ if original_example.repository_update.language == language:\n+ raise ValidationError({'language': _('Can\\'t translate to same language')})\n", "issue": "Is possible translate example to same language\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom django.utils.translation import gettext as _\n\nfrom bothub.common.models import RepositoryTranslatedExampleEntity\nfrom bothub.common.models import RepositoryTranslatedExample\nfrom bothub.common.models import RepositoryExample\n\nfrom ..validators import CanContributeInRepositoryTranslatedExampleValidator\nfrom ..validators import CanContributeInRepositoryExampleValidator\nfrom ..validators import TranslatedExampleEntitiesValidator\n\n\nclass RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExampleEntity\n fields = [\n 'id',\n 'repository_translated_example',\n 'start',\n 'end',\n 'entity',\n 'created_at',\n 'value',\n ]\n\n repository_translated_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryTranslatedExample.objects,\n validators=[\n CanContributeInRepositoryTranslatedExampleValidator(),\n ],\n help_text='Example translation ID')\n value = serializers.SerializerMethodField()\n\n def get_value(self, obj):\n return obj.value\n\n\nclass RepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExample\n fields = [\n 'id',\n 'original_example',\n 'from_language',\n 'language',\n 'text',\n 'has_valid_entities',\n 'entities',\n 'created_at',\n ]\n\n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\n validators=[\n CanContributeInRepositoryExampleValidator(),\n ],\n help_text=_('Example\\'s ID'))\n from_language = serializers.SerializerMethodField()\n has_valid_entities = serializers.SerializerMethodField()\n entities = RepositoryTranslatedExampleEntitySeralizer(\n many=True,\n read_only=True)\n\n def get_from_language(self, obj):\n return obj.original_example.repository_update.language\n\n def get_has_valid_entities(self, obj):\n return obj.has_valid_entities\n\n\nclass 
NewRepositoryTranslatedExampleEntitySeralizer(\n serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExampleEntity\n fields = [\n 'start',\n 'end',\n 'entity',\n ]\n\n\nclass NewRepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExample\n fields = [\n 'id',\n 'original_example',\n 'language',\n 'text',\n 'has_valid_entities',\n 'entities',\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.validators.append(TranslatedExampleEntitiesValidator())\n\n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\n validators=[\n CanContributeInRepositoryExampleValidator(),\n ],\n help_text=_('Example\\'s ID'))\n has_valid_entities = serializers.SerializerMethodField()\n entities = NewRepositoryTranslatedExampleEntitySeralizer(\n many=True,\n style={'text_field': 'text'})\n\n def get_has_valid_entities(self, obj):\n return obj.has_valid_entities\n\n def create(self, validated_data):\n entities_data = validated_data.pop('entities')\n\n translated = self.Meta.model.objects.create(**validated_data)\n for entity_data in entities_data:\n RepositoryTranslatedExampleEntity.objects.create(\n repository_translated_example=translated,\n **entity_data)\n return translated\n", "path": "bothub/api/serializers/translate.py"}, {"content": "from django.utils.translation import gettext as _\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.exceptions import ValidationError\n\nfrom bothub.common.models import RepositoryTranslatedExample\n\n\nclass CanContributeInRepositoryValidator(object):\n def __call__(self, value):\n user_authorization = value.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryExampleValidator(object):\n def __call__(self, value):\n repository = value.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryTranslatedExampleValidator(object):\n def __call__(self, value):\n repository = value.original_example.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass TranslatedExampleEntitiesValidator(object):\n def __call__(self, attrs):\n original_example = attrs.get('original_example')\n entities_valid = RepositoryTranslatedExample.same_entities_validator(\n list(map(lambda x: dict(x), attrs.get('entities'))),\n list(map(lambda x: x.to_dict, original_example.entities.all())))\n if not entities_valid:\n raise ValidationError({'entities': _('Invalid entities')})\n", "path": "bothub/api/validators.py"}], "after_files": [{"content": "from rest_framework import serializers\n\nfrom django.utils.translation import gettext as _\n\nfrom bothub.common.models import RepositoryTranslatedExampleEntity\nfrom bothub.common.models import 
RepositoryTranslatedExample\nfrom bothub.common.models import RepositoryExample\n\nfrom ..validators import CanContributeInRepositoryTranslatedExampleValidator\nfrom ..validators import CanContributeInRepositoryExampleValidator\nfrom ..validators import TranslatedExampleEntitiesValidator\nfrom ..validators import TranslatedExampleLanguageValidator\n\n\nclass RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExampleEntity\n fields = [\n 'id',\n 'repository_translated_example',\n 'start',\n 'end',\n 'entity',\n 'created_at',\n 'value',\n ]\n\n repository_translated_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryTranslatedExample.objects,\n validators=[\n CanContributeInRepositoryTranslatedExampleValidator(),\n ],\n help_text='Example translation ID')\n value = serializers.SerializerMethodField()\n\n def get_value(self, obj):\n return obj.value\n\n\nclass RepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExample\n fields = [\n 'id',\n 'original_example',\n 'from_language',\n 'language',\n 'text',\n 'has_valid_entities',\n 'entities',\n 'created_at',\n ]\n\n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\n validators=[\n CanContributeInRepositoryExampleValidator(),\n ],\n help_text=_('Example\\'s ID'))\n from_language = serializers.SerializerMethodField()\n has_valid_entities = serializers.SerializerMethodField()\n entities = RepositoryTranslatedExampleEntitySeralizer(\n many=True,\n read_only=True)\n\n def get_from_language(self, obj):\n return obj.original_example.repository_update.language\n\n def get_has_valid_entities(self, obj):\n return obj.has_valid_entities\n\n\nclass NewRepositoryTranslatedExampleEntitySeralizer(\n serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExampleEntity\n fields = [\n 'start',\n 'end',\n 'entity',\n ]\n\n\nclass NewRepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExample\n fields = [\n 'id',\n 'original_example',\n 'language',\n 'text',\n 'has_valid_entities',\n 'entities',\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.validators.append(TranslatedExampleEntitiesValidator())\n self.validators.append(TranslatedExampleLanguageValidator())\n\n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\n validators=[\n CanContributeInRepositoryExampleValidator(),\n ],\n help_text=_('Example\\'s ID'))\n has_valid_entities = serializers.SerializerMethodField()\n entities = NewRepositoryTranslatedExampleEntitySeralizer(\n many=True,\n style={'text_field': 'text'})\n\n def get_has_valid_entities(self, obj):\n return obj.has_valid_entities\n\n def create(self, validated_data):\n entities_data = validated_data.pop('entities')\n\n translated = self.Meta.model.objects.create(**validated_data)\n for entity_data in entities_data:\n RepositoryTranslatedExampleEntity.objects.create(\n repository_translated_example=translated,\n **entity_data)\n return translated\n", "path": "bothub/api/serializers/translate.py"}, {"content": "from django.utils.translation import gettext as _\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.exceptions import ValidationError\n\nfrom bothub.common.models import RepositoryTranslatedExample\n\n\nclass CanContributeInRepositoryValidator(object):\n def __call__(self, 
value):\n user_authorization = value.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryExampleValidator(object):\n def __call__(self, value):\n repository = value.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryTranslatedExampleValidator(object):\n def __call__(self, value):\n repository = value.original_example.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass TranslatedExampleEntitiesValidator(object):\n def __call__(self, attrs):\n original_example = attrs.get('original_example')\n entities_valid = RepositoryTranslatedExample.same_entities_validator(\n list(map(lambda x: dict(x), attrs.get('entities'))),\n list(map(lambda x: x.to_dict, original_example.entities.all())))\n if not entities_valid:\n raise ValidationError({'entities': _('Invalid entities')})\n\n\nclass TranslatedExampleLanguageValidator(object):\n def __call__(self, attrs):\n original_example = attrs.get('original_example')\n language = attrs.get('language')\n if original_example.repository_update.language == language:\n raise ValidationError({'language': _('Can\\'t translate to same language')})\n", "path": "bothub/api/validators.py"}]} | 1,713 | 347 |
gh_patches_debug_18321 | rasdani/github-patches | git_diff | crytic__slither-2394 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
filter `name-reused` to only run on Truffle projects
The detector should check which platform was used with https://crytic.github.io/crytic-compile/crytic_compile/crytic_compile.html#CryticCompile.platform and https://crytic.github.io/slither/slither/core/compilation_unit.html#SlitherCompilationUnit.crytic_compile
https://github.com/crytic/slither/blob/13d7d9f66a6be4f798478fa3735fb63444b46c3d/slither/detectors/slither/name_reused.py#L51-L61
https://github.com/crytic/crytic-compile/blob/b5c538aaa66be44b7a68d9723881a7eba2c20898/crytic_compile/platform/truffle.py#L83-L90
--- END ISSUE ---
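Since the issue already names the relevant APIs, a guard along these lines is one way the check could be wired in. This is a sketch, not the merged fix: the helper name `_is_truffle_unit` is just for illustration, and it assumes `compilation_unit.crytic_compile.platform` returns the platform instance as documented in the links above.

```python
from crytic_compile.platform.truffle import Truffle

def _is_truffle_unit(compilation_unit) -> bool:
    # SlitherCompilationUnit.crytic_compile exposes the CryticCompile object,
    # and CryticCompile.platform identifies the build system that produced
    # the artifacts being analyzed.
    return isinstance(compilation_unit.crytic_compile.platform, Truffle)
```

Inside `NameReused._detect()` such a guard would return an empty result list for non-Truffle compilation units before any name-collision analysis runs.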
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `slither/detectors/slither/name_reused.py`
Content:
```
1 from collections import defaultdict
2 from typing import List
3
4 from slither.core.compilation_unit import SlitherCompilationUnit
5 from slither.core.declarations import Contract
6 from slither.detectors.abstract_detector import (
7 AbstractDetector,
8 DetectorClassification,
9 DETECTOR_INFO,
10 )
11 from slither.utils.output import Output
12
13
14 def _find_missing_inheritance(compilation_unit: SlitherCompilationUnit) -> List[Contract]:
15 """
16 Filter contracts with missing inheritance to return only the "most base" contracts
17 in the inheritance tree.
18 :param slither:
19 :return:
20 """
21 missings = compilation_unit.contracts_with_missing_inheritance
22
23 ret = []
24 for b in missings:
25 is_most_base = True
26 for inheritance in b.immediate_inheritance:
27 if inheritance in missings:
28 is_most_base = False
29 if is_most_base:
30 ret.append(b)
31
32 return ret
33
34
35 class NameReused(AbstractDetector):
36 ARGUMENT = "name-reused"
37 HELP = "Contract's name reused"
38 IMPACT = DetectorClassification.HIGH
39 CONFIDENCE = DetectorClassification.HIGH
40
41 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#name-reused"
42
43 WIKI_TITLE = "Name reused"
44
45 # region wiki_description
46 WIKI_DESCRIPTION = """If a codebase has two contracts the similar names, the compilation artifacts
47 will not contain one of the contracts with the duplicate name."""
48 # endregion wiki_description
49
50 # region wiki_exploit_scenario
51 WIKI_EXPLOIT_SCENARIO = """
52 Bob's `truffle` codebase has two contracts named `ERC20`.
53 When `truffle compile` runs, only one of the two contracts will generate artifacts in `build/contracts`.
54 As a result, the second contract cannot be analyzed.
55 """
56 # endregion wiki_exploit_scenario
57
58 WIKI_RECOMMENDATION = "Rename the contract."
59
60 # pylint: disable=too-many-locals,too-many-branches
61 def _detect(self) -> List[Output]:
62 results = []
63 compilation_unit = self.compilation_unit
64
65 all_contracts = compilation_unit.contracts
66 all_contracts_name = [c.name for c in all_contracts]
67 contracts_name_reused = {
68 contract for contract in all_contracts_name if all_contracts_name.count(contract) > 1
69 }
70
71 names_reused = {
72 name: compilation_unit.get_contract_from_name(name) for name in contracts_name_reused
73 }
74
75 # First show the contracts that we know are missing
76 incorrectly_constructed = [
77 contract
78 for contract in compilation_unit.contracts
79 if contract.is_incorrectly_constructed
80 ]
81
82 inheritance_corrupted = defaultdict(list)
83 for contract in incorrectly_constructed:
84 for father in contract.inheritance:
85 inheritance_corrupted[father.name].append(contract)
86
87 for contract_name, files in names_reused.items():
88 info: DETECTOR_INFO = [contract_name, " is re-used:\n"]
89 for file in files:
90 if file is None:
91 info += ["\t- In an file not found, most likely in\n"]
92 else:
93 info += ["\t- ", file, "\n"]
94
95 if contract_name in inheritance_corrupted:
96 info += ["\tAs a result, the inherited contracts are not correctly analyzed:\n"]
97 for corrupted in inheritance_corrupted[contract_name]:
98 info += ["\t\t- ", corrupted, "\n"]
99 res = self.generate_result(info)
100 results.append(res)
101
102 # Then show the contracts for which one of the father was not found
103 # Here we are not able to know
104 most_base_with_missing_inheritance = _find_missing_inheritance(compilation_unit)
105
106 for b in most_base_with_missing_inheritance:
107 info = [b, " inherits from a contract for which the name is reused.\n"]
108 if b.inheritance:
109 info += ["\t- Slither could not determine which contract has a duplicate name:\n"]
110 for inheritance in b.inheritance:
111 info += ["\t\t-", inheritance, "\n"]
112 info += ["\t- Check if:\n"]
113 info += ["\t\t- A inherited contract is missing from this list,\n"]
114 info += ["\t\t- The contract are imported from the correct files.\n"]
115 if b.derived_contracts:
116 info += [f"\t- This issue impacts the contracts inheriting from {b.name}:\n"]
117 for derived in b.derived_contracts:
118 info += ["\t\t-", derived, "\n"]
119 res = self.generate_result(info)
120 results.append(res)
121 return results
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/slither/detectors/slither/name_reused.py b/slither/detectors/slither/name_reused.py
--- a/slither/detectors/slither/name_reused.py
+++ b/slither/detectors/slither/name_reused.py
@@ -1,6 +1,8 @@
from collections import defaultdict
from typing import List
+from crytic_compile.platform import Type as PlatformType
+
from slither.core.compilation_unit import SlitherCompilationUnit
from slither.core.declarations import Contract
from slither.detectors.abstract_detector import (
@@ -61,6 +63,8 @@
def _detect(self) -> List[Output]:
results = []
compilation_unit = self.compilation_unit
+ if compilation_unit.core.crytic_compile.platform != PlatformType.TRUFFLE:
+ return []
all_contracts = compilation_unit.contracts
all_contracts_name = [c.name for c in all_contracts]
| {"golden_diff": "diff --git a/slither/detectors/slither/name_reused.py b/slither/detectors/slither/name_reused.py\n--- a/slither/detectors/slither/name_reused.py\n+++ b/slither/detectors/slither/name_reused.py\n@@ -1,6 +1,8 @@\n from collections import defaultdict\n from typing import List\n \n+from crytic_compile.platform import Type as PlatformType\n+\n from slither.core.compilation_unit import SlitherCompilationUnit\n from slither.core.declarations import Contract\n from slither.detectors.abstract_detector import (\n@@ -61,6 +63,8 @@\n def _detect(self) -> List[Output]:\n results = []\n compilation_unit = self.compilation_unit\n+ if compilation_unit.core.crytic_compile.platform != PlatformType.TRUFFLE:\n+ return []\n \n all_contracts = compilation_unit.contracts\n all_contracts_name = [c.name for c in all_contracts]\n", "issue": "filter `name-reused` to only run on Truffle projects\nThe detector should check which platform was used with https://crytic.github.io/crytic-compile/crytic_compile/crytic_compile.html#CryticCompile.platform and https://crytic.github.io/slither/slither/core/compilation_unit.html#SlitherCompilationUnit.crytic_compile \r\nhttps://github.com/crytic/slither/blob/13d7d9f66a6be4f798478fa3735fb63444b46c3d/slither/detectors/slither/name_reused.py#L51-L61\r\n\r\nhttps://github.com/crytic/crytic-compile/blob/b5c538aaa66be44b7a68d9723881a7eba2c20898/crytic_compile/platform/truffle.py#L83-L90\n", "before_files": [{"content": "from collections import defaultdict\nfrom typing import List\n\nfrom slither.core.compilation_unit import SlitherCompilationUnit\nfrom slither.core.declarations import Contract\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.utils.output import Output\n\n\ndef _find_missing_inheritance(compilation_unit: SlitherCompilationUnit) -> List[Contract]:\n \"\"\"\n Filter contracts with missing inheritance to return only the \"most base\" contracts\n in the inheritance tree.\n :param slither:\n :return:\n \"\"\"\n missings = compilation_unit.contracts_with_missing_inheritance\n\n ret = []\n for b in missings:\n is_most_base = True\n for inheritance in b.immediate_inheritance:\n if inheritance in missings:\n is_most_base = False\n if is_most_base:\n ret.append(b)\n\n return ret\n\n\nclass NameReused(AbstractDetector):\n ARGUMENT = \"name-reused\"\n HELP = \"Contract's name reused\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#name-reused\"\n\n WIKI_TITLE = \"Name reused\"\n\n # region wiki_description\n WIKI_DESCRIPTION = \"\"\"If a codebase has two contracts the similar names, the compilation artifacts\nwill not contain one of the contracts with the duplicate name.\"\"\"\n # endregion wiki_description\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\nBob's `truffle` codebase has two contracts named `ERC20`.\nWhen `truffle compile` runs, only one of the two contracts will generate artifacts in `build/contracts`.\nAs a result, the second contract cannot be analyzed.\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Rename the contract.\"\n\n # pylint: disable=too-many-locals,too-many-branches\n def _detect(self) -> List[Output]:\n results = []\n compilation_unit = self.compilation_unit\n\n all_contracts = compilation_unit.contracts\n all_contracts_name = [c.name for c in all_contracts]\n contracts_name_reused = {\n contract for 
contract in all_contracts_name if all_contracts_name.count(contract) > 1\n }\n\n names_reused = {\n name: compilation_unit.get_contract_from_name(name) for name in contracts_name_reused\n }\n\n # First show the contracts that we know are missing\n incorrectly_constructed = [\n contract\n for contract in compilation_unit.contracts\n if contract.is_incorrectly_constructed\n ]\n\n inheritance_corrupted = defaultdict(list)\n for contract in incorrectly_constructed:\n for father in contract.inheritance:\n inheritance_corrupted[father.name].append(contract)\n\n for contract_name, files in names_reused.items():\n info: DETECTOR_INFO = [contract_name, \" is re-used:\\n\"]\n for file in files:\n if file is None:\n info += [\"\\t- In an file not found, most likely in\\n\"]\n else:\n info += [\"\\t- \", file, \"\\n\"]\n\n if contract_name in inheritance_corrupted:\n info += [\"\\tAs a result, the inherited contracts are not correctly analyzed:\\n\"]\n for corrupted in inheritance_corrupted[contract_name]:\n info += [\"\\t\\t- \", corrupted, \"\\n\"]\n res = self.generate_result(info)\n results.append(res)\n\n # Then show the contracts for which one of the father was not found\n # Here we are not able to know\n most_base_with_missing_inheritance = _find_missing_inheritance(compilation_unit)\n\n for b in most_base_with_missing_inheritance:\n info = [b, \" inherits from a contract for which the name is reused.\\n\"]\n if b.inheritance:\n info += [\"\\t- Slither could not determine which contract has a duplicate name:\\n\"]\n for inheritance in b.inheritance:\n info += [\"\\t\\t-\", inheritance, \"\\n\"]\n info += [\"\\t- Check if:\\n\"]\n info += [\"\\t\\t- A inherited contract is missing from this list,\\n\"]\n info += [\"\\t\\t- The contract are imported from the correct files.\\n\"]\n if b.derived_contracts:\n info += [f\"\\t- This issue impacts the contracts inheriting from {b.name}:\\n\"]\n for derived in b.derived_contracts:\n info += [\"\\t\\t-\", derived, \"\\n\"]\n res = self.generate_result(info)\n results.append(res)\n return results\n", "path": "slither/detectors/slither/name_reused.py"}], "after_files": [{"content": "from collections import defaultdict\nfrom typing import List\n\nfrom crytic_compile.platform import Type as PlatformType\n\nfrom slither.core.compilation_unit import SlitherCompilationUnit\nfrom slither.core.declarations import Contract\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.utils.output import Output\n\n\ndef _find_missing_inheritance(compilation_unit: SlitherCompilationUnit) -> List[Contract]:\n \"\"\"\n Filter contracts with missing inheritance to return only the \"most base\" contracts\n in the inheritance tree.\n :param slither:\n :return:\n \"\"\"\n missings = compilation_unit.contracts_with_missing_inheritance\n\n ret = []\n for b in missings:\n is_most_base = True\n for inheritance in b.immediate_inheritance:\n if inheritance in missings:\n is_most_base = False\n if is_most_base:\n ret.append(b)\n\n return ret\n\n\nclass NameReused(AbstractDetector):\n ARGUMENT = \"name-reused\"\n HELP = \"Contract's name reused\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#name-reused\"\n\n WIKI_TITLE = \"Name reused\"\n\n # region wiki_description\n WIKI_DESCRIPTION = \"\"\"If a codebase has two contracts the similar names, the compilation artifacts\nwill not contain one of the 
contracts with the duplicate name.\"\"\"\n # endregion wiki_description\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\nBob's `truffle` codebase has two contracts named `ERC20`.\nWhen `truffle compile` runs, only one of the two contracts will generate artifacts in `build/contracts`.\nAs a result, the second contract cannot be analyzed.\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Rename the contract.\"\n\n # pylint: disable=too-many-locals,too-many-branches\n def _detect(self) -> List[Output]:\n results = []\n compilation_unit = self.compilation_unit\n if compilation_unit.core.crytic_compile.platform != PlatformType.TRUFFLE:\n return []\n\n all_contracts = compilation_unit.contracts\n all_contracts_name = [c.name for c in all_contracts]\n contracts_name_reused = {\n contract for contract in all_contracts_name if all_contracts_name.count(contract) > 1\n }\n\n names_reused = {\n name: compilation_unit.get_contract_from_name(name) for name in contracts_name_reused\n }\n\n # First show the contracts that we know are missing\n incorrectly_constructed = [\n contract\n for contract in compilation_unit.contracts\n if contract.is_incorrectly_constructed\n ]\n\n inheritance_corrupted = defaultdict(list)\n for contract in incorrectly_constructed:\n for father in contract.inheritance:\n inheritance_corrupted[father.name].append(contract)\n\n for contract_name, files in names_reused.items():\n info: DETECTOR_INFO = [contract_name, \" is re-used:\\n\"]\n for file in files:\n if file is None:\n info += [\"\\t- In an file not found, most likely in\\n\"]\n else:\n info += [\"\\t- \", file, \"\\n\"]\n\n if contract_name in inheritance_corrupted:\n info += [\"\\tAs a result, the inherited contracts are not correctly analyzed:\\n\"]\n for corrupted in inheritance_corrupted[contract_name]:\n info += [\"\\t\\t- \", corrupted, \"\\n\"]\n res = self.generate_result(info)\n results.append(res)\n\n # Then show the contracts for which one of the father was not found\n # Here we are not able to know\n most_base_with_missing_inheritance = _find_missing_inheritance(compilation_unit)\n\n for b in most_base_with_missing_inheritance:\n info = [b, \" inherits from a contract for which the name is reused.\\n\"]\n if b.inheritance:\n info += [\"\\t- Slither could not determine which contract has a duplicate name:\\n\"]\n for inheritance in b.inheritance:\n info += [\"\\t\\t-\", inheritance, \"\\n\"]\n info += [\"\\t- Check if:\\n\"]\n info += [\"\\t\\t- A inherited contract is missing from this list,\\n\"]\n info += [\"\\t\\t- The contract are imported from the correct files.\\n\"]\n if b.derived_contracts:\n info += [f\"\\t- This issue impacts the contracts inheriting from {b.name}:\\n\"]\n for derived in b.derived_contracts:\n info += [\"\\t\\t-\", derived, \"\\n\"]\n res = self.generate_result(info)\n results.append(res)\n return results\n", "path": "slither/detectors/slither/name_reused.py"}]} | 1,745 | 205 |
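The guard added in the slither diff above leans on crytic-compile recording which build platform produced the artifacts; per the exploit scenario quoted in the record, Truffle's `build/contracts` output is where two same-named contracts overwrite each other. A minimal, hedged sketch of that platform check on its own (the target path is a made-up example, and the comparison simply mirrors the one used in the diff):

```python
# Sketch: ask crytic-compile which platform compiled a target, mirroring the
# early-return guard added to the name-reused detector above.
from crytic_compile import CryticCompile
from crytic_compile.platform import Type as PlatformType

compilation = CryticCompile("path/to/truffle-project")  # hypothetical target

if compilation.platform != PlatformType.TRUFFLE:
    # The detector above only reports on Truffle builds, so bail out early here.
    print("skipping name-reused check: not a Truffle build")
```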
gh_patches_debug_64325 | rasdani/github-patches | git_diff | pex-tool__pex-1725 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.80
On the docket:
+ [x] Support booting via `/bin/sh` with `--sh-boot`. (#1721)
+ [x] Fix more pathologic lock creation slowness. (#1723)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.79"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.79"
+__version__ = "2.1.80"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.79\"\n+__version__ = \"2.1.80\"\n", "issue": "Release 2.1.80\nOn the docket:\r\n+ [x] Support booting via `/bin/sh` with `--sh-boot`. (#1721)\r\n+ [x] Fix more pathologic lock creation slowness. (#1723)\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.79\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.80\"\n", "path": "pex/version.py"}]} | 365 | 96 |
gh_patches_debug_43869 | rasdani/github-patches | git_diff | aws__aws-cli-3331 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
aws configure get and aws configure set with multiword profile names are inconsistent
It seems that `aws configure set --profile "two words"` will add single quotes around the profile name, but `aws configure get --profile "two words"` will search for a profile name that does not have single quotes around the profile name.
These two methods should behave in a similar manner.
To reproduce:
```
$ aws --version
aws-cli/1.15.10 Python/3.6.5 Darwin/17.4.0 botocore/1.10.10
$ aws configure set aws_access_key_id test --profile "test profile"
$ aws configure get aws_access_key_id --profile "test profile"
The config profile (test profile) could not be found
$ aws configure get aws_access_key_id --profile "'test profile'"
test
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/customizations/configure/set.py`
Content:
```
1 # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import os
14
15 from awscli.customizations.commands import BasicCommand
16 from awscli.customizations.configure.writer import ConfigFileWriter
17
18 from . import PREDEFINED_SECTION_NAMES, profile_to_section
19
20
21 class ConfigureSetCommand(BasicCommand):
22 NAME = 'set'
23 DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set',
24 '_description.rst')
25 SYNOPSIS = 'aws configure set varname value [--profile profile-name]'
26 EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst')
27 ARG_TABLE = [
28 {'name': 'varname',
29 'help_text': 'The name of the config value to set.',
30 'action': 'store',
31 'cli_type_name': 'string', 'positional_arg': True},
32 {'name': 'value',
33 'help_text': 'The value to set.',
34 'action': 'store',
35 'no_paramfile': True, # To disable the default paramfile behavior
36 'cli_type_name': 'string', 'positional_arg': True},
37 ]
38 # Any variables specified in this list will be written to
39 # the ~/.aws/credentials file instead of ~/.aws/config.
40 _WRITE_TO_CREDS_FILE = ['aws_access_key_id', 'aws_secret_access_key',
41 'aws_session_token']
42
43 def __init__(self, session, config_writer=None):
44 super(ConfigureSetCommand, self).__init__(session)
45 if config_writer is None:
46 config_writer = ConfigFileWriter()
47 self._config_writer = config_writer
48
49 def _run_main(self, args, parsed_globals):
50 varname = args.varname
51 value = args.value
52 section = 'default'
53 # Before handing things off to the config writer,
54 # we need to find out three things:
55 # 1. What section we're writing to (section).
56 # 2. The name of the config key (varname)
57 # 3. The actual value (value).
58 if '.' not in varname:
59 # unqualified name, scope it to the current
60 # profile (or leave it as the 'default' section if
61 # no profile is set).
62 if self._session.profile is not None:
63 section = profile_to_section(self._session.profile)
64 else:
65 # First figure out if it's been scoped to a profile.
66 parts = varname.split('.')
67 if parts[0] in ('default', 'profile'):
68 # Then we know we're scoped to a profile.
69 if parts[0] == 'default':
70 section = 'default'
71 remaining = parts[1:]
72 else:
73 # [profile, profile_name, ...]
74 section = profile_to_section(parts[1])
75 remaining = parts[2:]
76 varname = remaining[0]
77 if len(remaining) == 2:
78 value = {remaining[1]: value}
79 elif parts[0] not in PREDEFINED_SECTION_NAMES:
80 if self._session.profile is not None:
81 section = profile_to_section(self._session.profile)
82 else:
83 profile_name = self._session.get_config_variable('profile')
84 if profile_name is not None:
85 section = profile_name
86 varname = parts[0]
87 if len(parts) == 2:
88 value = {parts[1]: value}
89 elif len(parts) == 2:
90 # Otherwise it's something like "set preview.service true"
91 # of something in the [plugin] section.
92 section, varname = parts
93 config_filename = os.path.expanduser(
94 self._session.get_config_variable('config_file'))
95 updated_config = {'__section__': section, varname: value}
96 if varname in self._WRITE_TO_CREDS_FILE:
97 config_filename = os.path.expanduser(
98 self._session.get_config_variable('credentials_file'))
99 section_name = updated_config['__section__']
100 if section_name.startswith('profile '):
101 updated_config['__section__'] = section_name[8:]
102 self._config_writer.update_config(updated_config, config_filename)
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/customizations/configure/set.py b/awscli/customizations/configure/set.py
--- a/awscli/customizations/configure/set.py
+++ b/awscli/customizations/configure/set.py
@@ -46,13 +46,17 @@
config_writer = ConfigFileWriter()
self._config_writer = config_writer
+ def _get_config_file(self, path):
+ config_path = self._session.get_config_variable(path)
+ return os.path.expanduser(config_path)
+
def _run_main(self, args, parsed_globals):
varname = args.varname
value = args.value
- section = 'default'
+ profile = 'default'
# Before handing things off to the config writer,
# we need to find out three things:
- # 1. What section we're writing to (section).
+ # 1. What section we're writing to (profile).
# 2. The name of the config key (varname)
# 3. The actual value (value).
if '.' not in varname:
@@ -60,43 +64,44 @@
# profile (or leave it as the 'default' section if
# no profile is set).
if self._session.profile is not None:
- section = profile_to_section(self._session.profile)
+ profile = self._session.profile
else:
# First figure out if it's been scoped to a profile.
parts = varname.split('.')
if parts[0] in ('default', 'profile'):
# Then we know we're scoped to a profile.
if parts[0] == 'default':
- section = 'default'
+ profile = 'default'
remaining = parts[1:]
else:
# [profile, profile_name, ...]
- section = profile_to_section(parts[1])
+ profile = parts[1]
remaining = parts[2:]
varname = remaining[0]
if len(remaining) == 2:
value = {remaining[1]: value}
elif parts[0] not in PREDEFINED_SECTION_NAMES:
if self._session.profile is not None:
- section = profile_to_section(self._session.profile)
+ profile = self._session.profile
else:
profile_name = self._session.get_config_variable('profile')
if profile_name is not None:
- section = profile_name
+ profile = profile_name
varname = parts[0]
if len(parts) == 2:
value = {parts[1]: value}
elif len(parts) == 2:
# Otherwise it's something like "set preview.service true"
# of something in the [plugin] section.
- section, varname = parts
- config_filename = os.path.expanduser(
- self._session.get_config_variable('config_file'))
- updated_config = {'__section__': section, varname: value}
+ profile, varname = parts
+ config_filename = self._get_config_file('config_file')
if varname in self._WRITE_TO_CREDS_FILE:
- config_filename = os.path.expanduser(
- self._session.get_config_variable('credentials_file'))
- section_name = updated_config['__section__']
- if section_name.startswith('profile '):
- updated_config['__section__'] = section_name[8:]
+ # When writing to the creds file, the section is just the profile
+ section = profile
+ config_filename = self._get_config_file('credentials_file')
+ elif profile in PREDEFINED_SECTION_NAMES or profile == 'default':
+ section = profile
+ else:
+ section = profile_to_section(profile)
+ updated_config = {'__section__': section, varname: value}
self._config_writer.update_config(updated_config, config_filename)
| {"golden_diff": "diff --git a/awscli/customizations/configure/set.py b/awscli/customizations/configure/set.py\n--- a/awscli/customizations/configure/set.py\n+++ b/awscli/customizations/configure/set.py\n@@ -46,13 +46,17 @@\n config_writer = ConfigFileWriter()\n self._config_writer = config_writer\n \n+ def _get_config_file(self, path):\n+ config_path = self._session.get_config_variable(path)\n+ return os.path.expanduser(config_path)\n+\n def _run_main(self, args, parsed_globals):\n varname = args.varname\n value = args.value\n- section = 'default'\n+ profile = 'default'\n # Before handing things off to the config writer,\n # we need to find out three things:\n- # 1. What section we're writing to (section).\n+ # 1. What section we're writing to (profile).\n # 2. The name of the config key (varname)\n # 3. The actual value (value).\n if '.' not in varname:\n@@ -60,43 +64,44 @@\n # profile (or leave it as the 'default' section if\n # no profile is set).\n if self._session.profile is not None:\n- section = profile_to_section(self._session.profile)\n+ profile = self._session.profile\n else:\n # First figure out if it's been scoped to a profile.\n parts = varname.split('.')\n if parts[0] in ('default', 'profile'):\n # Then we know we're scoped to a profile.\n if parts[0] == 'default':\n- section = 'default'\n+ profile = 'default'\n remaining = parts[1:]\n else:\n # [profile, profile_name, ...]\n- section = profile_to_section(parts[1])\n+ profile = parts[1]\n remaining = parts[2:]\n varname = remaining[0]\n if len(remaining) == 2:\n value = {remaining[1]: value}\n elif parts[0] not in PREDEFINED_SECTION_NAMES:\n if self._session.profile is not None:\n- section = profile_to_section(self._session.profile)\n+ profile = self._session.profile\n else:\n profile_name = self._session.get_config_variable('profile')\n if profile_name is not None:\n- section = profile_name\n+ profile = profile_name\n varname = parts[0]\n if len(parts) == 2:\n value = {parts[1]: value}\n elif len(parts) == 2:\n # Otherwise it's something like \"set preview.service true\"\n # of something in the [plugin] section.\n- section, varname = parts\n- config_filename = os.path.expanduser(\n- self._session.get_config_variable('config_file'))\n- updated_config = {'__section__': section, varname: value}\n+ profile, varname = parts\n+ config_filename = self._get_config_file('config_file')\n if varname in self._WRITE_TO_CREDS_FILE:\n- config_filename = os.path.expanduser(\n- self._session.get_config_variable('credentials_file'))\n- section_name = updated_config['__section__']\n- if section_name.startswith('profile '):\n- updated_config['__section__'] = section_name[8:]\n+ # When writing to the creds file, the section is just the profile\n+ section = profile\n+ config_filename = self._get_config_file('credentials_file')\n+ elif profile in PREDEFINED_SECTION_NAMES or profile == 'default':\n+ section = profile\n+ else:\n+ section = profile_to_section(profile)\n+ updated_config = {'__section__': section, varname: value}\n self._config_writer.update_config(updated_config, config_filename)\n", "issue": "aws configure get and aws configure set with multiword profile names are inconsistent\nIt seems that `aws configure set --profile \"two words\"` will add single quotes around the profile name, but `aws configure get --profile \"two words\"` will search for a profile name that does not have single quotes around the profile name.\r\n\r\nThese two methods should behave in a similar manner.\r\n\r\nTo reproduce:\r\n\r\n```\r\n$ aws 
--version\r\naws-cli/1.15.10 Python/3.6.5 Darwin/17.4.0 botocore/1.10.10\r\n$ aws configure set aws_access_key_id test --profile \"test profile\"\r\n$ aws configure get aws_access_key_id --profile \"test profile\"\r\nThe config profile (test profile) could not be found\r\n$ aws configure get aws_access_key_id --profile \"'test profile'\"\r\ntest\r\n```\n", "before_files": [{"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport os\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.configure.writer import ConfigFileWriter\n\nfrom . import PREDEFINED_SECTION_NAMES, profile_to_section\n\n\nclass ConfigureSetCommand(BasicCommand):\n NAME = 'set'\n DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set',\n '_description.rst')\n SYNOPSIS = 'aws configure set varname value [--profile profile-name]'\n EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst')\n ARG_TABLE = [\n {'name': 'varname',\n 'help_text': 'The name of the config value to set.',\n 'action': 'store',\n 'cli_type_name': 'string', 'positional_arg': True},\n {'name': 'value',\n 'help_text': 'The value to set.',\n 'action': 'store',\n 'no_paramfile': True, # To disable the default paramfile behavior\n 'cli_type_name': 'string', 'positional_arg': True},\n ]\n # Any variables specified in this list will be written to\n # the ~/.aws/credentials file instead of ~/.aws/config.\n _WRITE_TO_CREDS_FILE = ['aws_access_key_id', 'aws_secret_access_key',\n 'aws_session_token']\n\n def __init__(self, session, config_writer=None):\n super(ConfigureSetCommand, self).__init__(session)\n if config_writer is None:\n config_writer = ConfigFileWriter()\n self._config_writer = config_writer\n\n def _run_main(self, args, parsed_globals):\n varname = args.varname\n value = args.value\n section = 'default'\n # Before handing things off to the config writer,\n # we need to find out three things:\n # 1. What section we're writing to (section).\n # 2. The name of the config key (varname)\n # 3. The actual value (value).\n if '.' 
not in varname:\n # unqualified name, scope it to the current\n # profile (or leave it as the 'default' section if\n # no profile is set).\n if self._session.profile is not None:\n section = profile_to_section(self._session.profile)\n else:\n # First figure out if it's been scoped to a profile.\n parts = varname.split('.')\n if parts[0] in ('default', 'profile'):\n # Then we know we're scoped to a profile.\n if parts[0] == 'default':\n section = 'default'\n remaining = parts[1:]\n else:\n # [profile, profile_name, ...]\n section = profile_to_section(parts[1])\n remaining = parts[2:]\n varname = remaining[0]\n if len(remaining) == 2:\n value = {remaining[1]: value}\n elif parts[0] not in PREDEFINED_SECTION_NAMES:\n if self._session.profile is not None:\n section = profile_to_section(self._session.profile)\n else:\n profile_name = self._session.get_config_variable('profile')\n if profile_name is not None:\n section = profile_name\n varname = parts[0]\n if len(parts) == 2:\n value = {parts[1]: value}\n elif len(parts) == 2:\n # Otherwise it's something like \"set preview.service true\"\n # of something in the [plugin] section.\n section, varname = parts\n config_filename = os.path.expanduser(\n self._session.get_config_variable('config_file'))\n updated_config = {'__section__': section, varname: value}\n if varname in self._WRITE_TO_CREDS_FILE:\n config_filename = os.path.expanduser(\n self._session.get_config_variable('credentials_file'))\n section_name = updated_config['__section__']\n if section_name.startswith('profile '):\n updated_config['__section__'] = section_name[8:]\n self._config_writer.update_config(updated_config, config_filename)\n", "path": "awscli/customizations/configure/set.py"}], "after_files": [{"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport os\n\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.configure.writer import ConfigFileWriter\n\nfrom . 
import PREDEFINED_SECTION_NAMES, profile_to_section\n\n\nclass ConfigureSetCommand(BasicCommand):\n NAME = 'set'\n DESCRIPTION = BasicCommand.FROM_FILE('configure', 'set',\n '_description.rst')\n SYNOPSIS = 'aws configure set varname value [--profile profile-name]'\n EXAMPLES = BasicCommand.FROM_FILE('configure', 'set', '_examples.rst')\n ARG_TABLE = [\n {'name': 'varname',\n 'help_text': 'The name of the config value to set.',\n 'action': 'store',\n 'cli_type_name': 'string', 'positional_arg': True},\n {'name': 'value',\n 'help_text': 'The value to set.',\n 'action': 'store',\n 'no_paramfile': True, # To disable the default paramfile behavior\n 'cli_type_name': 'string', 'positional_arg': True},\n ]\n # Any variables specified in this list will be written to\n # the ~/.aws/credentials file instead of ~/.aws/config.\n _WRITE_TO_CREDS_FILE = ['aws_access_key_id', 'aws_secret_access_key',\n 'aws_session_token']\n\n def __init__(self, session, config_writer=None):\n super(ConfigureSetCommand, self).__init__(session)\n if config_writer is None:\n config_writer = ConfigFileWriter()\n self._config_writer = config_writer\n\n def _get_config_file(self, path):\n config_path = self._session.get_config_variable(path)\n return os.path.expanduser(config_path)\n\n def _run_main(self, args, parsed_globals):\n varname = args.varname\n value = args.value\n profile = 'default'\n # Before handing things off to the config writer,\n # we need to find out three things:\n # 1. What section we're writing to (profile).\n # 2. The name of the config key (varname)\n # 3. The actual value (value).\n if '.' not in varname:\n # unqualified name, scope it to the current\n # profile (or leave it as the 'default' section if\n # no profile is set).\n if self._session.profile is not None:\n profile = self._session.profile\n else:\n # First figure out if it's been scoped to a profile.\n parts = varname.split('.')\n if parts[0] in ('default', 'profile'):\n # Then we know we're scoped to a profile.\n if parts[0] == 'default':\n profile = 'default'\n remaining = parts[1:]\n else:\n # [profile, profile_name, ...]\n profile = parts[1]\n remaining = parts[2:]\n varname = remaining[0]\n if len(remaining) == 2:\n value = {remaining[1]: value}\n elif parts[0] not in PREDEFINED_SECTION_NAMES:\n if self._session.profile is not None:\n profile = self._session.profile\n else:\n profile_name = self._session.get_config_variable('profile')\n if profile_name is not None:\n profile = profile_name\n varname = parts[0]\n if len(parts) == 2:\n value = {parts[1]: value}\n elif len(parts) == 2:\n # Otherwise it's something like \"set preview.service true\"\n # of something in the [plugin] section.\n profile, varname = parts\n config_filename = self._get_config_file('config_file')\n if varname in self._WRITE_TO_CREDS_FILE:\n # When writing to the creds file, the section is just the profile\n section = profile\n config_filename = self._get_config_file('credentials_file')\n elif profile in PREDEFINED_SECTION_NAMES or profile == 'default':\n section = profile\n else:\n section = profile_to_section(profile)\n updated_config = {'__section__': section, varname: value}\n self._config_writer.update_config(updated_config, config_filename)\n", "path": "awscli/customizations/configure/set.py"}]} | 1,641 | 833 |
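The aws-cli record above boils down to an asymmetry in section naming: the config file uses `[profile ...]` headers, and `profile_to_section` shell-quotes names containing whitespace, while the credentials file is keyed by the bare profile name. A simplified stand-in (not the actual awscli helper) shows where the stray quotes in `['test profile']` came from:

```python
# Simplified stand-in for awscli's profile_to_section helper: profile names
# containing whitespace are shell-quoted before being used in a section header.
from shlex import quote


def profile_to_section(profile_name):
    if any(ch.isspace() for ch in profile_name):
        profile_name = quote(profile_name)
    return "profile %s" % profile_name


print(profile_to_section("test profile"))  # -> profile 'test profile'
```

Writing that quoted form (minus the `profile ` prefix) into the credentials file is what `aws configure get` could not find; the patch avoids it by using the bare profile name whenever the credentials file is the target and only calling `profile_to_section` for the config file.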
gh_patches_debug_6234 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-3564 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
moderators for b-plans
if an initiator starts a b-plan via meinBerlin (as e.g. SenWohn does, they don't have imperia) or an external project he/she is automatically added as moderator and gets mails as the one below. This is confusing because:
a) you don't see moderators in dashboard
b) you can't follow a b-plan/external project
c) the link does not go to the external page (in this case it goes here: https://mein.berlin.de/projects/bebauungsplan-8-66-buckower-felder/)
Should we take out this rule for these two templates or stop sending mails?
<img width="698" alt="bildschirmfoto 2019-02-04 um 13 32 08" src="https://user-images.githubusercontent.com/35491681/52208589-762c0780-2881-11e9-9781-21826347abe4.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/notifications/signals.py`
Content:
```
1 from django.contrib.auth import get_user_model
2 from django.db.models import signals
3 from django.dispatch import receiver
4
5 from adhocracy4.actions.models import Action
6 from adhocracy4.actions.verbs import Verbs
7 from adhocracy4.dashboard import signals as dashboard_signals
8 from adhocracy4.follows.models import Follow
9 from adhocracy4.projects.models import Project
10
11 from . import emails
12
13 User = get_user_model()
14
15
16 @receiver(signals.post_save, sender=Action)
17 def send_notifications(instance, created, **kwargs):
18 action = instance
19 verb = Verbs(action.verb)
20
21 if action.type in ('item', 'comment') \
22 and verb in (Verbs.CREATE, Verbs.ADD):
23 emails.NotifyCreatorEmail.send(action)
24
25 if action.project:
26 emails.NotifyModeratorsEmail.send(action)
27
28 elif action.type == 'phase':
29 if verb == Verbs.START:
30 emails.NotifyFollowersOnPhaseStartedEmail.send(action)
31 elif verb == Verbs.SCHEDULE:
32 emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)
33
34 elif action.type == 'offlineevent' and verb == Verbs.START:
35 emails.NotifyFollowersOnUpcommingEventEmail.send(action)
36
37
38 @receiver(dashboard_signals.project_created)
39 def send_project_created_notifications(**kwargs):
40 project = kwargs.get('project')
41 creator = kwargs.get('user')
42 emails.NotifyInitiatorsOnProjectCreatedEmail.send(
43 project, creator_pk=creator.pk)
44
45
46 @receiver(signals.m2m_changed, sender=Project.moderators.through)
47 def autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs):
48 if action == 'post_add':
49 autofollow_project(instance, pk_set, reverse)
50
51
52 def autofollow_project(instance, pk_set, reverse):
53 if not reverse:
54 project = instance
55 users_pks = pk_set
56
57 for user_pk in users_pks:
58 Follow.objects.update_or_create(
59 project=project,
60 creator_id=user_pk,
61 defaults={
62 'enabled': True
63 }
64 )
65 else:
66 user = instance
67 project_pks = pk_set
68
69 for project_pk in project_pks:
70 Follow.objects.update_or_create(
71 project_id=project_pk,
72 creator=user,
73 defaults={
74 'enabled': True
75 }
76 )
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/notifications/signals.py b/meinberlin/apps/notifications/signals.py
--- a/meinberlin/apps/notifications/signals.py
+++ b/meinberlin/apps/notifications/signals.py
@@ -25,7 +25,8 @@
if action.project:
emails.NotifyModeratorsEmail.send(action)
- elif action.type == 'phase':
+ elif (action.type == 'phase' and
+ action.project.project_type == 'a4projects.Project'):
if verb == Verbs.START:
emails.NotifyFollowersOnPhaseStartedEmail.send(action)
elif verb == Verbs.SCHEDULE:
| {"golden_diff": "diff --git a/meinberlin/apps/notifications/signals.py b/meinberlin/apps/notifications/signals.py\n--- a/meinberlin/apps/notifications/signals.py\n+++ b/meinberlin/apps/notifications/signals.py\n@@ -25,7 +25,8 @@\n if action.project:\n emails.NotifyModeratorsEmail.send(action)\n \n- elif action.type == 'phase':\n+ elif (action.type == 'phase' and\n+ action.project.project_type == 'a4projects.Project'):\n if verb == Verbs.START:\n emails.NotifyFollowersOnPhaseStartedEmail.send(action)\n elif verb == Verbs.SCHEDULE:\n", "issue": "moderators for b-plans\nif an initiator starts a b-plan via meinBerlin (as e.g. SenWohn does, they don't have imperia) or an external project he/she is automatically added as moderator and gets mails as the one below. This is confusing because:\r\na) you don't see moderators in dashboard\r\nb) you can't follow a b-plan/external project\r\nc) the link does not go to the external page (in this case it goes here: https://mein.berlin.de/projects/bebauungsplan-8-66-buckower-felder/)\r\n\r\nShould we take out this rule for these two templates or stop sending mails?\r\n\r\n\r\n<img width=\"698\" alt=\"bildschirmfoto 2019-02-04 um 13 32 08\" src=\"https://user-images.githubusercontent.com/35491681/52208589-762c0780-2881-11e9-9781-21826347abe4.png\">\r\n\n", "before_files": [{"content": "from django.contrib.auth import get_user_model\nfrom django.db.models import signals\nfrom django.dispatch import receiver\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.actions.verbs import Verbs\nfrom adhocracy4.dashboard import signals as dashboard_signals\nfrom adhocracy4.follows.models import Follow\nfrom adhocracy4.projects.models import Project\n\nfrom . import emails\n\nUser = get_user_model()\n\n\n@receiver(signals.post_save, sender=Action)\ndef send_notifications(instance, created, **kwargs):\n action = instance\n verb = Verbs(action.verb)\n\n if action.type in ('item', 'comment') \\\n and verb in (Verbs.CREATE, Verbs.ADD):\n emails.NotifyCreatorEmail.send(action)\n\n if action.project:\n emails.NotifyModeratorsEmail.send(action)\n\n elif action.type == 'phase':\n if verb == Verbs.START:\n emails.NotifyFollowersOnPhaseStartedEmail.send(action)\n elif verb == Verbs.SCHEDULE:\n emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)\n\n elif action.type == 'offlineevent' and verb == Verbs.START:\n emails.NotifyFollowersOnUpcommingEventEmail.send(action)\n\n\n@receiver(dashboard_signals.project_created)\ndef send_project_created_notifications(**kwargs):\n project = kwargs.get('project')\n creator = kwargs.get('user')\n emails.NotifyInitiatorsOnProjectCreatedEmail.send(\n project, creator_pk=creator.pk)\n\n\n@receiver(signals.m2m_changed, sender=Project.moderators.through)\ndef autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs):\n if action == 'post_add':\n autofollow_project(instance, pk_set, reverse)\n\n\ndef autofollow_project(instance, pk_set, reverse):\n if not reverse:\n project = instance\n users_pks = pk_set\n\n for user_pk in users_pks:\n Follow.objects.update_or_create(\n project=project,\n creator_id=user_pk,\n defaults={\n 'enabled': True\n }\n )\n else:\n user = instance\n project_pks = pk_set\n\n for project_pk in project_pks:\n Follow.objects.update_or_create(\n project_id=project_pk,\n creator=user,\n defaults={\n 'enabled': True\n }\n )\n", "path": "meinberlin/apps/notifications/signals.py"}], "after_files": [{"content": "from django.contrib.auth import get_user_model\nfrom django.db.models import 
signals\nfrom django.dispatch import receiver\n\nfrom adhocracy4.actions.models import Action\nfrom adhocracy4.actions.verbs import Verbs\nfrom adhocracy4.dashboard import signals as dashboard_signals\nfrom adhocracy4.follows.models import Follow\nfrom adhocracy4.projects.models import Project\n\nfrom . import emails\n\nUser = get_user_model()\n\n\n@receiver(signals.post_save, sender=Action)\ndef send_notifications(instance, created, **kwargs):\n action = instance\n verb = Verbs(action.verb)\n\n if action.type in ('item', 'comment') \\\n and verb in (Verbs.CREATE, Verbs.ADD):\n emails.NotifyCreatorEmail.send(action)\n\n if action.project:\n emails.NotifyModeratorsEmail.send(action)\n\n elif (action.type == 'phase' and\n action.project.project_type == 'a4projects.Project'):\n if verb == Verbs.START:\n emails.NotifyFollowersOnPhaseStartedEmail.send(action)\n elif verb == Verbs.SCHEDULE:\n emails.NotifyFollowersOnPhaseIsOverSoonEmail.send(action)\n\n elif action.type == 'offlineevent' and verb == Verbs.START:\n emails.NotifyFollowersOnUpcommingEventEmail.send(action)\n\n\n@receiver(dashboard_signals.project_created)\ndef send_project_created_notifications(**kwargs):\n project = kwargs.get('project')\n creator = kwargs.get('user')\n emails.NotifyInitiatorsOnProjectCreatedEmail.send(\n project, creator_pk=creator.pk)\n\n\n@receiver(signals.m2m_changed, sender=Project.moderators.through)\ndef autofollow_project_moderators(instance, action, pk_set, reverse, **kwargs):\n if action == 'post_add':\n autofollow_project(instance, pk_set, reverse)\n\n\ndef autofollow_project(instance, pk_set, reverse):\n if not reverse:\n project = instance\n users_pks = pk_set\n\n for user_pk in users_pks:\n Follow.objects.update_or_create(\n project=project,\n creator_id=user_pk,\n defaults={\n 'enabled': True\n }\n )\n else:\n user = instance\n project_pks = pk_set\n\n for project_pk in project_pks:\n Follow.objects.update_or_create(\n project_id=project_pk,\n creator=user,\n defaults={\n 'enabled': True\n }\n )\n", "path": "meinberlin/apps/notifications/signals.py"}]} | 1,152 | 142 |
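The meinberlin patch above distinguishes projects by the string-valued `project_type`, so phase notifications are only sent for plain participation projects and no longer for b-plan or external-project records. As an illustration only (the classes below are hypothetical, not the adhocracy4 models, and the subclass app label is invented), such a discriminator can be derived from app label plus class name, which is what makes the `'a4projects.Project'` comparison in the receiver work:

```python
# Hypothetical illustration of an app-label + class-name discriminator.
class Project:
    app_label = "a4projects"

    @property
    def project_type(self):
        return f"{self.app_label}.{type(self).__name__}"


class Bplan(Project):
    app_label = "meinberlin_bplans"  # invented label, for illustration only


print(Project().project_type)  # a4projects.Project -> phase mails still sent
print(Bplan().project_type)    # meinberlin_bplans.Bplan -> skipped by the new guard
```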
gh_patches_debug_16491 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-734 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
E0000 found unknown escape character ‘/’
version:1:1
cfn-lint --template vpc.cf.json
E0000 found unknown escape character ‘/’
vpc.cf.json:12:135
This is the string that it says contains the escape character error. This, however, works fine when deployed to the CFN service.
"^([0-9]{1,3}\\.){3}[0-9]{1,3}(\\\/([0-9]|[1-2][0-9]|3[0-2]))?$"

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/decode/__init__.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import sys
18 import logging
19 import six
20 try:
21 from json.decoder import JSONDecodeError
22 except ImportError:
23 JSONDecodeError = ValueError
24 from yaml.parser import ParserError, ScannerError
25 from yaml import YAMLError
26 import cfnlint.decode.cfn_yaml
27 import cfnlint.decode.cfn_json
28
29
30 LOGGER = logging.getLogger(__name__)
31
32
33 def decode(filename, ignore_bad_template):
34 """
35 Decode filename into an object
36 """
37 template = None
38 matches = []
39 try:
40 template = cfnlint.decode.cfn_yaml.load(filename)
41 except IOError as e:
42 if e.errno == 2:
43 LOGGER.error('Template file not found: %s', filename)
44 matches.append(create_match_file_error(filename, 'Template file not found: %s' % filename))
45 elif e.errno == 21:
46 LOGGER.error('Template references a directory, not a file: %s', filename)
47 matches.append(create_match_file_error(filename, 'Template references a directory, not a file: %s' % filename))
48 elif e.errno == 13:
49 LOGGER.error('Permission denied when accessing template file: %s', filename)
50 matches.append(create_match_file_error(filename, 'Permission denied when accessing template file: %s' % filename))
51
52 if matches:
53 return(None, matches)
54 except UnicodeDecodeError as err:
55 LOGGER.error('Cannot read file contents: %s', filename)
56 matches.append(create_match_file_error(filename, 'Cannot read file contents: %s' % filename))
57 except cfnlint.decode.cfn_yaml.CfnParseError as err:
58 err.match.Filename = filename
59 matches = [err.match]
60
61 except ParserError as err:
62 matches = [create_match_yaml_parser_error(err, filename)]
63 except ScannerError as err:
64 if err.problem == 'found character \'\\t\' that cannot start any token':
65 try:
66 template = cfnlint.decode.cfn_json.load(filename)
67 except cfnlint.decode.cfn_json.JSONDecodeError as json_err:
68 json_err.match.filename = filename
69 matches = [json_err.match]
70 except JSONDecodeError as json_err:
71 matches = [create_match_json_parser_error(json_err, filename)]
72 except Exception as json_err: # pylint: disable=W0703
73 if ignore_bad_template:
74 LOGGER.info('Template %s is malformed: %s', filename, err.problem)
75 LOGGER.info('Tried to parse %s as JSON but got error: %s', filename, str(json_err))
76 else:
77 LOGGER.error('Template %s is malformed: %s', filename, err.problem)
78 LOGGER.error('Tried to parse %s as JSON but got error: %s', filename, str(json_err))
79 return(None, [create_match_file_error(filename, 'Tried to parse %s as JSON but got error: %s' % (filename, str(json_err)))])
80 else:
81 matches = [create_match_yaml_parser_error(err, filename)]
82 except YAMLError as err:
83 matches = [create_match_file_error(filename, err)]
84
85 if not isinstance(template, dict) and not matches:
86 # Template isn't a dict which means nearly nothing will work
87 matches = [cfnlint.Match(1, 1, 1, 1, filename, cfnlint.ParseError(), message='Template needs to be an object.')]
88 return (template, matches)
89
90
91 def create_match_yaml_parser_error(parser_error, filename):
92 """Create a Match for a parser error"""
93 lineno = parser_error.problem_mark.line + 1
94 colno = parser_error.problem_mark.column + 1
95 msg = parser_error.problem
96 return cfnlint.Match(
97 lineno, colno, lineno, colno + 1, filename,
98 cfnlint.ParseError(), message=msg)
99
100
101 def create_match_file_error(filename, msg):
102 """Create a Match for a parser error"""
103 return cfnlint.Match(
104 linenumber=1, columnnumber=1, linenumberend=1, columnnumberend=2,
105 filename=filename, rule=cfnlint.ParseError(), message=msg)
106
107
108 def create_match_json_parser_error(parser_error, filename):
109 """Create a Match for a parser error"""
110 if sys.version_info[0] == 3:
111 lineno = parser_error.lineno
112 colno = parser_error.colno
113 msg = parser_error.msg
114 elif sys.version_info[0] == 2:
115 lineno = 1
116 colno = 1
117 msg = parser_error.message
118 return cfnlint.Match(
119 lineno, colno, lineno, colno + 1, filename, cfnlint.ParseError(), message=msg)
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/decode/__init__.py b/src/cfnlint/decode/__init__.py
--- a/src/cfnlint/decode/__init__.py
+++ b/src/cfnlint/decode/__init__.py
@@ -57,11 +57,12 @@
except cfnlint.decode.cfn_yaml.CfnParseError as err:
err.match.Filename = filename
matches = [err.match]
-
except ParserError as err:
matches = [create_match_yaml_parser_error(err, filename)]
except ScannerError as err:
- if err.problem == 'found character \'\\t\' that cannot start any token':
+ if err.problem in [
+ 'found character \'\\t\' that cannot start any token',
+ 'found unknown escape character']:
try:
template = cfnlint.decode.cfn_json.load(filename)
except cfnlint.decode.cfn_json.JSONDecodeError as json_err:
| {"golden_diff": "diff --git a/src/cfnlint/decode/__init__.py b/src/cfnlint/decode/__init__.py\n--- a/src/cfnlint/decode/__init__.py\n+++ b/src/cfnlint/decode/__init__.py\n@@ -57,11 +57,12 @@\n except cfnlint.decode.cfn_yaml.CfnParseError as err:\n err.match.Filename = filename\n matches = [err.match]\n-\n except ParserError as err:\n matches = [create_match_yaml_parser_error(err, filename)]\n except ScannerError as err:\n- if err.problem == 'found character \\'\\\\t\\' that cannot start any token':\n+ if err.problem in [\n+ 'found character \\'\\\\t\\' that cannot start any token',\n+ 'found unknown escape character']:\n try:\n template = cfnlint.decode.cfn_json.load(filename)\n except cfnlint.decode.cfn_json.JSONDecodeError as json_err:\n", "issue": "E0000 found unknown escape character \u2018/\u2019\nversion:1:1\r\n\r\ncfn-lint --template vpc.cf.json\r\nE0000 found unknown escape character \u2018/\u2019\r\nvpc.cf.json:12:135\r\n\r\nthis is the string that it says container the escape character error. this however works fine when deployed to the CFN service. \r\n\r\n\"^([0-9]{1,3}\\\\.){3}[0-9]{1,3}(\\\\\\/([0-9]|[1-2][0-9]|3[0-2]))?$\"\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport sys\nimport logging\nimport six\ntry:\n from json.decoder import JSONDecodeError\nexcept ImportError:\n JSONDecodeError = ValueError\nfrom yaml.parser import ParserError, ScannerError\nfrom yaml import YAMLError\nimport cfnlint.decode.cfn_yaml\nimport cfnlint.decode.cfn_json\n\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef decode(filename, ignore_bad_template):\n \"\"\"\n Decode filename into an object\n \"\"\"\n template = None\n matches = []\n try:\n template = cfnlint.decode.cfn_yaml.load(filename)\n except IOError as e:\n if e.errno == 2:\n LOGGER.error('Template file not found: %s', filename)\n matches.append(create_match_file_error(filename, 'Template file not found: %s' % filename))\n elif e.errno == 21:\n LOGGER.error('Template references a directory, not a file: %s', filename)\n matches.append(create_match_file_error(filename, 'Template references a directory, not a file: %s' % filename))\n elif e.errno == 13:\n LOGGER.error('Permission denied when accessing template file: %s', filename)\n matches.append(create_match_file_error(filename, 'Permission denied when accessing template file: %s' % filename))\n\n if matches:\n return(None, matches)\n except UnicodeDecodeError as err:\n LOGGER.error('Cannot read file contents: %s', filename)\n matches.append(create_match_file_error(filename, 'Cannot read file contents: %s' % filename))\n except cfnlint.decode.cfn_yaml.CfnParseError as err:\n err.match.Filename = filename\n matches = [err.match]\n\n except ParserError as err:\n matches = [create_match_yaml_parser_error(err, filename)]\n except ScannerError as err:\n if err.problem == 'found character \\'\\\\t\\' that cannot start any token':\n try:\n template = cfnlint.decode.cfn_json.load(filename)\n except cfnlint.decode.cfn_json.JSONDecodeError as json_err:\n json_err.match.filename = filename\n matches = [json_err.match]\n except JSONDecodeError as json_err:\n matches = [create_match_json_parser_error(json_err, filename)]\n except Exception as json_err: # pylint: disable=W0703\n if ignore_bad_template:\n LOGGER.info('Template %s is malformed: %s', filename, err.problem)\n LOGGER.info('Tried to parse %s as JSON but got error: %s', filename, str(json_err))\n else:\n LOGGER.error('Template %s is malformed: %s', filename, err.problem)\n LOGGER.error('Tried to parse %s as JSON but got error: %s', filename, str(json_err))\n return(None, [create_match_file_error(filename, 'Tried to parse %s as JSON but got error: %s' % (filename, str(json_err)))])\n else:\n matches = [create_match_yaml_parser_error(err, filename)]\n except YAMLError as err:\n matches = [create_match_file_error(filename, err)]\n\n if not isinstance(template, dict) and not matches:\n # Template isn't a dict which means nearly nothing will work\n matches = [cfnlint.Match(1, 1, 1, 1, filename, cfnlint.ParseError(), message='Template needs to be an object.')]\n return (template, matches)\n\n\ndef create_match_yaml_parser_error(parser_error, filename):\n \"\"\"Create a Match for a parser error\"\"\"\n lineno = parser_error.problem_mark.line + 1\n colno = parser_error.problem_mark.column + 1\n msg = parser_error.problem\n return cfnlint.Match(\n lineno, colno, lineno, colno + 1, filename,\n cfnlint.ParseError(), message=msg)\n\n\ndef 
create_match_file_error(filename, msg):\n \"\"\"Create a Match for a parser error\"\"\"\n return cfnlint.Match(\n linenumber=1, columnnumber=1, linenumberend=1, columnnumberend=2,\n filename=filename, rule=cfnlint.ParseError(), message=msg)\n\n\ndef create_match_json_parser_error(parser_error, filename):\n \"\"\"Create a Match for a parser error\"\"\"\n if sys.version_info[0] == 3:\n lineno = parser_error.lineno\n colno = parser_error.colno\n msg = parser_error.msg\n elif sys.version_info[0] == 2:\n lineno = 1\n colno = 1\n msg = parser_error.message\n return cfnlint.Match(\n lineno, colno, lineno, colno + 1, filename, cfnlint.ParseError(), message=msg)\n", "path": "src/cfnlint/decode/__init__.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport sys\nimport logging\nimport six\ntry:\n from json.decoder import JSONDecodeError\nexcept ImportError:\n JSONDecodeError = ValueError\nfrom yaml.parser import ParserError, ScannerError\nfrom yaml import YAMLError\nimport cfnlint.decode.cfn_yaml\nimport cfnlint.decode.cfn_json\n\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef decode(filename, ignore_bad_template):\n \"\"\"\n Decode filename into an object\n \"\"\"\n template = None\n matches = []\n try:\n template = cfnlint.decode.cfn_yaml.load(filename)\n except IOError as e:\n if e.errno == 2:\n LOGGER.error('Template file not found: %s', filename)\n matches.append(create_match_file_error(filename, 'Template file not found: %s' % filename))\n elif e.errno == 21:\n LOGGER.error('Template references a directory, not a file: %s', filename)\n matches.append(create_match_file_error(filename, 'Template references a directory, not a file: %s' % filename))\n elif e.errno == 13:\n LOGGER.error('Permission denied when accessing template file: %s', filename)\n matches.append(create_match_file_error(filename, 'Permission denied when accessing template file: %s' % filename))\n\n if matches:\n return(None, matches)\n except UnicodeDecodeError as err:\n LOGGER.error('Cannot read file contents: %s', filename)\n matches.append(create_match_file_error(filename, 'Cannot read file contents: %s' % filename))\n except cfnlint.decode.cfn_yaml.CfnParseError as err:\n err.match.Filename = filename\n matches = [err.match]\n except ParserError as err:\n matches = [create_match_yaml_parser_error(err, filename)]\n except ScannerError as err:\n if err.problem in [\n 'found character \\'\\\\t\\' that cannot start any token',\n 'found unknown escape character']:\n try:\n template = cfnlint.decode.cfn_json.load(filename)\n except cfnlint.decode.cfn_json.JSONDecodeError as 
json_err:\n json_err.match.filename = filename\n matches = [json_err.match]\n except JSONDecodeError as json_err:\n matches = [create_match_json_parser_error(json_err, filename)]\n except Exception as json_err: # pylint: disable=W0703\n if ignore_bad_template:\n LOGGER.info('Template %s is malformed: %s', filename, err.problem)\n LOGGER.info('Tried to parse %s as JSON but got error: %s', filename, str(json_err))\n else:\n LOGGER.error('Template %s is malformed: %s', filename, err.problem)\n LOGGER.error('Tried to parse %s as JSON but got error: %s', filename, str(json_err))\n return(None, [create_match_file_error(filename, 'Tried to parse %s as JSON but got error: %s' % (filename, str(json_err)))])\n else:\n matches = [create_match_yaml_parser_error(err, filename)]\n except YAMLError as err:\n matches = [create_match_file_error(filename, err)]\n\n if not isinstance(template, dict) and not matches:\n # Template isn't a dict which means nearly nothing will work\n matches = [cfnlint.Match(1, 1, 1, 1, filename, cfnlint.ParseError(), message='Template needs to be an object.')]\n return (template, matches)\n\n\ndef create_match_yaml_parser_error(parser_error, filename):\n \"\"\"Create a Match for a parser error\"\"\"\n lineno = parser_error.problem_mark.line + 1\n colno = parser_error.problem_mark.column + 1\n msg = parser_error.problem\n return cfnlint.Match(\n lineno, colno, lineno, colno + 1, filename,\n cfnlint.ParseError(), message=msg)\n\n\ndef create_match_file_error(filename, msg):\n \"\"\"Create a Match for a parser error\"\"\"\n return cfnlint.Match(\n linenumber=1, columnnumber=1, linenumberend=1, columnnumberend=2,\n filename=filename, rule=cfnlint.ParseError(), message=msg)\n\n\ndef create_match_json_parser_error(parser_error, filename):\n \"\"\"Create a Match for a parser error\"\"\"\n if sys.version_info[0] == 3:\n lineno = parser_error.lineno\n colno = parser_error.colno\n msg = parser_error.msg\n elif sys.version_info[0] == 2:\n lineno = 1\n colno = 1\n msg = parser_error.message\n return cfnlint.Match(\n lineno, colno, lineno, colno + 1, filename, cfnlint.ParseError(), message=msg)\n", "path": "src/cfnlint/decode/__init__.py"}]} | 1,922 | 206 |
gh_patches_debug_2520 | rasdani/github-patches | git_diff | cal-itp__benefits-1215 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Refactor Agency dynamic headline into model prop
Right now we are hardcoding the [Agency index headline PO key](https://github.com/cal-itp/benefits/blob/dev/benefits/core/views.py#L62):
```python
page = viewmodels.Page(
title=_("core.pages.agency_index.title"),
headline=_("core.pages.agency_index.mst_cc.headline"),
button=button,
classes="home",
)
```
This is fine for MST. We need to make this a dynamic key coming from an `agency` prop for the future.
## Acceptance Criteria
<!-- Remember to consider edge cases -->
- [ ] `agency_index` gets its headline from the selected `agency`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/core/views.py`
Content:
```
1 """
2 The core application: view definition for the root of the webapp.
3 """
4 from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError
5 from django.template import loader
6 from django.template.response import TemplateResponse
7 from django.urls import reverse
8 from django.utils.translation import pgettext, gettext as _
9
10 from . import models, session, viewmodels
11 from .middleware import pageview_decorator
12
13 ROUTE_INDEX = "core:index"
14 ROUTE_ELIGIBILITY = "eligibility:index"
15 ROUTE_HELP = "core:help"
16 ROUTE_LOGGED_OUT = "core:logged_out"
17
18 TEMPLATE_INDEX = "core/index.html"
19 TEMPLATE_AGENCY = "core/agency_index.html"
20 TEMPLATE_HELP = "core/help.html"
21 TEMPLATE_LOGGED_OUT = "core/logged_out.html"
22
23
24 @pageview_decorator
25 def index(request):
26 """View handler for the main entry page."""
27 session.reset(request)
28
29 page = viewmodels.Page(
30 title=_("core.pages.index.title"),
31 headline=_("core.pages.index.headline"),
32 modal=viewmodels.AgencySelector(
33 id="agency-selector",
34 aria_labelledby_id="agency-selector-modal-label",
35 button_text=_("core.pages.index.button"),
36 ),
37 )
38
39 return TemplateResponse(request, TEMPLATE_INDEX, page.context_dict())
40
41
42 @pageview_decorator
43 def agency_index(request, agency):
44 """View handler for an agency entry page."""
45 session.reset(request)
46 session.update(request, agency=agency, origin=agency.index_url)
47
48 button = viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse(ROUTE_ELIGIBILITY))
49
50 page = viewmodels.Page(
51 title=_("core.pages.agency_index.title"),
52 headline=_("core.pages.agency_index.mst_cc.headline"),
53 button=button,
54 )
55
56 return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())
57
58
59 @pageview_decorator
60 def agency_public_key(request, agency):
61 """View handler returns an agency's public key as plain text."""
62 return HttpResponse(agency.public_key_data, content_type="text/plain")
63
64
65 @pageview_decorator
66 def help(request):
67 """View handler for the help page."""
68 if session.active_agency(request):
69 agency = session.agency(request)
70 buttons = viewmodels.Button.agency_contact_links(agency)
71 else:
72 buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]
73
74 buttons.append(viewmodels.Button.home(request, _("core.buttons.back")))
75
76 page = viewmodels.Page(
77 title=_("core.buttons.help"),
78 headline=_("core.buttons.help"),
79 buttons=buttons,
80 )
81
82 return TemplateResponse(request, TEMPLATE_HELP, page.context_dict())
83
84
85 @pageview_decorator
86 def bad_request(request, exception, template_name="400.html"):
87 """View handler for HTTP 400 Bad Request responses."""
88 if session.active_agency(request):
89 session.update(request, origin=session.agency(request).index_url)
90 else:
91 session.update(request, origin=reverse(ROUTE_INDEX))
92
93 home = viewmodels.Button.home(request)
94 page = viewmodels.ErrorPage.server_error(button=home)
95 t = loader.get_template(template_name)
96
97 return HttpResponseBadRequest(t.render(page.context_dict()))
98
99
100 @pageview_decorator
101 def csrf_failure(request, reason):
102 """
103 View handler for CSRF_FAILURE_VIEW with custom data.
104 """
105 if session.active_agency(request):
106 session.update(request, origin=session.agency(request).index_url)
107 else:
108 session.update(request, origin=reverse(ROUTE_INDEX))
109
110 home = viewmodels.Button.home(request)
111 page = viewmodels.ErrorPage.not_found(button=home, path=request.path)
112 t = loader.get_template("400.html")
113
114 return HttpResponseNotFound(t.render(page.context_dict()))
115
116
117 @pageview_decorator
118 def page_not_found(request, exception, template_name="404.html"):
119 """View handler for HTTP 404 Not Found responses."""
120 if session.active_agency(request):
121 session.update(request, origin=session.agency(request).index_url)
122 else:
123 session.update(request, origin=reverse(ROUTE_INDEX))
124
125 home = viewmodels.Button.home(request)
126 # show a more user-friendly message instead of not_found
127 page = viewmodels.ErrorPage.user_error(button=home, path=request.path)
128 t = loader.get_template(template_name)
129
130 return HttpResponseNotFound(t.render(page.context_dict()))
131
132
133 @pageview_decorator
134 def server_error(request, template_name="500.html"):
135 """View handler for HTTP 500 Server Error responses."""
136 if session.active_agency(request):
137 session.update(request, origin=session.agency(request).index_url)
138 else:
139 session.update(request, origin=reverse(ROUTE_INDEX))
140
141 home = viewmodels.Button.home(request)
142 page = viewmodels.ErrorPage.server_error(button=home)
143 t = loader.get_template(template_name)
144
145 return HttpResponseServerError(t.render(page.context_dict()))
146
147
148 def logged_out(request):
149 """View handler for the final log out confirmation message."""
150 page = viewmodels.Page(
151 title=_("core.pages.logged_out.title"),
152 icon=viewmodels.Icon("happybus", pgettext("image alt text", "core.icons.happybus")),
153 )
154
155 return TemplateResponse(request, TEMPLATE_LOGGED_OUT, page.context_dict())
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/benefits/core/views.py b/benefits/core/views.py
--- a/benefits/core/views.py
+++ b/benefits/core/views.py
@@ -49,7 +49,8 @@
page = viewmodels.Page(
title=_("core.pages.agency_index.title"),
- headline=_("core.pages.agency_index.mst_cc.headline"),
+ headline=_("core.pages.agency_index.headline%(transit_agency_short_name)s")
+ % {"transit_agency_short_name": agency.short_name},
button=button,
)
| {"golden_diff": "diff --git a/benefits/core/views.py b/benefits/core/views.py\n--- a/benefits/core/views.py\n+++ b/benefits/core/views.py\n@@ -49,7 +49,8 @@\n \n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n- headline=_(\"core.pages.agency_index.mst_cc.headline\"),\n+ headline=_(\"core.pages.agency_index.headline%(transit_agency_short_name)s\")\n+ % {\"transit_agency_short_name\": agency.short_name},\n button=button,\n )\n", "issue": "Refactor Agency dynamic headline into model prop\nRight now we are hardcoding the [Agency index headline PO key](https://github.com/cal-itp/benefits/blob/dev/benefits/core/views.py#L62):\r\n\r\n```python\r\npage = viewmodels.Page(\r\n title=_(\"core.pages.agency_index.title\"),\r\n headline=_(\"core.pages.agency_index.mst_cc.headline\"),\r\n button=button,\r\n classes=\"home\",\r\n )\r\n```\r\n\r\nThis is fine for MST. We need to make this a dynamic key coming from an `agency` prop for the future.\r\n\r\n## Acceptance Criteria\r\n\r\n<!-- Remember to consider edge cases -->\r\n\r\n- [ ] `agency_index` gets its headline from the selected `agency`\r\n\n", "before_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import pgettext, gettext as _\n\nfrom . import models, session, viewmodels\nfrom .middleware import pageview_decorator\n\nROUTE_INDEX = \"core:index\"\nROUTE_ELIGIBILITY = \"eligibility:index\"\nROUTE_HELP = \"core:help\"\nROUTE_LOGGED_OUT = \"core:logged_out\"\n\nTEMPLATE_INDEX = \"core/index.html\"\nTEMPLATE_AGENCY = \"core/agency_index.html\"\nTEMPLATE_HELP = \"core/help.html\"\nTEMPLATE_LOGGED_OUT = \"core/logged_out.html\"\n\n\n@pageview_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n page = viewmodels.Page(\n title=_(\"core.pages.index.title\"),\n headline=_(\"core.pages.index.headline\"),\n modal=viewmodels.AgencySelector(\n id=\"agency-selector\",\n aria_labelledby_id=\"agency-selector-modal-label\",\n button_text=_(\"core.pages.index.button\"),\n ),\n )\n\n return TemplateResponse(request, TEMPLATE_INDEX, page.context_dict())\n\n\n@pageview_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(ROUTE_ELIGIBILITY))\n\n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n headline=_(\"core.pages.agency_index.mst_cc.headline\"),\n button=button,\n )\n\n return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())\n\n\n@pageview_decorator\ndef agency_public_key(request, agency):\n \"\"\"View handler returns an agency's public key as plain text.\"\"\"\n return HttpResponse(agency.public_key_data, content_type=\"text/plain\")\n\n\n@pageview_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n 
buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n headline=_(\"core.buttons.help\"),\n buttons=buttons,\n )\n\n return TemplateResponse(request, TEMPLATE_HELP, page.context_dict())\n\n\n@pageview_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n # show a more user-friendly message instead of not_found\n page = viewmodels.ErrorPage.user_error(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n\n\ndef logged_out(request):\n \"\"\"View handler for the final log out confirmation message.\"\"\"\n page = viewmodels.Page(\n title=_(\"core.pages.logged_out.title\"),\n icon=viewmodels.Icon(\"happybus\", pgettext(\"image alt text\", \"core.icons.happybus\")),\n )\n\n return TemplateResponse(request, TEMPLATE_LOGGED_OUT, page.context_dict())\n", "path": "benefits/core/views.py"}], "after_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import pgettext, gettext as _\n\nfrom . 
import models, session, viewmodels\nfrom .middleware import pageview_decorator\n\nROUTE_INDEX = \"core:index\"\nROUTE_ELIGIBILITY = \"eligibility:index\"\nROUTE_HELP = \"core:help\"\nROUTE_LOGGED_OUT = \"core:logged_out\"\n\nTEMPLATE_INDEX = \"core/index.html\"\nTEMPLATE_AGENCY = \"core/agency_index.html\"\nTEMPLATE_HELP = \"core/help.html\"\nTEMPLATE_LOGGED_OUT = \"core/logged_out.html\"\n\n\n@pageview_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n page = viewmodels.Page(\n title=_(\"core.pages.index.title\"),\n headline=_(\"core.pages.index.headline\"),\n modal=viewmodels.AgencySelector(\n id=\"agency-selector\",\n aria_labelledby_id=\"agency-selector-modal-label\",\n button_text=_(\"core.pages.index.button\"),\n ),\n )\n\n return TemplateResponse(request, TEMPLATE_INDEX, page.context_dict())\n\n\n@pageview_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(ROUTE_ELIGIBILITY))\n\n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n headline=_(\"core.pages.agency_index.headline%(transit_agency_short_name)s\")\n % {\"transit_agency_short_name\": agency.short_name},\n button=button,\n )\n\n return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())\n\n\n@pageview_decorator\ndef agency_public_key(request, agency):\n \"\"\"View handler returns an agency's public key as plain text.\"\"\"\n return HttpResponse(agency.public_key_data, content_type=\"text/plain\")\n\n\n@pageview_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n headline=_(\"core.buttons.help\"),\n buttons=buttons,\n )\n\n return TemplateResponse(request, TEMPLATE_HELP, page.context_dict())\n\n\n@pageview_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if 
session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n # show a more user-friendly message instead of not_found\n page = viewmodels.ErrorPage.user_error(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n\n\ndef logged_out(request):\n \"\"\"View handler for the final log out confirmation message.\"\"\"\n page = viewmodels.Page(\n title=_(\"core.pages.logged_out.title\"),\n icon=viewmodels.Icon(\"happybus\", pgettext(\"image alt text\", \"core.icons.happybus\")),\n )\n\n return TemplateResponse(request, TEMPLATE_LOGGED_OUT, page.context_dict())\n", "path": "benefits/core/views.py"}]} | 1,893 | 123 |
gh_patches_debug_13285 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-341 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
La Veneciana (Argentina)
Ice cream shop.
HTML webpage to scrape: http://www.laveneciana.com.ar/sucursales.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/laveneciana.py`
Content:
```
1 import scrapy
2 import re
3 from locations.items import GeojsonPointItem
4 class LavenecianaSpider(scrapy.Spider):
5 name = "laveneciana"
6 allowed_domains = ["www.laveneciana.com.ar"]
7 download_delay = 0.5
8 start_urls = (
9 'http://www.laveneciana.com.ar/sucursales.html',
10 )
11 def parse(self, response):
12 stores = response.xpath('//div[@class="navigation-container"]/div[@id="thumbs"]/ul[@class="thumbs noscript"]/li')
13 for store in stores:
14 addr_full_tel = store.xpath('normalize-space(./div[@class="caption"]/div[@class="image-desc"]/text())').extract_first()
15 location = store.xpath('normalize-space(./div[@class="caption"]/div[@class="ubicacion"]/iframe/@src)').extract_first()
16 position = re.findall(r"ll=[0-9-.,]+" ,location)
17 id = re.findall(r"cid=[0-9]+" ,location)
18 if(len(position)>0):
19 lat =float( position[0][3:].split(',')[0])
20 lon = float(position[0][3:].split(',')[1])
21 id = id[0][4:]
22 else:
23 lat=''
24 lon=''
25 id=''
26 addr_full = re.findall(r"^[^()]{4}[^(.)]+" , addr_full_tel)[0]
27 phone_number = re.findall(r"[0-9]{4}-[0-9]{4}",addr_full_tel)
28 if(len(phone_number)>0):
29 phone_number = phone_number[0]
30 else:
31 phone_number =''
32 if(addr_full!="Direccion"):
33 properties = {
34 'addr_full': addr_full,
35 'phone':phone_number,
36 'city': '',
37 'state': '',
38 'postcode':'',
39 'ref': id,
40 'website': response.url,
41 'lat': lat,
42 'lon': lon,
43 }
44 yield GeojsonPointItem(**properties)
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/laveneciana.py b/locations/spiders/laveneciana.py
--- a/locations/spiders/laveneciana.py
+++ b/locations/spiders/laveneciana.py
@@ -23,13 +23,9 @@
lat=''
lon=''
id=''
- addr_full = re.findall(r"^[^()]{4}[^(.)]+" , addr_full_tel)[0]
- phone_number = re.findall(r"[0-9]{4}-[0-9]{4}",addr_full_tel)
- if(len(phone_number)>0):
- phone_number = phone_number[0]
- else:
- phone_number =''
- if(addr_full!="Direccion"):
+ addr_full = addr_full_tel.split('Tel.: ')[0]
+ phone_number = addr_full_tel.split('Tel.: ')[1]
+ if(addr_full!="Direccion... "):
properties = {
'addr_full': addr_full,
'phone':phone_number,
| {"golden_diff": "diff --git a/locations/spiders/laveneciana.py b/locations/spiders/laveneciana.py\n--- a/locations/spiders/laveneciana.py\n+++ b/locations/spiders/laveneciana.py\n@@ -23,13 +23,9 @@\n lat=''\n lon=''\n id=''\n- addr_full = re.findall(r\"^[^()]{4}[^(.)]+\" , addr_full_tel)[0]\n- phone_number = re.findall(r\"[0-9]{4}-[0-9]{4}\",addr_full_tel)\n- if(len(phone_number)>0):\n- phone_number = phone_number[0]\n- else:\n- phone_number =''\n- if(addr_full!=\"Direccion\"):\n+ addr_full = addr_full_tel.split('Tel.: ')[0]\n+ phone_number = addr_full_tel.split('Tel.: ')[1]\n+ if(addr_full!=\"Direccion... \"):\n properties = {\n 'addr_full': addr_full,\n 'phone':phone_number,\n", "issue": "La Veneciana (Argentina)\nIce cream shop.\r\n\r\nHTML webpage to scrape: http://www.laveneciana.com.ar/sucursales.html\n", "before_files": [{"content": "import scrapy\nimport re\nfrom locations.items import GeojsonPointItem\nclass LavenecianaSpider(scrapy.Spider):\n name = \"laveneciana\"\n allowed_domains = [\"www.laveneciana.com.ar\"]\n download_delay = 0.5\n start_urls = (\n 'http://www.laveneciana.com.ar/sucursales.html',\n )\n def parse(self, response):\n stores = response.xpath('//div[@class=\"navigation-container\"]/div[@id=\"thumbs\"]/ul[@class=\"thumbs noscript\"]/li')\n for store in stores:\n addr_full_tel = store.xpath('normalize-space(./div[@class=\"caption\"]/div[@class=\"image-desc\"]/text())').extract_first()\n location = store.xpath('normalize-space(./div[@class=\"caption\"]/div[@class=\"ubicacion\"]/iframe/@src)').extract_first()\n position = re.findall(r\"ll=[0-9-.,]+\" ,location)\n id = re.findall(r\"cid=[0-9]+\" ,location)\n if(len(position)>0):\n lat =float( position[0][3:].split(',')[0])\n lon = float(position[0][3:].split(',')[1])\n id = id[0][4:]\n else:\n lat=''\n lon=''\n id=''\n addr_full = re.findall(r\"^[^()]{4}[^(.)]+\" , addr_full_tel)[0]\n phone_number = re.findall(r\"[0-9]{4}-[0-9]{4}\",addr_full_tel)\n if(len(phone_number)>0):\n phone_number = phone_number[0]\n else:\n phone_number =''\n if(addr_full!=\"Direccion\"):\n properties = {\n 'addr_full': addr_full,\n 'phone':phone_number,\n 'city': '',\n 'state': '',\n 'postcode':'',\n 'ref': id,\n 'website': response.url,\n 'lat': lat,\n 'lon': lon,\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/laveneciana.py"}], "after_files": [{"content": "import scrapy\nimport re\nfrom locations.items import GeojsonPointItem\nclass LavenecianaSpider(scrapy.Spider):\n name = \"laveneciana\"\n allowed_domains = [\"www.laveneciana.com.ar\"]\n download_delay = 0.5\n start_urls = (\n 'http://www.laveneciana.com.ar/sucursales.html',\n )\n def parse(self, response):\n stores = response.xpath('//div[@class=\"navigation-container\"]/div[@id=\"thumbs\"]/ul[@class=\"thumbs noscript\"]/li')\n for store in stores:\n addr_full_tel = store.xpath('normalize-space(./div[@class=\"caption\"]/div[@class=\"image-desc\"]/text())').extract_first()\n location = store.xpath('normalize-space(./div[@class=\"caption\"]/div[@class=\"ubicacion\"]/iframe/@src)').extract_first()\n position = re.findall(r\"ll=[0-9-.,]+\" ,location)\n id = re.findall(r\"cid=[0-9]+\" ,location)\n if(len(position)>0):\n lat =float( position[0][3:].split(',')[0])\n lon = float(position[0][3:].split(',')[1])\n id = id[0][4:]\n else:\n lat=''\n lon=''\n id=''\n addr_full = addr_full_tel.split('Tel.: ')[0]\n phone_number = addr_full_tel.split('Tel.: ')[1]\n if(addr_full!=\"Direccion... 
\"):\n properties = {\n 'addr_full': addr_full,\n 'phone':phone_number,\n 'city': '',\n 'state': '',\n 'postcode':'',\n 'ref': id,\n 'website': response.url,\n 'lat': lat,\n 'lon': lon,\n }\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/laveneciana.py"}]} | 801 | 219 |
gh_patches_debug_60373 | rasdani/github-patches | git_diff | UTNkar__moore-151 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Paragraph block alignment
<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->
See image:

[Description of the issue]
### Steps to Reproduce
1. [First Step]
2. [Second Step]
3. [and so on...]
<!-- Please select the appropriate "topic category"/blue and "issue type"/yellow label -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/blocks/models.py`
Content:
```
1 from wagtail.wagtailcore import blocks
2 from wagtail.wagtailimages.blocks import ImageChooserBlock
3
4 from django.utils.translation import ugettext_lazy as _
5
6
7 class CountersBlock(blocks.StructBlock):
8 title = blocks.CharBlock()
9 counters = blocks.ListBlock(blocks.StructBlock([
10 ('icon', blocks.CharBlock(
11 help_text=_('Material icon font icon text, as found on: '
12 'https://material.io/icons'),
13 )),
14 ('value', blocks.CharBlock()),
15 ('description', blocks.CharBlock(required=False))
16 ]))
17 style = blocks.ChoiceBlock(choices=[
18 ('light', _('Light')),
19 ('dark', _('Dark')),
20 ])
21
22 class Meta:
23 label = _('Counters')
24 icon = 'fa-balance-scale'
25 template = 'blocks/counter.html'
26
27
28 class HeadingBlock(blocks.StructBlock):
29 title = blocks.CharBlock(required=True)
30 subtitle = blocks.CharBlock(required=False)
31
32 class Meta:
33 label = _('Heading')
34 icon = 'fa-header'
35 template = 'blocks/title.html'
36
37
38 class ImageDescriptionBlock(blocks.StructBlock):
39 description = blocks.RichTextBlock()
40 image = ImageChooserBlock()
41 image_alignment = blocks.ChoiceBlock(choices=[
42 ('left', _('Left')),
43 ('right', _('Right')),
44 ])
45 hide_on_med = blocks.BooleanBlock(required=False)
46
47 class Meta:
48 label = _('Image + Description')
49 icon = 'fa-file-image-o '
50 template = 'blocks/image_description.html'
51
52
53 class ImageIconsBlock(blocks.StructBlock):
54 title = blocks.CharBlock()
55 image = ImageChooserBlock()
56 image_alignment = blocks.ChoiceBlock(choices=[
57 ('left', _('Left')),
58 ('right', _('Right')),
59 ])
60 icons = blocks.ListBlock(blocks.StructBlock([
61 ('icon', blocks.CharBlock(
62 help_text=_('Material icon font icon text, as found on: '
63 'https://material.io/icons'),
64 )),
65 ('title', blocks.CharBlock()),
66 ('description', blocks.CharBlock())
67 ]))
68 hide_on_med = blocks.BooleanBlock(required=False)
69
70 class Meta:
71 label = _('Image + Icons')
72 icon = 'fa-file-excel-o'
73 template = 'blocks/image_icons.html'
74
75
76 class OverlayBlock(blocks.StructBlock):
77 image = ImageChooserBlock()
78 title = blocks.CharBlock(required=False)
79 description = blocks.CharBlock(required=False)
80
81 link = blocks.URLBlock(required=False)
82 button = blocks.CharBlock(required=False)
83
84 class Meta:
85 label = _('Image overlay')
86 icon = 'fa-clone'
87 template = 'blocks/overlay.html'
88
89
90 WAGTAIL_STATIC_BLOCKTYPES = [
91 ('heading', HeadingBlock()),
92 ('paragraph', blocks.RichTextBlock()),
93 ('image_description', ImageIconsBlock()),
94 ('image_icons', ImageDescriptionBlock()),
95 ('overlay', OverlayBlock()),
96 ('logos', blocks.ListBlock(
97 ImageChooserBlock(),
98 icon='fa-pied-piper',
99 template='blocks/logos.html',
100 label=_('Logos'),
101 )),
102 ('counters', CountersBlock()),
103 ('image', ImageChooserBlock(template='blocks/image.html')),
104 ]
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/blocks/models.py b/website/blocks/models.py
--- a/website/blocks/models.py
+++ b/website/blocks/models.py
@@ -89,7 +89,7 @@
WAGTAIL_STATIC_BLOCKTYPES = [
('heading', HeadingBlock()),
- ('paragraph', blocks.RichTextBlock()),
+ ('paragraph', blocks.RichTextBlock(template='blocks/paragraph.html')),
('image_description', ImageIconsBlock()),
('image_icons', ImageDescriptionBlock()),
('overlay', OverlayBlock()),
| {"golden_diff": "diff --git a/website/blocks/models.py b/website/blocks/models.py\n--- a/website/blocks/models.py\n+++ b/website/blocks/models.py\n@@ -89,7 +89,7 @@\n \n WAGTAIL_STATIC_BLOCKTYPES = [\n ('heading', HeadingBlock()),\n- ('paragraph', blocks.RichTextBlock()),\n+ ('paragraph', blocks.RichTextBlock(template='blocks/paragraph.html')),\n ('image_description', ImageIconsBlock()),\n ('image_icons', ImageDescriptionBlock()),\n ('overlay', OverlayBlock()),\n", "issue": "Paragraph block alignment\n<!-- Do you want to ask a question? Are you looking for support? The system administrator can help you: [email protected] -->\r\n\r\nSee image:\r\n\r\n\r\n\r\n[Description of the issue]\r\n\r\n### Steps to Reproduce\r\n\r\n1. [First Step]\r\n2. [Second Step]\r\n3. [and so on...]\r\n\r\n<!-- Please select the appropriate \"topic category\"/blue and \"issue type\"/yellow label -->\r\n\n", "before_files": [{"content": "from wagtail.wagtailcore import blocks\nfrom wagtail.wagtailimages.blocks import ImageChooserBlock\n\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass CountersBlock(blocks.StructBlock):\n title = blocks.CharBlock()\n counters = blocks.ListBlock(blocks.StructBlock([\n ('icon', blocks.CharBlock(\n help_text=_('Material icon font icon text, as found on: '\n 'https://material.io/icons'),\n )),\n ('value', blocks.CharBlock()),\n ('description', blocks.CharBlock(required=False))\n ]))\n style = blocks.ChoiceBlock(choices=[\n ('light', _('Light')),\n ('dark', _('Dark')),\n ])\n\n class Meta:\n label = _('Counters')\n icon = 'fa-balance-scale'\n template = 'blocks/counter.html'\n\n\nclass HeadingBlock(blocks.StructBlock):\n title = blocks.CharBlock(required=True)\n subtitle = blocks.CharBlock(required=False)\n\n class Meta:\n label = _('Heading')\n icon = 'fa-header'\n template = 'blocks/title.html'\n\n\nclass ImageDescriptionBlock(blocks.StructBlock):\n description = blocks.RichTextBlock()\n image = ImageChooserBlock()\n image_alignment = blocks.ChoiceBlock(choices=[\n ('left', _('Left')),\n ('right', _('Right')),\n ])\n hide_on_med = blocks.BooleanBlock(required=False)\n\n class Meta:\n label = _('Image + Description')\n icon = 'fa-file-image-o '\n template = 'blocks/image_description.html'\n\n\nclass ImageIconsBlock(blocks.StructBlock):\n title = blocks.CharBlock()\n image = ImageChooserBlock()\n image_alignment = blocks.ChoiceBlock(choices=[\n ('left', _('Left')),\n ('right', _('Right')),\n ])\n icons = blocks.ListBlock(blocks.StructBlock([\n ('icon', blocks.CharBlock(\n help_text=_('Material icon font icon text, as found on: '\n 'https://material.io/icons'),\n )),\n ('title', blocks.CharBlock()),\n ('description', blocks.CharBlock())\n ]))\n hide_on_med = blocks.BooleanBlock(required=False)\n\n class Meta:\n label = _('Image + Icons')\n icon = 'fa-file-excel-o'\n template = 'blocks/image_icons.html'\n\n\nclass OverlayBlock(blocks.StructBlock):\n image = ImageChooserBlock()\n title = blocks.CharBlock(required=False)\n description = blocks.CharBlock(required=False)\n\n link = blocks.URLBlock(required=False)\n button = blocks.CharBlock(required=False)\n\n class Meta:\n label = _('Image overlay')\n icon = 'fa-clone'\n template = 'blocks/overlay.html'\n\n\nWAGTAIL_STATIC_BLOCKTYPES = [\n ('heading', HeadingBlock()),\n ('paragraph', blocks.RichTextBlock()),\n ('image_description', ImageIconsBlock()),\n ('image_icons', ImageDescriptionBlock()),\n ('overlay', OverlayBlock()),\n ('logos', blocks.ListBlock(\n ImageChooserBlock(),\n icon='fa-pied-piper',\n 
template='blocks/logos.html',\n label=_('Logos'),\n )),\n ('counters', CountersBlock()),\n ('image', ImageChooserBlock(template='blocks/image.html')),\n]\n", "path": "website/blocks/models.py"}], "after_files": [{"content": "from wagtail.wagtailcore import blocks\nfrom wagtail.wagtailimages.blocks import ImageChooserBlock\n\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass CountersBlock(blocks.StructBlock):\n title = blocks.CharBlock()\n counters = blocks.ListBlock(blocks.StructBlock([\n ('icon', blocks.CharBlock(\n help_text=_('Material icon font icon text, as found on: '\n 'https://material.io/icons'),\n )),\n ('value', blocks.CharBlock()),\n ('description', blocks.CharBlock(required=False))\n ]))\n style = blocks.ChoiceBlock(choices=[\n ('light', _('Light')),\n ('dark', _('Dark')),\n ])\n\n class Meta:\n label = _('Counters')\n icon = 'fa-balance-scale'\n template = 'blocks/counter.html'\n\n\nclass HeadingBlock(blocks.StructBlock):\n title = blocks.CharBlock(required=True)\n subtitle = blocks.CharBlock(required=False)\n\n class Meta:\n label = _('Heading')\n icon = 'fa-header'\n template = 'blocks/title.html'\n\n\nclass ImageDescriptionBlock(blocks.StructBlock):\n description = blocks.RichTextBlock()\n image = ImageChooserBlock()\n image_alignment = blocks.ChoiceBlock(choices=[\n ('left', _('Left')),\n ('right', _('Right')),\n ])\n hide_on_med = blocks.BooleanBlock(required=False)\n\n class Meta:\n label = _('Image + Description')\n icon = 'fa-file-image-o '\n template = 'blocks/image_description.html'\n\n\nclass ImageIconsBlock(blocks.StructBlock):\n title = blocks.CharBlock()\n image = ImageChooserBlock()\n image_alignment = blocks.ChoiceBlock(choices=[\n ('left', _('Left')),\n ('right', _('Right')),\n ])\n icons = blocks.ListBlock(blocks.StructBlock([\n ('icon', blocks.CharBlock(\n help_text=_('Material icon font icon text, as found on: '\n 'https://material.io/icons'),\n )),\n ('title', blocks.CharBlock()),\n ('description', blocks.CharBlock())\n ]))\n hide_on_med = blocks.BooleanBlock(required=False)\n\n class Meta:\n label = _('Image + Icons')\n icon = 'fa-file-excel-o'\n template = 'blocks/image_icons.html'\n\n\nclass OverlayBlock(blocks.StructBlock):\n image = ImageChooserBlock()\n title = blocks.CharBlock(required=False)\n description = blocks.CharBlock(required=False)\n\n link = blocks.URLBlock(required=False)\n button = blocks.CharBlock(required=False)\n\n class Meta:\n label = _('Image overlay')\n icon = 'fa-clone'\n template = 'blocks/overlay.html'\n\n\nWAGTAIL_STATIC_BLOCKTYPES = [\n ('heading', HeadingBlock()),\n ('paragraph', blocks.RichTextBlock(template='blocks/paragraph.html')),\n ('image_description', ImageIconsBlock()),\n ('image_icons', ImageDescriptionBlock()),\n ('overlay', OverlayBlock()),\n ('logos', blocks.ListBlock(\n ImageChooserBlock(),\n icon='fa-pied-piper',\n template='blocks/logos.html',\n label=_('Logos'),\n )),\n ('counters', CountersBlock()),\n ('image', ImageChooserBlock(template='blocks/image.html')),\n]\n", "path": "website/blocks/models.py"}]} | 1,279 | 117 |
gh_patches_debug_16424 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-3520 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
libeay32.dll and ssleay32.dll needs to be manually bundled to use PyQt5.QNetwork with SSL
If you are having errors like:
```
qt.network.ssl: QSslSocket: cannot call unresolved function SSLv23_client_method
qt.network.ssl: QSslSocket: cannot call unresolved function SSL_CTX_new
qt.network.ssl: QSslSocket: cannot call unresolved function SSL_library_init
qt.network.ssl: QSslSocket: cannot call unresolved function ERR_get_error
qt.network.ssl: QSslSocket: cannot call unresolved function ERR_get_error
```
with PyInstaller and PyQt5 on Windows, you need to manually add libeay32.dll and ssleay32.dll from your PyQt5 site-packages (probably located somewhere in `PyQt5\Qt\bin\`) to your output dir or your frozen binary in a similar path.
In my final specfile, it looks like this:
```python
# -*- mode: python -*-
block_cipher = None
a = Analysis(['cddagl\\launcher.py'],
pathex=['C:\\Program Files (x86)\\Windows Kits\\10\\Redist\\ucrt\\DLLs\\x86\\', 'C:\\Users\\remy\\Projects\\CDDA-Game-Launcher'],
binaries=[('C:\\Users\\remy\\VirtualEnvs\\CDDA-Game-Launcher\\lib\\site-packages\\PyQt5\\Qt\\bin\\libeay32.dll', 'PyQt5\\Qt\\bin'), ('C:\\Users\\remy\\VirtualEnvs\\CDDA-Game-Launcher\\lib\\site-packages\\PyQt5\\Qt\\bin\\ssleay32.dll', 'PyQt5\\Qt\\bin')],
datas=[('alembic', 'alembic'), ('bin/updated.bat', '.'), ('data', 'data'), ('cddagl/resources', 'cddagl/resources'), ('C:\\Users\\remy\\AppData\\Local\\Programs\\Python\\Python36-32\\unrar.exe', '.'), ('cddagl/locale/en/LC_MESSAGES/cddagl.mo', 'cddagl/locale/en/LC_MESSAGES'), ('cddagl/locale/fr/LC_MESSAGES/cddagl.mo', 'cddagl/locale/fr/LC_MESSAGES'), ('cddagl/locale/it/LC_MESSAGES/cddagl.mo', 'cddagl/locale/it/LC_MESSAGES'), ('cddagl/locale/ru/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ru/LC_MESSAGES')],
hiddenimports=['lxml.cssselect', 'babel.numbers'],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
name='launcher',
debug=True,
strip=False,
upx=False,
runtime_tmpdir=None,
console=True , icon='cddagl\\resources\\launcher.ico')
```
I'm not sure how this can be improved, but this solution should work if you are having a similar issue. This was tested with **PyInstaller==3.4.dev0+1033a8770**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/hook-PyQt5.QtNetwork.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2013-2018, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9 from PyInstaller.utils.hooks import add_qt5_dependencies
10
11 hiddenimports, binaries, datas = add_qt5_dependencies(__file__)
12
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/hooks/hook-PyQt5.QtNetwork.py b/PyInstaller/hooks/hook-PyQt5.QtNetwork.py
--- a/PyInstaller/hooks/hook-PyQt5.QtNetwork.py
+++ b/PyInstaller/hooks/hook-PyQt5.QtNetwork.py
@@ -6,6 +6,21 @@
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-from PyInstaller.utils.hooks import add_qt5_dependencies
+import os.path
+
+from PyInstaller.utils.hooks import pyqt5_library_info, add_qt5_dependencies
+from PyInstaller.compat import is_win
hiddenimports, binaries, datas = add_qt5_dependencies(__file__)
+
+# Add libraries needed for SSL. See issue #3520.
+if is_win:
+ rel_data_path = ['PyQt5', 'Qt', 'bin']
+ binaries += [
+ (os.path.join(pyqt5_library_info.location['BinariesPath'],
+ 'libeay32.dll'),
+ os.path.join(*rel_data_path)),
+ (os.path.join(pyqt5_library_info.location['BinariesPath'],
+ 'ssleay32.dll'),
+ os.path.join(*rel_data_path))
+ ]
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-PyQt5.QtNetwork.py b/PyInstaller/hooks/hook-PyQt5.QtNetwork.py\n--- a/PyInstaller/hooks/hook-PyQt5.QtNetwork.py\n+++ b/PyInstaller/hooks/hook-PyQt5.QtNetwork.py\n@@ -6,6 +6,21 @@\n #\n # The full license is in the file COPYING.txt, distributed with this software.\n #-----------------------------------------------------------------------------\n-from PyInstaller.utils.hooks import add_qt5_dependencies\n+import os.path\n+\n+from PyInstaller.utils.hooks import pyqt5_library_info, add_qt5_dependencies\n+from PyInstaller.compat import is_win\n \n hiddenimports, binaries, datas = add_qt5_dependencies(__file__)\n+\n+# Add libraries needed for SSL. See issue #3520.\n+if is_win:\n+ rel_data_path = ['PyQt5', 'Qt', 'bin']\n+ binaries += [\n+ (os.path.join(pyqt5_library_info.location['BinariesPath'],\n+ 'libeay32.dll'),\n+ os.path.join(*rel_data_path)),\n+ (os.path.join(pyqt5_library_info.location['BinariesPath'],\n+ 'ssleay32.dll'),\n+ os.path.join(*rel_data_path))\n+ ]\n", "issue": "libeay32.dll and ssleay32.dll needs to be manually bundled to use PyQt5.QNetwork with SSL\nIf you are having errors like:\r\n\r\n```\r\nqt.network.ssl: QSslSocket: cannot call unresolved function SSLv23_client_method\r\nqt.network.ssl: QSslSocket: cannot call unresolved function SSL_CTX_new\r\nqt.network.ssl: QSslSocket: cannot call unresolved function SSL_library_init\r\nqt.network.ssl: QSslSocket: cannot call unresolved function ERR_get_error\r\nqt.network.ssl: QSslSocket: cannot call unresolved function ERR_get_error\r\n```\r\n\r\nwith PyInstaller and PyQt5 on Windows, you need to manually add libeay32.dll and ssleay32.dll from your PyQt5 site-packages (probably located somewhere in `PyQt5\\Qt\\bin\\`) to your output dir or your frozen binary in a similar path.\r\n\r\nIn my final specfile, it looks like this:\r\n\r\n```python\r\n# -*- mode: python -*-\r\n\r\nblock_cipher = None\r\n\r\n\r\na = Analysis(['cddagl\\\\launcher.py'],\r\n pathex=['C:\\\\Program Files (x86)\\\\Windows Kits\\\\10\\\\Redist\\\\ucrt\\\\DLLs\\\\x86\\\\', 'C:\\\\Users\\\\remy\\\\Projects\\\\CDDA-Game-Launcher'],\r\n binaries=[('C:\\\\Users\\\\remy\\\\VirtualEnvs\\\\CDDA-Game-Launcher\\\\lib\\\\site-packages\\\\PyQt5\\\\Qt\\\\bin\\\\libeay32.dll', 'PyQt5\\\\Qt\\\\bin'), ('C:\\\\Users\\\\remy\\\\VirtualEnvs\\\\CDDA-Game-Launcher\\\\lib\\\\site-packages\\\\PyQt5\\\\Qt\\\\bin\\\\ssleay32.dll', 'PyQt5\\\\Qt\\\\bin')],\r\n datas=[('alembic', 'alembic'), ('bin/updated.bat', '.'), ('data', 'data'), ('cddagl/resources', 'cddagl/resources'), ('C:\\\\Users\\\\remy\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python36-32\\\\unrar.exe', '.'), ('cddagl/locale/en/LC_MESSAGES/cddagl.mo', 'cddagl/locale/en/LC_MESSAGES'), ('cddagl/locale/fr/LC_MESSAGES/cddagl.mo', 'cddagl/locale/fr/LC_MESSAGES'), ('cddagl/locale/it/LC_MESSAGES/cddagl.mo', 'cddagl/locale/it/LC_MESSAGES'), ('cddagl/locale/ru/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ru/LC_MESSAGES')],\r\n hiddenimports=['lxml.cssselect', 'babel.numbers'],\r\n hookspath=[],\r\n runtime_hooks=[],\r\n excludes=[],\r\n win_no_prefer_redirects=False,\r\n win_private_assemblies=False,\r\n cipher=block_cipher)\r\npyz = PYZ(a.pure, a.zipped_data,\r\n cipher=block_cipher)\r\nexe = EXE(pyz,\r\n a.scripts,\r\n a.binaries,\r\n a.zipfiles,\r\n a.datas,\r\n name='launcher',\r\n debug=True,\r\n strip=False,\r\n upx=False,\r\n runtime_tmpdir=None,\r\n console=True , icon='cddagl\\\\resources\\\\launcher.ico')\r\n```\r\n\r\nI'm not sure how this can be improved, but this solution 
should work if you are having a similar issue. This was tested with **PyInstaller==3.4.dev0+1033a8770**\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nfrom PyInstaller.utils.hooks import add_qt5_dependencies\n\nhiddenimports, binaries, datas = add_qt5_dependencies(__file__)\n", "path": "PyInstaller/hooks/hook-PyQt5.QtNetwork.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2018, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nimport os.path\n\nfrom PyInstaller.utils.hooks import pyqt5_library_info, add_qt5_dependencies\nfrom PyInstaller.compat import is_win\n\nhiddenimports, binaries, datas = add_qt5_dependencies(__file__)\n\n# Add libraries needed for SSL. See issue #3520.\nif is_win:\n rel_data_path = ['PyQt5', 'Qt', 'bin']\n binaries += [\n (os.path.join(pyqt5_library_info.location['BinariesPath'],\n 'libeay32.dll'),\n os.path.join(*rel_data_path)),\n (os.path.join(pyqt5_library_info.location['BinariesPath'],\n 'ssleay32.dll'),\n os.path.join(*rel_data_path))\n ]\n", "path": "PyInstaller/hooks/hook-PyQt5.QtNetwork.py"}]} | 1,121 | 278 |
gh_patches_debug_9014 | rasdani/github-patches | git_diff | stephenmcd__mezzanine-1517 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Duplicate form fields in admin for user profiles
As discussed here:
https://groups.google.com/forum/#!topic/mezzanine-users/3QmiqfNZjUM
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mezzanine/accounts/admin.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from django.contrib import admin
4 from django.contrib.auth import get_user_model
5 from mezzanine.accounts import get_profile_model, ProfileNotConfigured
6
7 from mezzanine.core.admin import SitePermissionUserAdmin
8 from mezzanine.conf import settings
9 from mezzanine.utils.email import send_approved_mail, send_verification_mail
10
11
12 User = get_user_model()
13
14 user_list_display = SitePermissionUserAdmin.list_display
15 user_list_display += ("is_active", "date_joined", "last_login")
16
17
18 class UserProfileAdmin(SitePermissionUserAdmin):
19
20 list_display = user_list_display
21
22 def save_model(self, request, obj, form, change):
23 """
24 If the ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``,
25 send a notification email to the user being saved if their
26 ``active`` status has changed to ``True``.
27 If the ``ACCOUNTS_VERIFICATION_REQUIRED`` setting is ``True``,
28 send a verification email instead.
29 """
30 must_send_verification_mail_after_save = False
31 if change and settings.ACCOUNTS_APPROVAL_REQUIRED:
32 if obj.is_active and not User.objects.get(id=obj.id).is_active:
33 if settings.ACCOUNTS_VERIFICATION_REQUIRED:
34 # Accounts verification requires an inactive account
35 obj.is_active = False
36 # The token generated by send_verification_mail()
37 # must match the _saved_ User object,
38 # so postpone send_verification_mail() until later
39 must_send_verification_mail_after_save = True
40 else:
41 send_approved_mail(request, obj)
42 super(UserProfileAdmin, self).save_model(request, obj, form, change)
43 if must_send_verification_mail_after_save:
44 user = User.objects.get(id=obj.id)
45 send_verification_mail(request, user, "signup_verify")
46
47
48 try:
49 class ProfileInline(admin.StackedInline):
50 model = get_profile_model()
51 can_delete = False
52 template = "admin/profile_inline.html"
53 extra = 0
54 UserProfileAdmin.inlines += (ProfileInline,)
55 except ProfileNotConfigured:
56 pass
57
58
59 if User in admin.site._registry:
60 admin.site.unregister(User)
61 admin.site.register(User, UserProfileAdmin)
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mezzanine/accounts/admin.py b/mezzanine/accounts/admin.py
--- a/mezzanine/accounts/admin.py
+++ b/mezzanine/accounts/admin.py
@@ -51,6 +51,13 @@
can_delete = False
template = "admin/profile_inline.html"
extra = 0
+
+ def get_min_num(self, request, obj=None, **kwargs):
+ """This causes profile forms to be shown when editing but hidden
+ when creating. If min_num is fixed at 1, Django's initial user
+ creation form fails if the profile model has a required field."""
+ return 0 if obj is None else 1
+
UserProfileAdmin.inlines += (ProfileInline,)
except ProfileNotConfigured:
pass
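The hook used in the patch, `InlineModelAdmin.get_min_num`, is Django's per-request control over inline formset bounds (alongside `get_extra` and `get_max_num`). A generic sketch of the same pattern, using hypothetical `Author`/`Book` models rather than Mezzanine's profile setup:

```python
from django.contrib import admin

from library.models import Author, Book  # hypothetical app and models


class BookInline(admin.StackedInline):
    model = Book
    extra = 0

    def get_min_num(self, request, obj=None, **kwargs):
        # obj is None on the add form and set on the change form, so the
        # inline is optional while creating and required once editing.
        return 0 if obj is None else 1


@admin.register(Author)
class AuthorAdmin(admin.ModelAdmin):
    inlines = [BookInline]
```

Returning 0 on the add form keeps Django's two-step user creation flow working when the inline model has required fields, which is the behaviour the patch's docstring describes.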
| {"golden_diff": "diff --git a/mezzanine/accounts/admin.py b/mezzanine/accounts/admin.py\n--- a/mezzanine/accounts/admin.py\n+++ b/mezzanine/accounts/admin.py\n@@ -51,6 +51,13 @@\n can_delete = False\n template = \"admin/profile_inline.html\"\n extra = 0\n+\n+ def get_min_num(self, request, obj=None, **kwargs):\n+ \"\"\"This causes profile forms to be shown when editing but hidden\n+ when creating. If min_num is fixed at 1, Django's initial user\n+ creation form fails if the profile model has a required field.\"\"\"\n+ return 0 if obj is None else 1\n+\n UserProfileAdmin.inlines += (ProfileInline,)\n except ProfileNotConfigured:\n pass\n", "issue": "Duplicate form fields in admin for user profiles\nAs discussed here:\n\nhttps://groups.google.com/forum/#!topic/mezzanine-users/3QmiqfNZjUM\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.contrib import admin\nfrom django.contrib.auth import get_user_model\nfrom mezzanine.accounts import get_profile_model, ProfileNotConfigured\n\nfrom mezzanine.core.admin import SitePermissionUserAdmin\nfrom mezzanine.conf import settings\nfrom mezzanine.utils.email import send_approved_mail, send_verification_mail\n\n\nUser = get_user_model()\n\nuser_list_display = SitePermissionUserAdmin.list_display\nuser_list_display += (\"is_active\", \"date_joined\", \"last_login\")\n\n\nclass UserProfileAdmin(SitePermissionUserAdmin):\n\n list_display = user_list_display\n\n def save_model(self, request, obj, form, change):\n \"\"\"\n If the ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``,\n send a notification email to the user being saved if their\n ``active`` status has changed to ``True``.\n If the ``ACCOUNTS_VERIFICATION_REQUIRED`` setting is ``True``,\n send a verification email instead.\n \"\"\"\n must_send_verification_mail_after_save = False\n if change and settings.ACCOUNTS_APPROVAL_REQUIRED:\n if obj.is_active and not User.objects.get(id=obj.id).is_active:\n if settings.ACCOUNTS_VERIFICATION_REQUIRED:\n # Accounts verification requires an inactive account\n obj.is_active = False\n # The token generated by send_verification_mail()\n # must match the _saved_ User object,\n # so postpone send_verification_mail() until later\n must_send_verification_mail_after_save = True\n else:\n send_approved_mail(request, obj)\n super(UserProfileAdmin, self).save_model(request, obj, form, change)\n if must_send_verification_mail_after_save:\n user = User.objects.get(id=obj.id)\n send_verification_mail(request, user, \"signup_verify\")\n\n\ntry:\n class ProfileInline(admin.StackedInline):\n model = get_profile_model()\n can_delete = False\n template = \"admin/profile_inline.html\"\n extra = 0\n UserProfileAdmin.inlines += (ProfileInline,)\nexcept ProfileNotConfigured:\n pass\n\n\nif User in admin.site._registry:\n admin.site.unregister(User)\nadmin.site.register(User, UserProfileAdmin)\n", "path": "mezzanine/accounts/admin.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.contrib import admin\nfrom django.contrib.auth import get_user_model\nfrom mezzanine.accounts import get_profile_model, ProfileNotConfigured\n\nfrom mezzanine.core.admin import SitePermissionUserAdmin\nfrom mezzanine.conf import settings\nfrom mezzanine.utils.email import send_approved_mail, send_verification_mail\n\n\nUser = get_user_model()\n\nuser_list_display = SitePermissionUserAdmin.list_display\nuser_list_display += (\"is_active\", \"date_joined\", \"last_login\")\n\n\nclass 
UserProfileAdmin(SitePermissionUserAdmin):\n\n list_display = user_list_display\n\n def save_model(self, request, obj, form, change):\n \"\"\"\n If the ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``,\n send a notification email to the user being saved if their\n ``active`` status has changed to ``True``.\n If the ``ACCOUNTS_VERIFICATION_REQUIRED`` setting is ``True``,\n send a verification email instead.\n \"\"\"\n must_send_verification_mail_after_save = False\n if change and settings.ACCOUNTS_APPROVAL_REQUIRED:\n if obj.is_active and not User.objects.get(id=obj.id).is_active:\n if settings.ACCOUNTS_VERIFICATION_REQUIRED:\n # Accounts verification requires an inactive account\n obj.is_active = False\n # The token generated by send_verification_mail()\n # must match the _saved_ User object,\n # so postpone send_verification_mail() until later\n must_send_verification_mail_after_save = True\n else:\n send_approved_mail(request, obj)\n super(UserProfileAdmin, self).save_model(request, obj, form, change)\n if must_send_verification_mail_after_save:\n user = User.objects.get(id=obj.id)\n send_verification_mail(request, user, \"signup_verify\")\n\n\ntry:\n class ProfileInline(admin.StackedInline):\n model = get_profile_model()\n can_delete = False\n template = \"admin/profile_inline.html\"\n extra = 0\n\n def get_min_num(self, request, obj=None, **kwargs):\n \"\"\"This causes profile forms to be shown when editing but hidden\n when creating. If min_num is fixed at 1, Django's initial user\n creation form fails if the profile model has a required field.\"\"\"\n return 0 if obj is None else 1\n\n UserProfileAdmin.inlines += (ProfileInline,)\nexcept ProfileNotConfigured:\n pass\n\n\nif User in admin.site._registry:\n admin.site.unregister(User)\nadmin.site.register(User, UserProfileAdmin)\n", "path": "mezzanine/accounts/admin.py"}]} | 879 | 168 |
gh_patches_debug_2536 | rasdani/github-patches | git_diff | optuna__optuna-122 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`TPESampler._sample_categorical` fails with PostgreSQL backend
`TPESampler._sample_categorical` fails with PostgreSQL backend. This happens because:
- `TPESampler._sample_categorical` returns an integer as `numpy.int32`.
- The integer value is passed to the storage class without any cast.
- SQLAlchemy with the psycopg2 backend does not accept `numpy.int32` input, but it does accept a plain `int`.
**Repro Steps**
With any objective function that uses categorical sampling (e.g., the example objective in `chainer_mnist.py`), invoke `minimize` as:
```
study = pfnopt.create_study(storage=SOME_POSTGRES_URL)
pfnopt.minimize(objective, n_trials=100, study=study)
```
It fails after running `n_startup_trials` trials.
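A quick way to see the underlying type problem in isolation (no live PostgreSQL needed, assuming `psycopg2` 2.x and `numpy` are installed) is to ask psycopg2 to adapt a `numpy.int32` directly:

```python
import numpy as np
from psycopg2.extensions import adapt

print(adapt(5).getquoted())          # b'5' -- a plain int adapts fine
try:
    adapt(np.int32(5)).getquoted()
except Exception as exc:
    # psycopg2.ProgrammingError: can't adapt type 'numpy.int32'
    print(type(exc).__name__, exc)
```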
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pfnopt/samplers/tpe.py`
Content:
```
1 import math
2 import numpy
3 from typing import List # NOQA
4 from typing import Optional # NOQA
5
6 from pfnopt import distributions # NOQA
7 from pfnopt.samplers import _hyperopt
8 from pfnopt.samplers import base
9 from pfnopt.samplers import random
10 from pfnopt.storages.base import BaseStorage # NOQA
11
12
13 class TPESampler(base.BaseSampler):
14
15 def __init__(self,
16 prior_weight=_hyperopt.default_prior_weight,
17 n_startup_trials=_hyperopt.default_n_startup_trials,
18 n_ei_candidates=_hyperopt.default_n_ei_candidates,
19 gamma=_hyperopt.default_gamma,
20 seed=None):
21 # type: (float, int, int, float, Optional[int]) -> None
22 self.prior_weight = prior_weight
23 self.n_startup_trials = n_startup_trials
24 self.n_ei_candidates = n_ei_candidates
25 self.gamma = gamma
26 self.seed = seed
27
28 self.rng = numpy.random.RandomState(seed)
29 self.random_sampler = random.RandomSampler(seed=seed)
30
31 def sample(self, storage, study_id, param_name, param_distribution):
32 # type: (BaseStorage, int, str, distributions.BaseDistribution) -> float
33 observation_pairs = storage.get_trial_param_result_pairs(
34 study_id, param_name)
35 n = len(observation_pairs)
36
37 # TODO(Akiba): this behavior is slightly different from hyperopt
38 if n < self.n_startup_trials:
39 return self.random_sampler.sample(storage, study_id, param_name, param_distribution)
40
41 below_param_values, above_param_values = _hyperopt.ap_filter_trials(
42 range(n), [p[0] for p in observation_pairs],
43 range(n), [p[1] for p in observation_pairs],
44 self.gamma)
45
46 if isinstance(param_distribution, distributions.UniformDistribution):
47 return self._sample_uniform(
48 param_distribution, below_param_values, above_param_values)
49 elif isinstance(param_distribution, distributions.LogUniformDistribution):
50 return self._sample_loguniform(
51 param_distribution, below_param_values, above_param_values)
52 elif isinstance(param_distribution, distributions.CategoricalDistribution):
53 return self._sample_categorical(
54 param_distribution, below_param_values, above_param_values)
55 else:
56 raise NotImplementedError
57
58 def _sample_uniform(self, distribution, below, above):
59 # type: (distributions.UniformDistribution, List[float], List[float]) -> float
60 return _hyperopt.sample_uniform(
61 obs_below=below, obs_above=above, prior_weight=self.prior_weight,
62 low=distribution.low, high=distribution.high,
63 size=(self.n_ei_candidates,), rng=self.rng)
64
65 def _sample_loguniform(self, distribution, below, above):
66 # type: (distributions.LogUniformDistribution, List[float], List[float]) -> float
67
68 return _hyperopt.sample_loguniform(
69 obs_below=below, obs_above=above, prior_weight=self.prior_weight,
70 # `sample_loguniform` generates values in [exp(low), exp(high)]
71 low=math.log(distribution.low),
72 high=math.log(distribution.high),
73 size=(self.n_ei_candidates,), rng=self.rng)
74
75 def _sample_categorical(self, distribution, below, above):
76 # type: (distributions.CategoricalDistribution, List[float], List[float]) -> float
77 choices = distribution.choices
78 below = list(map(int, below))
79 above = list(map(int, above))
80 idx = _hyperopt.sample_categorical(
81 obs_below=below, obs_above=above, prior_weight=self.prior_weight,
82 upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng)
83 return idx
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pfnopt/samplers/tpe.py b/pfnopt/samplers/tpe.py
--- a/pfnopt/samplers/tpe.py
+++ b/pfnopt/samplers/tpe.py
@@ -80,4 +80,4 @@
idx = _hyperopt.sample_categorical(
obs_below=below, obs_above=above, prior_weight=self.prior_weight,
upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng)
- return idx
+ return int(idx)
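The one-line cast above is the narrowest fix. A broader alternative sometimes used in psycopg2-backed projects (not what this patch does) is to register adapters once so NumPy integer scalars are always converted before reaching the driver; a sketch of that approach:

```python
import numpy as np
from psycopg2.extensions import AsIs, register_adapter


def _adapt_numpy_integer(value):
    # Render NumPy integer scalars as plain SQL integer literals.
    return AsIs(int(value))


register_adapter(np.int32, _adapt_numpy_integer)
register_adapter(np.int64, _adapt_numpy_integer)
```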
| {"golden_diff": "diff --git a/pfnopt/samplers/tpe.py b/pfnopt/samplers/tpe.py\n--- a/pfnopt/samplers/tpe.py\n+++ b/pfnopt/samplers/tpe.py\n@@ -80,4 +80,4 @@\n idx = _hyperopt.sample_categorical(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng)\n- return idx\n+ return int(idx)\n", "issue": "`TPESampler._sample_categorical` fails with PostgreSQL backend\n`TPESampler._sample_categorical` fails with PostgreSQL backend. This happens because:\r\n- `TPESampler._sample_categorical` returns an integer as `numpy.int32`.\r\n- The integer value is input to storage class without any cast.\r\n- SQLAlchemy with psycopg2 backend does not support `numpy.int32` input but does `int` one.\r\n\r\n**Repro Steps**\r\nWith any objective function using categorical sampling (e.g., example one in `chainer_mnist.py`), invoke `minimize` as:\r\n```\r\nstudy = pfnopt.create_study(storage=SOME_POSTGRES_URL)\r\npfnopt.minimize(objective, n_trials=100, study=study)\r\n```\r\n\r\nIt fails after running trials `n_startup_trails` times.\n", "before_files": [{"content": "import math\nimport numpy\nfrom typing import List # NOQA\nfrom typing import Optional # NOQA\n\nfrom pfnopt import distributions # NOQA\nfrom pfnopt.samplers import _hyperopt\nfrom pfnopt.samplers import base\nfrom pfnopt.samplers import random\nfrom pfnopt.storages.base import BaseStorage # NOQA\n\n\nclass TPESampler(base.BaseSampler):\n\n def __init__(self,\n prior_weight=_hyperopt.default_prior_weight,\n n_startup_trials=_hyperopt.default_n_startup_trials,\n n_ei_candidates=_hyperopt.default_n_ei_candidates,\n gamma=_hyperopt.default_gamma,\n seed=None):\n # type: (float, int, int, float, Optional[int]) -> None\n self.prior_weight = prior_weight\n self.n_startup_trials = n_startup_trials\n self.n_ei_candidates = n_ei_candidates\n self.gamma = gamma\n self.seed = seed\n\n self.rng = numpy.random.RandomState(seed)\n self.random_sampler = random.RandomSampler(seed=seed)\n\n def sample(self, storage, study_id, param_name, param_distribution):\n # type: (BaseStorage, int, str, distributions.BaseDistribution) -> float\n observation_pairs = storage.get_trial_param_result_pairs(\n study_id, param_name)\n n = len(observation_pairs)\n\n # TODO(Akiba): this behavior is slightly different from hyperopt\n if n < self.n_startup_trials:\n return self.random_sampler.sample(storage, study_id, param_name, param_distribution)\n\n below_param_values, above_param_values = _hyperopt.ap_filter_trials(\n range(n), [p[0] for p in observation_pairs],\n range(n), [p[1] for p in observation_pairs],\n self.gamma)\n\n if isinstance(param_distribution, distributions.UniformDistribution):\n return self._sample_uniform(\n param_distribution, below_param_values, above_param_values)\n elif isinstance(param_distribution, distributions.LogUniformDistribution):\n return self._sample_loguniform(\n param_distribution, below_param_values, above_param_values)\n elif isinstance(param_distribution, distributions.CategoricalDistribution):\n return self._sample_categorical(\n param_distribution, below_param_values, above_param_values)\n else:\n raise NotImplementedError\n\n def _sample_uniform(self, distribution, below, above):\n # type: (distributions.UniformDistribution, List[float], List[float]) -> float\n return _hyperopt.sample_uniform(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n low=distribution.low, high=distribution.high,\n size=(self.n_ei_candidates,), rng=self.rng)\n\n def 
_sample_loguniform(self, distribution, below, above):\n # type: (distributions.LogUniformDistribution, List[float], List[float]) -> float\n\n return _hyperopt.sample_loguniform(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n # `sample_loguniform` generates values in [exp(low), exp(high)]\n low=math.log(distribution.low),\n high=math.log(distribution.high),\n size=(self.n_ei_candidates,), rng=self.rng)\n\n def _sample_categorical(self, distribution, below, above):\n # type: (distributions.CategoricalDistribution, List[float], List[float]) -> float\n choices = distribution.choices\n below = list(map(int, below))\n above = list(map(int, above))\n idx = _hyperopt.sample_categorical(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng)\n return idx\n", "path": "pfnopt/samplers/tpe.py"}], "after_files": [{"content": "import math\nimport numpy\nfrom typing import List # NOQA\nfrom typing import Optional # NOQA\n\nfrom pfnopt import distributions # NOQA\nfrom pfnopt.samplers import _hyperopt\nfrom pfnopt.samplers import base\nfrom pfnopt.samplers import random\nfrom pfnopt.storages.base import BaseStorage # NOQA\n\n\nclass TPESampler(base.BaseSampler):\n\n def __init__(self,\n prior_weight=_hyperopt.default_prior_weight,\n n_startup_trials=_hyperopt.default_n_startup_trials,\n n_ei_candidates=_hyperopt.default_n_ei_candidates,\n gamma=_hyperopt.default_gamma,\n seed=None):\n # type: (float, int, int, float, Optional[int]) -> None\n self.prior_weight = prior_weight\n self.n_startup_trials = n_startup_trials\n self.n_ei_candidates = n_ei_candidates\n self.gamma = gamma\n self.seed = seed\n\n self.rng = numpy.random.RandomState(seed)\n self.random_sampler = random.RandomSampler(seed=seed)\n\n def sample(self, storage, study_id, param_name, param_distribution):\n # type: (BaseStorage, int, str, distributions.BaseDistribution) -> float\n observation_pairs = storage.get_trial_param_result_pairs(\n study_id, param_name)\n n = len(observation_pairs)\n\n # TODO(Akiba): this behavior is slightly different from hyperopt\n if n < self.n_startup_trials:\n return self.random_sampler.sample(storage, study_id, param_name, param_distribution)\n\n below_param_values, above_param_values = _hyperopt.ap_filter_trials(\n range(n), [p[0] for p in observation_pairs],\n range(n), [p[1] for p in observation_pairs],\n self.gamma)\n\n if isinstance(param_distribution, distributions.UniformDistribution):\n return self._sample_uniform(\n param_distribution, below_param_values, above_param_values)\n elif isinstance(param_distribution, distributions.LogUniformDistribution):\n return self._sample_loguniform(\n param_distribution, below_param_values, above_param_values)\n elif isinstance(param_distribution, distributions.CategoricalDistribution):\n return self._sample_categorical(\n param_distribution, below_param_values, above_param_values)\n else:\n raise NotImplementedError\n\n def _sample_uniform(self, distribution, below, above):\n # type: (distributions.UniformDistribution, List[float], List[float]) -> float\n return _hyperopt.sample_uniform(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n low=distribution.low, high=distribution.high,\n size=(self.n_ei_candidates,), rng=self.rng)\n\n def _sample_loguniform(self, distribution, below, above):\n # type: (distributions.LogUniformDistribution, List[float], List[float]) -> float\n\n return _hyperopt.sample_loguniform(\n obs_below=below, obs_above=above, 
prior_weight=self.prior_weight,\n # `sample_loguniform` generates values in [exp(low), exp(high)]\n low=math.log(distribution.low),\n high=math.log(distribution.high),\n size=(self.n_ei_candidates,), rng=self.rng)\n\n def _sample_categorical(self, distribution, below, above):\n # type: (distributions.CategoricalDistribution, List[float], List[float]) -> float\n choices = distribution.choices\n below = list(map(int, below))\n above = list(map(int, above))\n idx = _hyperopt.sample_categorical(\n obs_below=below, obs_above=above, prior_weight=self.prior_weight,\n upper=len(choices), size=(self.n_ei_candidates, ), rng=self.rng)\n return int(idx)\n", "path": "pfnopt/samplers/tpe.py"}]} | 1,385 | 121 |
gh_patches_debug_599 | rasdani/github-patches | git_diff | pex-tool__pex-1834 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.95
On the docket:
+ [x] Lock creation should skip Windows-only requirements and / or allow selecting target platforms (OS classes). #1821
+ [x] Feature request: "universal" lock mode can reject unsupported platforms #1595
+ [x] Avoid ENOEXEC for --venv shebangs. #1828
+ [x] pex3 lock export doesn't seem to respect the platform flag. #1826
+ [x] Clarify pex3 lock export command. #1645
+ [x] Support exporting PYTHONPATH before running user code #1825
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.94"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.94"
+__version__ = "2.1.95"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.94\"\n+__version__ = \"2.1.95\"\n", "issue": "Release 2.1.95\nOn the docket:\r\n+ [x] Lock creation should skip Windows-only requirements and / or allow selecting target platforms (OS classes). #1821\r\n+ [x] Feature request: \"universal\" lock mode can reject unsupported platforms #1595\r\n+ [x] Avoid ENOEXEC for --venv shebangs. #1828 \r\n+ [x] pex3 lock export does't seem to respect the platform flag. #1826\r\n+ [x] Clarify pex3 lock export command. #1645\r\n+ [x] Support exporting PYTHONPATH before running user code #1825\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.94\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.95\"\n", "path": "pex/version.py"}]} | 454 | 96 |
gh_patches_debug_20686 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3325 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider labreweries is broken
During the global build at 2021-07-21-14-42-39, spider **labreweries** failed with **0 features** and **88 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/logs/labreweries.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/labreweries.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/labreweries.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/labreweries.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import re
4
5 from locations.items import GeojsonPointItem
6
7
8 class LaBreweriesSpider(scrapy.Spider):
9 name = "labreweries"
10 allowed_domains = ["labeerhop.com"]
11 start_urls = (
12 'http://labeerhop.com/breweries-sitemap.xml',
13 )
14
15 def store_hours(self, store_hours):
16 day_groups = []
17 this_day_group = None
18 for day in store_hours:
19 day = day.replace(' :-', ' 12:00 -')
20 day = day.split('<h5>Hours</h5>')[1].strip('<br>').strip('</aside>')
21 match = re.search(r'(closed|(\d{1,2})\S.\s*-\s*(\d{1,2})\S.)', day.lower())
22 open('/tmp/test1.txt', 'w').write(str(day))
23 (dow, f_hr, t_hr) = match.groups()
24 day_short = dow[:2]
25
26 f_hr = int(f_hr)
27 t_hr = int(t_hr)
28
29 hours = '{:02d}-{:02d}'.format(
30 f_hr,
31 t_hr,
32 )
33
34 if not this_day_group:
35 this_day_group = {
36 'from_day': day_short,
37 'to_day': day_short,
38 'hours': hours
39 }
40 elif this_day_group['hours'] != hours:
41 day_groups.append(this_day_group)
42 this_day_group = {
43 'from_day': day_short,
44 'to_day': day_short,
45 'hours': hours
46 }
47 elif this_day_group['hours'] == hours:
48 this_day_group['to_day'] = day_short
49
50 day_groups.append(this_day_group)
51
52 opening_hours = ""
53 if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
54 opening_hours = '24/7'
55 else:
56 for day_group in day_groups:
57 if day_group['from_day'] == day_group['to_day']:
58 opening_hours += '{from_day} {hours}; '.format(**day_group)
59 elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':
60 opening_hours += '{hours}; '.format(**day_group)
61 else:
62 opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
63 opening_hours = opening_hours[:-2]
64
65 return opening_hours
66
67 def address(self, address):
68 if not address:
69 return None
70
71 addr_tags = {
72 "addr_full": address[0].split(',')[0].strip(),
73 "city": address[0].split(',')[1].strip(),
74 "state": address[0].split(' ')[-2].strip(),
75 "postcode": address[0].split(' ')[-1].strip(),
76 }
77
78 return addr_tags
79
80 def parse(self, response):
81 response.selector.remove_namespaces()
82 city_urls = response.xpath('//url/loc/text()').extract()
83 for path in city_urls:
84 if path not in "http://labeerhop.com/breweries/1056/":
85 yield scrapy.Request(
86 path.strip(),
87 callback=self.parse_store,
88 )
89
90 def parse_store(self, response):
91
92 properties = {
93 'website': response.xpath('//head/link[@rel="canonical"]/@href').extract_first(),
94 'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip("['']"),
95 'opening_hours': re.sub(r'\s+', ' ', response.css('#secondary').extract()[0].split('<h5>Hours</h5>')[1].replace('<br>','').replace('</aside>','').replace('\t',' ').replace('\n','').replace('\r',' ')).strip(),
96 # 'lon': float(data['geo']['longitude']), # not lon on page
97 # 'lat': float(data['geo']['latitude']), # not lat on page
98 }
99
100 address = self.address(response.xpath('/html/body/div[1]/div[1]/aside/address/text()').extract())
101 if address:
102 properties.update(address)
103
104
105 yield GeojsonPointItem(**properties)
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/labreweries.py b/locations/spiders/labreweries.py
--- a/locations/spiders/labreweries.py
+++ b/locations/spiders/labreweries.py
@@ -92,7 +92,7 @@
properties = {
'website': response.xpath('//head/link[@rel="canonical"]/@href').extract_first(),
'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip("['']"),
- 'opening_hours': re.sub(r'\s+', ' ', response.css('#secondary').extract()[0].split('<h5>Hours</h5>')[1].replace('<br>','').replace('</aside>','').replace('\t',' ').replace('\n','').replace('\r',' ')).strip(),
+ 'opening_hours': re.sub(r'\s+', ' ', response.xpath('//*[@id="content"]/div/div[2]/div[3]').extract()[0].split('<h5 class="mb-2">Hours</h5>')[1].replace('<br>','').replace('</div>','').replace('\t',' ').replace('\n','').replace('\r',' ')).strip(),
# 'lon': float(data['geo']['longitude']), # not lon on page
# 'lat': float(data['geo']['latitude']), # not lat on page
}
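The patched `opening_hours` expression is dense; applying the same cleanup chain to a small, made-up HTML fragment (the markup below is only an assumption shaped to match the selectors in the diff) shows what it produces:

```python
import re

# Made-up fragment shaped like the block the spider splits on.
fragment = (
    '<div><h5 class="mb-2">Hours</h5>'
    'Mon-Thu 4pm - 10pm<br>\n\tFri-Sun 12pm - 11pm</div>'
)

tail = fragment.split('<h5 class="mb-2">Hours</h5>')[1]
tail = (tail.replace('<br>', '')
            .replace('</div>', '')
            .replace('\t', ' ')
            .replace('\n', '')
            .replace('\r', ' '))
print(re.sub(r'\s+', ' ', tail).strip())
# -> Mon-Thu 4pm - 10pm Fri-Sun 12pm - 11pm
```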
| {"golden_diff": "diff --git a/locations/spiders/labreweries.py b/locations/spiders/labreweries.py\n--- a/locations/spiders/labreweries.py\n+++ b/locations/spiders/labreweries.py\n@@ -92,7 +92,7 @@\n properties = {\n 'website': response.xpath('//head/link[@rel=\"canonical\"]/@href').extract_first(),\n 'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip(\"['']\"),\n- 'opening_hours': re.sub(r'\\s+', ' ', response.css('#secondary').extract()[0].split('<h5>Hours</h5>')[1].replace('<br>','').replace('</aside>','').replace('\\t',' ').replace('\\n','').replace('\\r',' ')).strip(),\n+ 'opening_hours': re.sub(r'\\s+', ' ', response.xpath('//*[@id=\"content\"]/div/div[2]/div[3]').extract()[0].split('<h5 class=\"mb-2\">Hours</h5>')[1].replace('<br>','').replace('</div>','').replace('\\t',' ').replace('\\n','').replace('\\r',' ')).strip(),\n # 'lon': float(data['geo']['longitude']), # not lon on page\n # 'lat': float(data['geo']['latitude']), # not lat on page\n }\n", "issue": "Spider labreweries is broken\nDuring the global build at 2021-07-21-14-42-39, spider **labreweries** failed with **0 features** and **88 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/logs/labreweries.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/labreweries.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-21-14-42-39/output/labreweries.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass LaBreweriesSpider(scrapy.Spider):\n name = \"labreweries\"\n allowed_domains = [\"labeerhop.com\"]\n start_urls = (\n 'http://labeerhop.com/breweries-sitemap.xml',\n )\n\n def store_hours(self, store_hours):\n day_groups = []\n this_day_group = None\n for day in store_hours:\n day = day.replace(' :-', ' 12:00 -')\n day = day.split('<h5>Hours</h5>')[1].strip('<br>').strip('</aside>')\n match = re.search(r'(closed|(\\d{1,2})\\S.\\s*-\\s*(\\d{1,2})\\S.)', day.lower())\n open('/tmp/test1.txt', 'w').write(str(day))\n (dow, f_hr, t_hr) = match.groups()\n day_short = dow[:2]\n\n f_hr = int(f_hr)\n t_hr = int(t_hr)\n\n hours = '{:02d}-{:02d}'.format(\n f_hr,\n t_hr,\n )\n\n if not this_day_group:\n this_day_group = {\n 'from_day': day_short,\n 'to_day': day_short,\n 'hours': hours\n }\n elif this_day_group['hours'] != hours:\n day_groups.append(this_day_group)\n this_day_group = {\n 'from_day': day_short,\n 'to_day': day_short,\n 'hours': hours\n }\n elif this_day_group['hours'] == hours:\n this_day_group['to_day'] = day_short\n\n day_groups.append(this_day_group)\n\n opening_hours = \"\"\n if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n opening_hours = '24/7'\n else:\n for day_group in day_groups:\n if day_group['from_day'] == day_group['to_day']:\n opening_hours += '{from_day} {hours}; '.format(**day_group)\n elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':\n opening_hours += '{hours}; '.format(**day_group)\n else:\n opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)\n opening_hours = opening_hours[:-2]\n\n return opening_hours\n\n def address(self, address):\n if not address:\n return None\n\n addr_tags = {\n \"addr_full\": address[0].split(',')[0].strip(),\n \"city\": address[0].split(',')[1].strip(),\n \"state\": address[0].split(' ')[-2].strip(),\n \"postcode\": address[0].split(' ')[-1].strip(),\n 
}\n\n return addr_tags\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n if path not in \"http://labeerhop.com/breweries/1056/\":\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n\n properties = {\n 'website': response.xpath('//head/link[@rel=\"canonical\"]/@href').extract_first(),\n 'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip(\"['']\"),\n 'opening_hours': re.sub(r'\\s+', ' ', response.css('#secondary').extract()[0].split('<h5>Hours</h5>')[1].replace('<br>','').replace('</aside>','').replace('\\t',' ').replace('\\n','').replace('\\r',' ')).strip(),\n # 'lon': float(data['geo']['longitude']), # not lon on page\n # 'lat': float(data['geo']['latitude']), # not lat on page\n }\n\n address = self.address(response.xpath('/html/body/div[1]/div[1]/aside/address/text()').extract())\n if address:\n properties.update(address)\n\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/labreweries.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\n\nfrom locations.items import GeojsonPointItem\n\n\nclass LaBreweriesSpider(scrapy.Spider):\n name = \"labreweries\"\n allowed_domains = [\"labeerhop.com\"]\n start_urls = (\n 'http://labeerhop.com/breweries-sitemap.xml',\n )\n\n def store_hours(self, store_hours):\n day_groups = []\n this_day_group = None\n for day in store_hours:\n day = day.replace(' :-', ' 12:00 -')\n day = day.split('<h5>Hours</h5>')[1].strip('<br>').strip('</aside>')\n match = re.search(r'(closed|(\\d{1,2})\\S.\\s*-\\s*(\\d{1,2})\\S.)', day.lower())\n open('/tmp/test1.txt', 'w').write(str(day))\n (dow, f_hr, t_hr) = match.groups()\n day_short = dow[:2]\n\n f_hr = int(f_hr)\n t_hr = int(t_hr)\n\n hours = '{:02d}-{:02d}'.format(\n f_hr,\n t_hr,\n )\n\n if not this_day_group:\n this_day_group = {\n 'from_day': day_short,\n 'to_day': day_short,\n 'hours': hours\n }\n elif this_day_group['hours'] != hours:\n day_groups.append(this_day_group)\n this_day_group = {\n 'from_day': day_short,\n 'to_day': day_short,\n 'hours': hours\n }\n elif this_day_group['hours'] == hours:\n this_day_group['to_day'] = day_short\n\n day_groups.append(this_day_group)\n\n opening_hours = \"\"\n if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):\n opening_hours = '24/7'\n else:\n for day_group in day_groups:\n if day_group['from_day'] == day_group['to_day']:\n opening_hours += '{from_day} {hours}; '.format(**day_group)\n elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':\n opening_hours += '{hours}; '.format(**day_group)\n else:\n opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)\n opening_hours = opening_hours[:-2]\n\n return opening_hours\n\n def address(self, address):\n if not address:\n return None\n\n addr_tags = {\n \"addr_full\": address[0].split(',')[0].strip(),\n \"city\": address[0].split(',')[1].strip(),\n \"state\": address[0].split(' ')[-2].strip(),\n \"postcode\": address[0].split(' ')[-1].strip(),\n }\n\n return addr_tags\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n if path not in \"http://labeerhop.com/breweries/1056/\":\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_store(self, response):\n\n properties = {\n 'website': 
response.xpath('//head/link[@rel=\"canonical\"]/@href').extract_first(),\n 'ref': str(response.xpath('/html/body/div[1]/div[1]/header/h1/text()').extract()).strip(\"['']\"),\n 'opening_hours': re.sub(r'\\s+', ' ', response.xpath('//*[@id=\"content\"]/div/div[2]/div[3]').extract()[0].split('<h5 class=\"mb-2\">Hours</h5>')[1].replace('<br>','').replace('</div>','').replace('\\t',' ').replace('\\n','').replace('\\r',' ')).strip(),\n # 'lon': float(data['geo']['longitude']), # not lon on page\n # 'lat': float(data['geo']['latitude']), # not lat on page\n }\n\n address = self.address(response.xpath('/html/body/div[1]/div[1]/aside/address/text()').extract())\n if address:\n properties.update(address)\n\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/labreweries.py"}]} | 1,609 | 299 |
gh_patches_debug_47927 | rasdani/github-patches | git_diff | uccser__cs-unplugged-885 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Minor adjustments to navbar and homepage
## Navbar
- [x] There should be more space between the logo and 'Topics'.
- [x] The search bar can be ~20% smaller.
## Homepage
- [x] The navbar should be transparent and fade in when the user scrolls down.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `csunplugged/general/views.py`
Content:
```
1 """Views for the general application."""
2
3 from django.views.generic import TemplateView
4 from django.http import HttpResponse
5
6
7 class GeneralIndexView(TemplateView):
8 """View for the homepage that renders from a template."""
9
10 template_name = "general/index.html"
11
12
13 class GeneralAboutView(TemplateView):
14 """View for the about page that renders from a template."""
15
16 template_name = "general/about.html"
17
18
19 class GeneralContactView(TemplateView):
20 """View for the contact page that renders from a template."""
21
22 template_name = "general/contact.html"
23
24
25 class GeneralPeopleView(TemplateView):
26 """View for the people page that renders from a template."""
27
28 template_name = "general/people.html"
29
30
31 class GeneralPrinciplesView(TemplateView):
32 """View for the princples page that renders from a template."""
33
34 template_name = "general/principles.html"
35
36
37 class WhatIsCSView(TemplateView):
38 """View for the 'What is Computer Science?' page that renders from a template."""
39
40 template_name = "general/what-is-computer-science.html"
41
42
43 class ComputationalThinkingView(TemplateView):
44 """View for the Computational Thinking page that renders from a template."""
45
46 template_name = "general/computational-thinking.html"
47
48
49 class HowDoITeachCSUnpluggedView(TemplateView):
50 """View for the 'How do I teach CS Unplugged?' page that renders from a template."""
51
52 template_name = "general/how-do-i-teach-cs-unplugged.html"
53
54
55 def health_check(request):
56 """Return heath check response for Google App Engine.
57
58 Returns a 200 HTTP response for Google App Engine to detect the system
59 is running.
60 """
61 return HttpResponse(status=200)
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/csunplugged/general/views.py b/csunplugged/general/views.py
--- a/csunplugged/general/views.py
+++ b/csunplugged/general/views.py
@@ -9,6 +9,16 @@
template_name = "general/index.html"
+ def get_context_data(self, **kwargs):
+ """Provide the context data for the homepage.
+
+ Returns:
+ Dictionary of context data.
+ """
+ context = super(GeneralIndexView, self).get_context_data(**kwargs)
+ context["homepage"] = True
+ return context
+
class GeneralAboutView(TemplateView):
"""View for the about page that renders from a template."""
| {"golden_diff": "diff --git a/csunplugged/general/views.py b/csunplugged/general/views.py\n--- a/csunplugged/general/views.py\n+++ b/csunplugged/general/views.py\n@@ -9,6 +9,16 @@\n \n template_name = \"general/index.html\"\n \n+ def get_context_data(self, **kwargs):\n+ \"\"\"Provide the context data for the homepage.\n+\n+ Returns:\n+ Dictionary of context data.\n+ \"\"\"\n+ context = super(GeneralIndexView, self).get_context_data(**kwargs)\n+ context[\"homepage\"] = True\n+ return context\n+\n \n class GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n", "issue": "Minor adjustments to navbar and homepage\n## Navbar\r\n\r\n- [x] There should be more space between logo and 'Topics'.\r\n- [x] The search bar can be ~20% smaller.\r\n\r\n## Hompeage\r\n\r\n- [x] Navbar should be transparent and fade in when user scrolls down.\n", "before_files": [{"content": "\"\"\"Views for the general application.\"\"\"\n\nfrom django.views.generic import TemplateView\nfrom django.http import HttpResponse\n\n\nclass GeneralIndexView(TemplateView):\n \"\"\"View for the homepage that renders from a template.\"\"\"\n\n template_name = \"general/index.html\"\n\n\nclass GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n\n template_name = \"general/about.html\"\n\n\nclass GeneralContactView(TemplateView):\n \"\"\"View for the contact page that renders from a template.\"\"\"\n\n template_name = \"general/contact.html\"\n\n\nclass GeneralPeopleView(TemplateView):\n \"\"\"View for the people page that renders from a template.\"\"\"\n\n template_name = \"general/people.html\"\n\n\nclass GeneralPrinciplesView(TemplateView):\n \"\"\"View for the princples page that renders from a template.\"\"\"\n\n template_name = \"general/principles.html\"\n\n\nclass WhatIsCSView(TemplateView):\n \"\"\"View for the 'What is Computer Science?' page that renders from a template.\"\"\"\n\n template_name = \"general/what-is-computer-science.html\"\n\n\nclass ComputationalThinkingView(TemplateView):\n \"\"\"View for the Computational Thinking page that renders from a template.\"\"\"\n\n template_name = \"general/computational-thinking.html\"\n\n\nclass HowDoITeachCSUnpluggedView(TemplateView):\n \"\"\"View for the 'How do I teach CS Unplugged?' 
page that renders from a template.\"\"\"\n\n template_name = \"general/how-do-i-teach-cs-unplugged.html\"\n\n\ndef health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n\n Returns a 200 HTTP response for Google App Engine to detect the system\n is running.\n \"\"\"\n return HttpResponse(status=200)\n", "path": "csunplugged/general/views.py"}], "after_files": [{"content": "\"\"\"Views for the general application.\"\"\"\n\nfrom django.views.generic import TemplateView\nfrom django.http import HttpResponse\n\n\nclass GeneralIndexView(TemplateView):\n \"\"\"View for the homepage that renders from a template.\"\"\"\n\n template_name = \"general/index.html\"\n\n def get_context_data(self, **kwargs):\n \"\"\"Provide the context data for the homepage.\n\n Returns:\n Dictionary of context data.\n \"\"\"\n context = super(GeneralIndexView, self).get_context_data(**kwargs)\n context[\"homepage\"] = True\n return context\n\n\nclass GeneralAboutView(TemplateView):\n \"\"\"View for the about page that renders from a template.\"\"\"\n\n template_name = \"general/about.html\"\n\n\nclass GeneralContactView(TemplateView):\n \"\"\"View for the contact page that renders from a template.\"\"\"\n\n template_name = \"general/contact.html\"\n\n\nclass GeneralPeopleView(TemplateView):\n \"\"\"View for the people page that renders from a template.\"\"\"\n\n template_name = \"general/people.html\"\n\n\nclass GeneralPrinciplesView(TemplateView):\n \"\"\"View for the princples page that renders from a template.\"\"\"\n\n template_name = \"general/principles.html\"\n\n\nclass WhatIsCSView(TemplateView):\n \"\"\"View for the 'What is Computer Science?' page that renders from a template.\"\"\"\n\n template_name = \"general/what-is-computer-science.html\"\n\n\nclass ComputationalThinkingView(TemplateView):\n \"\"\"View for the Computational Thinking page that renders from a template.\"\"\"\n\n template_name = \"general/computational-thinking.html\"\n\n\nclass HowDoITeachCSUnpluggedView(TemplateView):\n \"\"\"View for the 'How do I teach CS Unplugged?' page that renders from a template.\"\"\"\n\n template_name = \"general/how-do-i-teach-cs-unplugged.html\"\n\n\ndef health_check(request):\n \"\"\"Return heath check response for Google App Engine.\n\n Returns a 200 HTTP response for Google App Engine to detect the system\n is running.\n \"\"\"\n return HttpResponse(status=200)\n", "path": "csunplugged/general/views.py"}]} | 822 | 154 |
gh_patches_debug_40993 | rasdani/github-patches | git_diff | apluslms__a-plus-1062 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature request: send email when teacher adds news
Hi
It would be very nice to be able to notify students via email when a teacher adds news. This should be an option so the teacher could decide on a case-by-case basis whether to send the email or not.
What do you think?
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `news/forms.py`
Content:
```
1 from django import forms
2
3 from .models import News
4
5
6 class NewsForm(forms.ModelForm):
7
8 class Meta:
9 model = News
10 fields = [
11 'audience',
12 'publish',
13 'pin',
14 'title',
15 'body',
16 ]
17
```
Path: `lib/email_messages.py`
Content:
```
1 import logging
2 import traceback
3 from django.conf import settings
4 from django.core.mail import send_mail
5 from django.urls import reverse
6 from .helpers import build_aplus_url
7
8
9 logger = logging.getLogger('aplus.lib.email_messages')
10
11
12 def email_course_instance(instance, subject, message, everyone=False) -> bool:
13 """
14 Sends an email to a course instance's technical support emails or teachers if technical support not set.
15 If everyone == True, sends emails to teachers anyway.
16 """
17 recipients = []
18 if instance.technical_error_emails:
19 recipients = instance.technical_error_emails.split(",")
20 if everyone or not recipients:
21 recipients = instance.teachers.exclude(user__email='').values_list("user__email", flat=True)
22
23 if not recipients:
24 raise ValueError("No recipients")
25
26 try:
27 return send_mail(subject, message, settings.SERVER_EMAIL, recipients, True) == 1
28 except:
29 logger.exception('Failed to send course instance emails.')
30 raise
31
32
33 def email_course_error(request, exercise, message, exception=True):
34 """
35 Sends error message to course instance's teachers or technical support emails if set.
36 """
37 instance = exercise.course_instance
38
39 error_trace = "-"
40 if exception:
41 error_trace = traceback.format_exc()
42
43 if request:
44 request_fields = repr(request)
45 else:
46 request_fields = "No request available"
47
48 subject = settings.EXERCISE_ERROR_SUBJECT.format(
49 course=instance.course.code,
50 exercise=str(exercise))
51 body = settings.EXERCISE_ERROR_DESCRIPTION.format(
52 message=message,
53 exercise_url=build_aplus_url(
54 exercise.get_absolute_url(), user_url=True),
55 course_edit_url=build_aplus_url(
56 instance.get_url('course-details'), user_url=True),
57 error_trace=error_trace,
58 request_fields=request_fields)
59
60 try:
61 email_course_instance(instance, subject, body)
62 except:
63 pass
64
```
Path: `news/views.py`
Content:
```
1 from django.core.exceptions import PermissionDenied
2 from django.http import Http404
3 from django.shortcuts import get_object_or_404
4
5 from authorization.permissions import ACCESS
6 from course.viewbase import CourseInstanceBaseView, CourseInstanceMixin
7 from lib.viewbase import BaseFormView, BaseRedirectView
8 from .forms import NewsForm
9 from .models import News
10
11
12 class ListNewsView(CourseInstanceBaseView):
13 access_mode = ACCESS.TEACHER
14 template_name = "news/list.html"
15
16 def get_common_objects(self):
17 super().get_common_objects()
18 self.news = self.instance.news.all()
19 self.note("news")
20
21
22 class EditNewsView(CourseInstanceMixin, BaseFormView):
23 access_mode = ACCESS.TEACHER
24 template_name = "news/edit.html"
25 form_class = NewsForm
26 news_item_kw = "news_id"
27
28 def get_form_kwargs(self):
29 kwargs = super().get_form_kwargs()
30
31 news_id = self._get_kwarg(self.news_item_kw, default=None)
32 if news_id:
33 self.news_item = get_object_or_404(
34 News,
35 pk=news_id,
36 course_instance=self.instance
37 )
38 self.note("news_item")
39 else:
40 self.news_item = News(course_instance=self.instance)
41
42 kwargs["instance"] = self.news_item
43 return kwargs
44
45 def get_success_url(self):
46 return self.instance.get_url("news-list")
47
48 def form_valid(self, form):
49 form.save()
50 return super().form_valid(form)
51
52
53 class RemoveNewsView(CourseInstanceMixin, BaseRedirectView):
54 access_mode = ACCESS.TEACHER
55 news_item_kw = "news_id"
56
57 def get_resource_objects(self):
58 super().get_resource_objects()
59 self.news_item = get_object_or_404(
60 News,
61 id=self._get_kwarg(self.news_item_kw),
62 course_instance=self.instance,
63 )
64 self.note("news_item")
65
66 def post(self, request, *args, **kwargs):
67 self.news_item.delete()
68 return self.redirect(self.instance.get_url("news-list"))
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/email_messages.py b/lib/email_messages.py
--- a/lib/email_messages.py
+++ b/lib/email_messages.py
@@ -1,9 +1,10 @@
import logging
import traceback
from django.conf import settings
-from django.core.mail import send_mail
-from django.urls import reverse
-from .helpers import build_aplus_url
+from django.core.mail import send_mail, send_mass_mail
+
+from .helpers import Enum, build_aplus_url
+from course.models import CourseInstance
logger = logging.getLogger('aplus.lib.email_messages')
@@ -61,3 +62,30 @@
email_course_instance(instance, subject, body)
except:
pass
+
+
+def email_course_students(
+ instance: CourseInstance,
+ subject: str,
+ message: str,
+ audience: Enum = CourseInstance.ENROLLMENT_AUDIENCE.ALL_USERS,
+ ) -> int:
+ """
+ Sends an email to students on the course. Audience parameter controls whether the mail goes
+ to all (default), just internal, or just external students.
+ Returns number of emails sent, or -1 in case of error.
+ """
+ students = instance.students
+ if audience == CourseInstance.ENROLLMENT_AUDIENCE.INTERNAL_USERS:
+ students = students.filter(organization=settings.LOCAL_ORGANIZATION)
+ elif audience == CourseInstance.ENROLLMENT_AUDIENCE.EXTERNAL_USERS:
+ students = students.exclude(organization=settings.LOCAL_ORGANIZATION)
+
+ recipients = students.exclude(user__email='').values_list("user__email", flat=True)
+ emails = tuple(map(lambda x: (subject, message, settings.SERVER_EMAIL, [x]), recipients))
+
+ try:
+ return send_mass_mail(emails)
+ except:
+ logger.exception('Failed to send course instance emails.')
+ return -1
diff --git a/news/forms.py b/news/forms.py
--- a/news/forms.py
+++ b/news/forms.py
@@ -1,16 +1,25 @@
+from typing import Any
+
from django import forms
+from django.utils.translation import gettext_lazy as _
from .models import News
class NewsForm(forms.ModelForm):
+ email = forms.BooleanField(
+ required=False,
+ label=_("SEND_EMAIL_TO_STUDENTS"),
+ )
+
class Meta:
model = News
fields = [
'audience',
'publish',
'pin',
+ 'email',
'title',
'body',
]
diff --git a/news/views.py b/news/views.py
--- a/news/views.py
+++ b/news/views.py
@@ -1,10 +1,14 @@
+from django.conf import settings
+from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.shortcuts import get_object_or_404
+from django.utils.translation import gettext_lazy as _
from authorization.permissions import ACCESS
from course.viewbase import CourseInstanceBaseView, CourseInstanceMixin
from lib.viewbase import BaseFormView, BaseRedirectView
+from lib.email_messages import email_course_students
from .forms import NewsForm
from .models import News
@@ -47,6 +51,15 @@
def form_valid(self, form):
form.save()
+ if form.cleaned_data['email']:
+ subject = f"[{settings.BRAND_NAME} course news] {self.instance.course.code}: {self.news_item.title}"
+ if email_course_students(
+ self.instance,
+ subject,
+ self.news_item.body,
+ self.news_item.audience,
+ ) < 0:
+ messages.error(self.request, _('FAILED_TO_SEND_EMAIL'))
return super().form_valid(form)
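`send_mass_mail` takes a sequence of `(subject, message, from_email, recipient_list)` tuples and reuses a single mail connection for all of them, so building one tuple per student, as the new helper does, keeps recipients from seeing each other's addresses. A stripped-down sketch of the same idea with placeholder data:

```python
from django.conf import settings
from django.core.mail import send_mass_mail


def notify_students(subject, body, recipient_emails):
    # One (subject, message, from_email, [to]) tuple per address keeps each
    # message single-recipient while sharing one connection.
    datatuple = tuple(
        (subject, body, settings.SERVER_EMAIL, [address])
        for address in recipient_emails
    )
    return send_mass_mail(datatuple, fail_silently=False)


# notify_students("Course news", "New material posted.",
#                 ["[email protected]", "[email protected]"])
```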
| {"golden_diff": "diff --git a/lib/email_messages.py b/lib/email_messages.py\n--- a/lib/email_messages.py\n+++ b/lib/email_messages.py\n@@ -1,9 +1,10 @@\n import logging\n import traceback\n from django.conf import settings\n-from django.core.mail import send_mail\n-from django.urls import reverse\n-from .helpers import build_aplus_url\n+from django.core.mail import send_mail, send_mass_mail\n+\n+from .helpers import Enum, build_aplus_url\n+from course.models import CourseInstance\n \n \n logger = logging.getLogger('aplus.lib.email_messages')\n@@ -61,3 +62,30 @@\n email_course_instance(instance, subject, body)\n except:\n pass\n+\n+\n+def email_course_students(\n+ instance: CourseInstance,\n+ subject: str,\n+ message: str,\n+ audience: Enum = CourseInstance.ENROLLMENT_AUDIENCE.ALL_USERS,\n+ ) -> int:\n+ \"\"\"\n+ Sends an email to students on the course. Audience parameter controls whether the mail goes\n+ to all (default), just internal, or just external students.\n+ Returns number of emails sent, or -1 in case of error.\n+ \"\"\"\n+ students = instance.students\n+ if audience == CourseInstance.ENROLLMENT_AUDIENCE.INTERNAL_USERS:\n+ students = students.filter(organization=settings.LOCAL_ORGANIZATION)\n+ elif audience == CourseInstance.ENROLLMENT_AUDIENCE.EXTERNAL_USERS:\n+ students = students.exclude(organization=settings.LOCAL_ORGANIZATION)\n+\n+ recipients = students.exclude(user__email='').values_list(\"user__email\", flat=True)\n+ emails = tuple(map(lambda x: (subject, message, settings.SERVER_EMAIL, [x]), recipients))\n+\n+ try:\n+ return send_mass_mail(emails)\n+ except:\n+ logger.exception('Failed to send course instance emails.')\n+ return -1\ndiff --git a/news/forms.py b/news/forms.py\n--- a/news/forms.py\n+++ b/news/forms.py\n@@ -1,16 +1,25 @@\n+from typing import Any\n+\n from django import forms\n+from django.utils.translation import gettext_lazy as _\n \n from .models import News\n \n \n class NewsForm(forms.ModelForm):\n \n+ email = forms.BooleanField(\n+ required=False,\n+ label=_(\"SEND_EMAIL_TO_STUDENTS\"),\n+ )\n+\n class Meta:\n model = News\n fields = [\n 'audience',\n 'publish',\n 'pin',\n+ 'email',\n 'title',\n 'body',\n ]\ndiff --git a/news/views.py b/news/views.py\n--- a/news/views.py\n+++ b/news/views.py\n@@ -1,10 +1,14 @@\n+from django.conf import settings\n+from django.contrib import messages\n from django.core.exceptions import PermissionDenied\n from django.http import Http404\n from django.shortcuts import get_object_or_404\n+from django.utils.translation import gettext_lazy as _\n \n from authorization.permissions import ACCESS\n from course.viewbase import CourseInstanceBaseView, CourseInstanceMixin\n from lib.viewbase import BaseFormView, BaseRedirectView\n+from lib.email_messages import email_course_students\n from .forms import NewsForm\n from .models import News\n \n@@ -47,6 +51,15 @@\n \n def form_valid(self, form):\n form.save()\n+ if form.cleaned_data['email']:\n+ subject = f\"[{settings.BRAND_NAME} course news] {self.instance.course.code}: {self.news_item.title}\"\n+ if email_course_students(\n+ self.instance,\n+ subject,\n+ self.news_item.body,\n+ self.news_item.audience,\n+ ) < 0:\n+ messages.error(self.request, _('FAILED_TO_SEND_EMAIL'))\n return super().form_valid(form)\n", "issue": "Feature request: send email when teacher adds news\nHi\r\n\r\nIt would be very nice to be able to notify students via email when teacher adds news. 
This should be an option so the teacher could decide on case-by-case basis whether to send the email or not.\r\n\r\nWhat do you think?\r\n\r\nThanks!\nFeature request: send email when teacher adds news\nHi\r\n\r\nIt would be very nice to be able to notify students via email when teacher adds news. This should be an option so the teacher could decide on case-by-case basis whether to send the email or not.\r\n\r\nWhat do you think?\r\n\r\nThanks!\n", "before_files": [{"content": "from django import forms\n\nfrom .models import News\n\n\nclass NewsForm(forms.ModelForm):\n\n class Meta:\n model = News\n fields = [\n 'audience',\n 'publish',\n 'pin',\n 'title',\n 'body',\n ]\n", "path": "news/forms.py"}, {"content": "import logging\nimport traceback\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.urls import reverse\nfrom .helpers import build_aplus_url\n\n\nlogger = logging.getLogger('aplus.lib.email_messages')\n\n\ndef email_course_instance(instance, subject, message, everyone=False) -> bool:\n \"\"\"\n Sends an email to a course instance's technical support emails or teachers if technical support not set.\n If everyone == True, sends emails to teachers anyway.\n \"\"\"\n recipients = []\n if instance.technical_error_emails:\n recipients = instance.technical_error_emails.split(\",\")\n if everyone or not recipients:\n recipients = instance.teachers.exclude(user__email='').values_list(\"user__email\", flat=True)\n\n if not recipients:\n raise ValueError(\"No recipients\")\n\n try:\n return send_mail(subject, message, settings.SERVER_EMAIL, recipients, True) == 1\n except:\n logger.exception('Failed to send course instance emails.')\n raise\n\n\ndef email_course_error(request, exercise, message, exception=True):\n \"\"\"\n Sends error message to course instance's teachers or technical support emails if set.\n \"\"\"\n instance = exercise.course_instance\n\n error_trace = \"-\"\n if exception:\n error_trace = traceback.format_exc()\n\n if request:\n request_fields = repr(request)\n else:\n request_fields = \"No request available\"\n\n subject = settings.EXERCISE_ERROR_SUBJECT.format(\n course=instance.course.code,\n exercise=str(exercise))\n body = settings.EXERCISE_ERROR_DESCRIPTION.format(\n message=message,\n exercise_url=build_aplus_url(\n exercise.get_absolute_url(), user_url=True),\n course_edit_url=build_aplus_url(\n instance.get_url('course-details'), user_url=True),\n error_trace=error_trace,\n request_fields=request_fields)\n\n try:\n email_course_instance(instance, subject, body)\n except:\n pass\n", "path": "lib/email_messages.py"}, {"content": "from django.core.exceptions import PermissionDenied\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\n\nfrom authorization.permissions import ACCESS\nfrom course.viewbase import CourseInstanceBaseView, CourseInstanceMixin\nfrom lib.viewbase import BaseFormView, BaseRedirectView\nfrom .forms import NewsForm\nfrom .models import News\n\n\nclass ListNewsView(CourseInstanceBaseView):\n access_mode = ACCESS.TEACHER\n template_name = \"news/list.html\"\n\n def get_common_objects(self):\n super().get_common_objects()\n self.news = self.instance.news.all()\n self.note(\"news\")\n\n\nclass EditNewsView(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.TEACHER\n template_name = \"news/edit.html\"\n form_class = NewsForm\n news_item_kw = \"news_id\"\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n\n news_id = self._get_kwarg(self.news_item_kw, 
default=None)\n if news_id:\n self.news_item = get_object_or_404(\n News,\n pk=news_id,\n course_instance=self.instance\n )\n self.note(\"news_item\")\n else:\n self.news_item = News(course_instance=self.instance)\n\n kwargs[\"instance\"] = self.news_item\n return kwargs\n\n def get_success_url(self):\n return self.instance.get_url(\"news-list\")\n\n def form_valid(self, form):\n form.save()\n return super().form_valid(form)\n\n\nclass RemoveNewsView(CourseInstanceMixin, BaseRedirectView):\n access_mode = ACCESS.TEACHER\n news_item_kw = \"news_id\"\n\n def get_resource_objects(self):\n super().get_resource_objects()\n self.news_item = get_object_or_404(\n News,\n id=self._get_kwarg(self.news_item_kw),\n course_instance=self.instance,\n )\n self.note(\"news_item\")\n\n def post(self, request, *args, **kwargs):\n self.news_item.delete()\n return self.redirect(self.instance.get_url(\"news-list\"))\n", "path": "news/views.py"}], "after_files": [{"content": "from typing import Any\n\nfrom django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom .models import News\n\n\nclass NewsForm(forms.ModelForm):\n\n email = forms.BooleanField(\n required=False,\n label=_(\"SEND_EMAIL_TO_STUDENTS\"),\n )\n\n class Meta:\n model = News\n fields = [\n 'audience',\n 'publish',\n 'pin',\n 'email',\n 'title',\n 'body',\n ]\n", "path": "news/forms.py"}, {"content": "import logging\nimport traceback\nfrom django.conf import settings\nfrom django.core.mail import send_mail, send_mass_mail\n\nfrom .helpers import Enum, build_aplus_url\nfrom course.models import CourseInstance\n\n\nlogger = logging.getLogger('aplus.lib.email_messages')\n\n\ndef email_course_instance(instance, subject, message, everyone=False) -> bool:\n \"\"\"\n Sends an email to a course instance's technical support emails or teachers if technical support not set.\n If everyone == True, sends emails to teachers anyway.\n \"\"\"\n recipients = []\n if instance.technical_error_emails:\n recipients = instance.technical_error_emails.split(\",\")\n if everyone or not recipients:\n recipients = instance.teachers.exclude(user__email='').values_list(\"user__email\", flat=True)\n\n if not recipients:\n raise ValueError(\"No recipients\")\n\n try:\n return send_mail(subject, message, settings.SERVER_EMAIL, recipients, True) == 1\n except:\n logger.exception('Failed to send course instance emails.')\n raise\n\n\ndef email_course_error(request, exercise, message, exception=True):\n \"\"\"\n Sends error message to course instance's teachers or technical support emails if set.\n \"\"\"\n instance = exercise.course_instance\n\n error_trace = \"-\"\n if exception:\n error_trace = traceback.format_exc()\n\n if request:\n request_fields = repr(request)\n else:\n request_fields = \"No request available\"\n\n subject = settings.EXERCISE_ERROR_SUBJECT.format(\n course=instance.course.code,\n exercise=str(exercise))\n body = settings.EXERCISE_ERROR_DESCRIPTION.format(\n message=message,\n exercise_url=build_aplus_url(\n exercise.get_absolute_url(), user_url=True),\n course_edit_url=build_aplus_url(\n instance.get_url('course-details'), user_url=True),\n error_trace=error_trace,\n request_fields=request_fields)\n\n try:\n email_course_instance(instance, subject, body)\n except:\n pass\n\n\ndef email_course_students(\n instance: CourseInstance,\n subject: str,\n message: str,\n audience: Enum = CourseInstance.ENROLLMENT_AUDIENCE.ALL_USERS,\n ) -> int:\n \"\"\"\n Sends an email to students on the course. 
Audience parameter controls whether the mail goes\n to all (default), just internal, or just external students.\n Returns number of emails sent, or -1 in case of error.\n \"\"\"\n students = instance.students\n if audience == CourseInstance.ENROLLMENT_AUDIENCE.INTERNAL_USERS:\n students = students.filter(organization=settings.LOCAL_ORGANIZATION)\n elif audience == CourseInstance.ENROLLMENT_AUDIENCE.EXTERNAL_USERS:\n students = students.exclude(organization=settings.LOCAL_ORGANIZATION)\n\n recipients = students.exclude(user__email='').values_list(\"user__email\", flat=True)\n emails = tuple(map(lambda x: (subject, message, settings.SERVER_EMAIL, [x]), recipients))\n\n try:\n return send_mass_mail(emails)\n except:\n logger.exception('Failed to send course instance emails.')\n return -1\n", "path": "lib/email_messages.py"}, {"content": "from django.conf import settings\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import gettext_lazy as _\n\nfrom authorization.permissions import ACCESS\nfrom course.viewbase import CourseInstanceBaseView, CourseInstanceMixin\nfrom lib.viewbase import BaseFormView, BaseRedirectView\nfrom lib.email_messages import email_course_students\nfrom .forms import NewsForm\nfrom .models import News\n\n\nclass ListNewsView(CourseInstanceBaseView):\n access_mode = ACCESS.TEACHER\n template_name = \"news/list.html\"\n\n def get_common_objects(self):\n super().get_common_objects()\n self.news = self.instance.news.all()\n self.note(\"news\")\n\n\nclass EditNewsView(CourseInstanceMixin, BaseFormView):\n access_mode = ACCESS.TEACHER\n template_name = \"news/edit.html\"\n form_class = NewsForm\n news_item_kw = \"news_id\"\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n\n news_id = self._get_kwarg(self.news_item_kw, default=None)\n if news_id:\n self.news_item = get_object_or_404(\n News,\n pk=news_id,\n course_instance=self.instance\n )\n self.note(\"news_item\")\n else:\n self.news_item = News(course_instance=self.instance)\n\n kwargs[\"instance\"] = self.news_item\n return kwargs\n\n def get_success_url(self):\n return self.instance.get_url(\"news-list\")\n\n def form_valid(self, form):\n form.save()\n if form.cleaned_data['email']:\n subject = f\"[{settings.BRAND_NAME} course news] {self.instance.course.code}: {self.news_item.title}\"\n if email_course_students(\n self.instance,\n subject,\n self.news_item.body,\n self.news_item.audience,\n ) < 0:\n messages.error(self.request, _('FAILED_TO_SEND_EMAIL'))\n return super().form_valid(form)\n\n\nclass RemoveNewsView(CourseInstanceMixin, BaseRedirectView):\n access_mode = ACCESS.TEACHER\n news_item_kw = \"news_id\"\n\n def get_resource_objects(self):\n super().get_resource_objects()\n self.news_item = get_object_or_404(\n News,\n id=self._get_kwarg(self.news_item_kw),\n course_instance=self.instance,\n )\n self.note(\"news_item\")\n\n def post(self, request, *args, **kwargs):\n self.news_item.delete()\n return self.redirect(self.instance.get_url(\"news-list\"))\n", "path": "news/views.py"}]} | 1,596 | 812 |
gh_patches_debug_358 | rasdani/github-patches | git_diff | spacetelescope__jwql-550 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cron jobs for monitors currently failing
Traceback (most recent call last):
File "/home/jwqladm/repositories/jwql/jwql/instrument_monitors/common_monitors/bias_monitor.py", line 58, in <module>
from jwql.instrument_monitors.common_monitors.dark_monitor import mast_query_darks
File "/home/jwqladm/repositories/jwql/jwql/instrument_monitors/common_monitors/dark_monitor.py", line 77, in <module>
from jwql.jwql_monitors import monitor_mast
File "/home/jwqladm/repositories/jwql/jwql/jwql_monitors/monitor_mast.py", line 25, in <module>
from bokeh.embed import components
File "/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/__init__.py", line 81, in <module>
from .util import logconfig
File "/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/util/logconfig.py", line 87, in <module>
level = settings.py_log_level()
File "/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/settings.py", line 310, in __call__
return self._convert(os.environ[self._env_var])
File "/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/settings.py", line 236, in convert_logging
raise ValueError("Cannot convert {} to log level, valid values are: {}".format(value, ", ".join(_log_levels)))
ValueError: Cannot convert WARN to log level, valid values are: CRITICAL, ERROR, WARNING, INFO, DEBUG, TRACE, NONE
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.22.0'
6
7 AUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '
8 AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'
9
10 DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
11
12 DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']
13 REQUIRES = [
14 'asdf>=2.3.3',
15 'astropy>=3.2.1',
16 'astroquery>=0.3.9',
17 'authlib',
18 'bokeh>=1.0',
19 'codecov',
20 'django>=2.0',
21 'flake8',
22 'inflection',
23 'ipython',
24 'jinja2',
25 'jsonschema==2.6.0',
26 'jwedb>=0.0.3',
27 'matplotlib',
28 'numpy',
29 'numpydoc',
30 'pandas',
31 'psycopg2',
32 'pysiaf',
33 'pytest',
34 'pytest-cov',
35 'scipy',
36 'sphinx',
37 'sqlalchemy',
38 'stsci_rtd_theme',
39 'twine'
40 ]
41
42 setup(
43 name='jwql',
44 version=VERSION,
45 description=DESCRIPTION,
46 url='https://github.com/spacetelescope/jwql.git',
47 author=AUTHORS,
48 author_email='[email protected]',
49 license='BSD',
50 keywords=['astronomy', 'python'],
51 classifiers=['Programming Language :: Python'],
52 packages=find_packages(),
53 install_requires=REQUIRES,
54 dependency_links=DEPENDENCY_LINKS,
55 include_package_data=True,
56 include_dirs=[np.get_include()],
57 )
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@
'astropy>=3.2.1',
'astroquery>=0.3.9',
'authlib',
- 'bokeh>=1.0',
+ 'bokeh>=1.0,<1.4',
'codecov',
'django>=2.0',
'flake8',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -15,7 +15,7 @@\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n- 'bokeh>=1.0',\n+ 'bokeh>=1.0,<1.4',\n 'codecov',\n 'django>=2.0',\n 'flake8',\n", "issue": "Cron jobs for monitors currently failing \nTraceback (most recent call last):\r\n File \"/home/jwqladm/repositories/jwql/jwql/instrument_monitors/common_monitors/bias_monitor.py\", line 58, in <module>\r\n from jwql.instrument_monitors.common_monitors.dark_monitor import mast_query_darks\r\n File \"/home/jwqladm/repositories/jwql/jwql/instrument_monitors/common_monitors/dark_monitor.py\", line 77, in <module>\r\n from jwql.jwql_monitors import monitor_mast\r\n File \"/home/jwqladm/repositories/jwql/jwql/jwql_monitors/monitor_mast.py\", line 25, in <module>\r\n from bokeh.embed import components\r\n File \"/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/__init__.py\", line 81, in <module>\r\n from .util import logconfig\r\n File \"/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/util/logconfig.py\", line 87, in <module>\r\n level = settings.py_log_level()\r\n File \"/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/settings.py\", line 310, in __call__\r\n return self._convert(os.environ[self._env_var])\r\n File \"/grp/jwst/ins/jwql/envs/miniconda3/envs/jwql-3.6/lib/python3.6/site-packages/bokeh/settings.py\", line 236, in convert_logging\r\n raise ValueError(\"Cannot convert {} to log level, valid values are: {}\".format(value, \", \".join(_log_levels)))\r\nValueError: Cannot convert WARN to log level, valid values are: CRITICAL, ERROR, WARNING, INFO, DEBUG, TRACE, NONE\n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.22.0'\n\nAUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0',\n 'codecov',\n 'django>=2.0',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema==2.6.0',\n 'jwedb>=0.0.3',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.22.0'\n\nAUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']\nREQUIRES = [\n 'asdf>=2.3.3',\n 
'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0,<1.4',\n 'codecov',\n 'django>=2.0',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema==2.6.0',\n 'jwedb>=0.0.3',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]} | 1,221 | 101 |
gh_patches_debug_1710 | rasdani/github-patches | git_diff | encode__httpx-407 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ASGIDispatch and WSGIDispatch should be importable from the top-level httpx package
From #396:
> ``ASGIDispatch`` and ``WSGIDispatch`` are documented as top-level but aren't exposed at the top level. This is definitely an issue, I'd recommend the route of making both available top-level.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `httpx/dispatch/__init__.py`
Content:
```
1 """
2 Dispatch classes handle the raw network connections and the implementation
3 details of making the HTTP request and receiving the response.
4 """
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/httpx/dispatch/__init__.py b/httpx/dispatch/__init__.py
--- a/httpx/dispatch/__init__.py
+++ b/httpx/dispatch/__init__.py
@@ -2,3 +2,7 @@
Dispatch classes handle the raw network connections and the implementation
details of making the HTTP request and receiving the response.
"""
+from .asgi import ASGIDispatch
+from .wsgi import WSGIDispatch
+
+__all__ = ["ASGIDispatch", "WSGIDispatch"]
| {"golden_diff": "diff --git a/httpx/dispatch/__init__.py b/httpx/dispatch/__init__.py\n--- a/httpx/dispatch/__init__.py\n+++ b/httpx/dispatch/__init__.py\n@@ -2,3 +2,7 @@\n Dispatch classes handle the raw network connections and the implementation\n details of making the HTTP request and receiving the response.\n \"\"\"\n+from .asgi import ASGIDispatch\n+from .wsgi import WSGIDispatch\n+\n+__all__ = [\"ASGIDispatch\", \"WSGIDispatch\"]\n", "issue": "ASGIDispatch and WSGIDispatch should be importable from the top-level httpx package\nFrom #396:\r\n\r\n> ``ASGIDispatch`` and ``WSGIDispatch`` are documented as top-level but aren't exposed at the top level. This is definitely an issue, I'd recommend the route of making both available top-level.\n", "before_files": [{"content": "\"\"\"\nDispatch classes handle the raw network connections and the implementation\ndetails of making the HTTP request and receiving the response.\n\"\"\"\n", "path": "httpx/dispatch/__init__.py"}], "after_files": [{"content": "\"\"\"\nDispatch classes handle the raw network connections and the implementation\ndetails of making the HTTP request and receiving the response.\n\"\"\"\nfrom .asgi import ASGIDispatch\nfrom .wsgi import WSGIDispatch\n\n__all__ = [\"ASGIDispatch\", \"WSGIDispatch\"]\n", "path": "httpx/dispatch/__init__.py"}]} | 368 | 121 |
gh_patches_debug_21028 | rasdani/github-patches | git_diff | techmatters__terraso-backend-141 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Landscape creation and update
<!--
Use a concise title that describes the request.
Bad: localization
Good: Translate site into Spanish
Bad: customize hostname
Good: Determine hostname at build time from environment
-->
## Description
- Default landscape group should be created when a new landscape is created
- Manager should be assigned at the creation of a landscape
- Only managers can update landscape data
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `terraso_backend/apps/core/models/landscapes.py`
Content:
```
1 import structlog
2 from django.db import models
3
4 from apps.core import permission_rules as perm_rules
5
6 from .commons import BaseModel, SlugModel
7 from .groups import Group
8 from .users import User
9
10 logger = structlog.get_logger(__name__)
11
12
13 class Landscape(SlugModel):
14 """
15 This model represents a Landscape on Terraso platform.
16
17 A Landscape is a socio-ecological system that consists of natural
18 and/or human-modified ecosystems. Defined by its stakeholds, a
19 Landscape usually has geographical boundaries. It may correspond to,
20 or be a combination of, natural boundaries, distinct land features,
21 socially defined areas such as indigenous territories, and/or
22 jurisdictional and administrative boundaries. The boundaries of a
23 Landscape can cross several countries.
24 """
25
26 name = models.CharField(max_length=128, unique=True)
27 description = models.TextField(max_length=512, blank=True, default="")
28 website = models.URLField(blank=True, default="")
29 location = models.CharField(max_length=128, blank=True, default="")
30 area_polygon = models.JSONField(blank=True, null=True)
31
32 created_by = models.ForeignKey(
33 User,
34 blank=True,
35 null=True,
36 on_delete=models.PROTECT,
37 related_name="created_landscapes",
38 )
39 groups = models.ManyToManyField(Group, through="LandscapeGroup")
40
41 field_to_slug = "name"
42
43 class Meta(SlugModel.Meta):
44 rules_permissions = {
45 "change": perm_rules.allowed_to_change_landscape,
46 "delete": perm_rules.allowed_to_delete_landscape,
47 }
48
49 def get_default_group(self):
50 """
51 A default Group in a Landscape is that Group where any
52 individual (associated or not with other Groups) is added when
53 associating directly with a Landscape.
54 """
55 try:
56 # associated_groups is the related_name defined on
57 # LandscapeGroup relationship with Landscape. It returns a
58 # queryset of LandscapeGroup
59 landscape_group = self.associated_groups.get(is_default_landscape_group=True)
60 except LandscapeGroup.DoesNotExist:
61 logger.error(
62 "Landscape has no default group, but it must have", extra={"landscape_id": self.pk}
63 )
64 return None
65
66 return landscape_group.group
67
68 def __str__(self):
69 return self.name
70
71
72 class LandscapeGroup(BaseModel):
73 """
74 This model represents the association between a Landscape and a Group on
75 Terraso platform.
76 """
77
78 landscape = models.ForeignKey(
79 Landscape, on_delete=models.CASCADE, related_name="associated_groups"
80 )
81 group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name="associated_landscapes")
82
83 is_default_landscape_group = models.BooleanField(blank=True, default=False)
84
85 class Meta:
86 rules_permissions = {
87 "add": perm_rules.allowed_to_add_landscape_group,
88 "delete": perm_rules.allowed_to_delete_landscape_group,
89 }
90 constraints = (
91 models.UniqueConstraint(
92 fields=("group", "landscape"),
93 condition=models.Q(deleted_at__isnull=True),
94 name="unique_active_landscape_group",
95 ),
96 )
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/terraso_backend/apps/core/models/landscapes.py b/terraso_backend/apps/core/models/landscapes.py
--- a/terraso_backend/apps/core/models/landscapes.py
+++ b/terraso_backend/apps/core/models/landscapes.py
@@ -1,5 +1,5 @@
import structlog
-from django.db import models
+from django.db import models, transaction
from apps.core import permission_rules as perm_rules
@@ -46,6 +46,24 @@
"delete": perm_rules.allowed_to_delete_landscape,
}
+ def save(self, *args, **kwargs):
+ with transaction.atomic():
+ creating = not Landscape.objects.filter(pk=self.pk).exists()
+
+ super().save(*args, **kwargs)
+
+ if creating and self.created_by:
+ group = Group(
+ name="Group {}".format(self.slug),
+ description="",
+ created_by=self.created_by,
+ )
+ group.save()
+ landscape_group = LandscapeGroup(
+ group=group, landscape=self, is_default_landscape_group=True
+ )
+ landscape_group.save()
+
def get_default_group(self):
"""
A default Group in a Landscape is that Group where any
| {"golden_diff": "diff --git a/terraso_backend/apps/core/models/landscapes.py b/terraso_backend/apps/core/models/landscapes.py\n--- a/terraso_backend/apps/core/models/landscapes.py\n+++ b/terraso_backend/apps/core/models/landscapes.py\n@@ -1,5 +1,5 @@\n import structlog\n-from django.db import models\n+from django.db import models, transaction\n \n from apps.core import permission_rules as perm_rules\n \n@@ -46,6 +46,24 @@\n \"delete\": perm_rules.allowed_to_delete_landscape,\n }\n \n+ def save(self, *args, **kwargs):\n+ with transaction.atomic():\n+ creating = not Landscape.objects.filter(pk=self.pk).exists()\n+\n+ super().save(*args, **kwargs)\n+\n+ if creating and self.created_by:\n+ group = Group(\n+ name=\"Group {}\".format(self.slug),\n+ description=\"\",\n+ created_by=self.created_by,\n+ )\n+ group.save()\n+ landscape_group = LandscapeGroup(\n+ group=group, landscape=self, is_default_landscape_group=True\n+ )\n+ landscape_group.save()\n+\n def get_default_group(self):\n \"\"\"\n A default Group in a Landscape is that Group where any\n", "issue": "Landscape creation and update\n<!--\r\nUse a concise title that describes the request.\r\nBad: localization\r\nGood: Translate site into Spanish\r\n\r\nBad: customize hostname\r\nGood: Determine hostname at build time from environment\r\n-->\r\n\r\n## Description\r\n- Default landscape group should be created when a new landscape is created\r\n- Manager should be assigned at the creation of a landscape\r\n- Only managers can update landscape data\n", "before_files": [{"content": "import structlog\nfrom django.db import models\n\nfrom apps.core import permission_rules as perm_rules\n\nfrom .commons import BaseModel, SlugModel\nfrom .groups import Group\nfrom .users import User\n\nlogger = structlog.get_logger(__name__)\n\n\nclass Landscape(SlugModel):\n \"\"\"\n This model represents a Landscape on Terraso platform.\n\n A Landscape is a socio-ecological system that consists of natural\n and/or human-modified ecosystems. Defined by its stakeholds, a\n Landscape usually has geographical boundaries. It may correspond to,\n or be a combination of, natural boundaries, distinct land features,\n socially defined areas such as indigenous territories, and/or\n jurisdictional and administrative boundaries. The boundaries of a\n Landscape can cross several countries.\n \"\"\"\n\n name = models.CharField(max_length=128, unique=True)\n description = models.TextField(max_length=512, blank=True, default=\"\")\n website = models.URLField(blank=True, default=\"\")\n location = models.CharField(max_length=128, blank=True, default=\"\")\n area_polygon = models.JSONField(blank=True, null=True)\n\n created_by = models.ForeignKey(\n User,\n blank=True,\n null=True,\n on_delete=models.PROTECT,\n related_name=\"created_landscapes\",\n )\n groups = models.ManyToManyField(Group, through=\"LandscapeGroup\")\n\n field_to_slug = \"name\"\n\n class Meta(SlugModel.Meta):\n rules_permissions = {\n \"change\": perm_rules.allowed_to_change_landscape,\n \"delete\": perm_rules.allowed_to_delete_landscape,\n }\n\n def get_default_group(self):\n \"\"\"\n A default Group in a Landscape is that Group where any\n individual (associated or not with other Groups) is added when\n associating directly with a Landscape.\n \"\"\"\n try:\n # associated_groups is the related_name defined on\n # LandscapeGroup relationship with Landscape. 
It returns a\n # queryset of LandscapeGroup\n landscape_group = self.associated_groups.get(is_default_landscape_group=True)\n except LandscapeGroup.DoesNotExist:\n logger.error(\n \"Landscape has no default group, but it must have\", extra={\"landscape_id\": self.pk}\n )\n return None\n\n return landscape_group.group\n\n def __str__(self):\n return self.name\n\n\nclass LandscapeGroup(BaseModel):\n \"\"\"\n This model represents the association between a Landscape and a Group on\n Terraso platform.\n \"\"\"\n\n landscape = models.ForeignKey(\n Landscape, on_delete=models.CASCADE, related_name=\"associated_groups\"\n )\n group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name=\"associated_landscapes\")\n\n is_default_landscape_group = models.BooleanField(blank=True, default=False)\n\n class Meta:\n rules_permissions = {\n \"add\": perm_rules.allowed_to_add_landscape_group,\n \"delete\": perm_rules.allowed_to_delete_landscape_group,\n }\n constraints = (\n models.UniqueConstraint(\n fields=(\"group\", \"landscape\"),\n condition=models.Q(deleted_at__isnull=True),\n name=\"unique_active_landscape_group\",\n ),\n )\n", "path": "terraso_backend/apps/core/models/landscapes.py"}], "after_files": [{"content": "import structlog\nfrom django.db import models, transaction\n\nfrom apps.core import permission_rules as perm_rules\n\nfrom .commons import BaseModel, SlugModel\nfrom .groups import Group\nfrom .users import User\n\nlogger = structlog.get_logger(__name__)\n\n\nclass Landscape(SlugModel):\n \"\"\"\n This model represents a Landscape on Terraso platform.\n\n A Landscape is a socio-ecological system that consists of natural\n and/or human-modified ecosystems. Defined by its stakeholds, a\n Landscape usually has geographical boundaries. It may correspond to,\n or be a combination of, natural boundaries, distinct land features,\n socially defined areas such as indigenous territories, and/or\n jurisdictional and administrative boundaries. The boundaries of a\n Landscape can cross several countries.\n \"\"\"\n\n name = models.CharField(max_length=128, unique=True)\n description = models.TextField(max_length=512, blank=True, default=\"\")\n website = models.URLField(blank=True, default=\"\")\n location = models.CharField(max_length=128, blank=True, default=\"\")\n area_polygon = models.JSONField(blank=True, null=True)\n\n created_by = models.ForeignKey(\n User,\n blank=True,\n null=True,\n on_delete=models.PROTECT,\n related_name=\"created_landscapes\",\n )\n groups = models.ManyToManyField(Group, through=\"LandscapeGroup\")\n\n field_to_slug = \"name\"\n\n class Meta(SlugModel.Meta):\n rules_permissions = {\n \"change\": perm_rules.allowed_to_change_landscape,\n \"delete\": perm_rules.allowed_to_delete_landscape,\n }\n\n def save(self, *args, **kwargs):\n with transaction.atomic():\n creating = not Landscape.objects.filter(pk=self.pk).exists()\n\n super().save(*args, **kwargs)\n\n if creating and self.created_by:\n group = Group(\n name=\"Group {}\".format(self.slug),\n description=\"\",\n created_by=self.created_by,\n )\n group.save()\n landscape_group = LandscapeGroup(\n group=group, landscape=self, is_default_landscape_group=True\n )\n landscape_group.save()\n\n def get_default_group(self):\n \"\"\"\n A default Group in a Landscape is that Group where any\n individual (associated or not with other Groups) is added when\n associating directly with a Landscape.\n \"\"\"\n try:\n # associated_groups is the related_name defined on\n # LandscapeGroup relationship with Landscape. 
It returns a\n # queryset of LandscapeGroup\n landscape_group = self.associated_groups.get(is_default_landscape_group=True)\n except LandscapeGroup.DoesNotExist:\n logger.error(\n \"Landscape has no default group, but it must have\", extra={\"landscape_id\": self.pk}\n )\n return None\n\n return landscape_group.group\n\n def __str__(self):\n return self.name\n\n\nclass LandscapeGroup(BaseModel):\n \"\"\"\n This model represents the association between a Landscape and a Group on\n Terraso platform.\n \"\"\"\n\n landscape = models.ForeignKey(\n Landscape, on_delete=models.CASCADE, related_name=\"associated_groups\"\n )\n group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name=\"associated_landscapes\")\n\n is_default_landscape_group = models.BooleanField(blank=True, default=False)\n\n class Meta:\n rules_permissions = {\n \"add\": perm_rules.allowed_to_add_landscape_group,\n \"delete\": perm_rules.allowed_to_delete_landscape_group,\n }\n constraints = (\n models.UniqueConstraint(\n fields=(\"group\", \"landscape\"),\n condition=models.Q(deleted_at__isnull=True),\n name=\"unique_active_landscape_group\",\n ),\n )\n", "path": "terraso_backend/apps/core/models/landscapes.py"}]} | 1,195 | 278 |
gh_patches_debug_20437 | rasdani/github-patches | git_diff | graspologic-org__graspologic-488 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove * imports in subgraph, utils
the` __init__.py` files in `graspy.subgraph` and `graspy.utils` shouldn't have import *.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `graspy/utils/__init__.py`
Content:
```
1 # Copyright (c) Microsoft Corporation and contributors.
2 # Licensed under the MIT License.
3
4 from .utils import *
5 from .ptr import *
6
```
Path: `graspy/subgraph/__init__.py`
Content:
```
1 # Copyright (c) Microsoft Corporation and contributors.
2 # Licensed under the MIT License.
3
4 from .sg import *
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/graspy/subgraph/__init__.py b/graspy/subgraph/__init__.py
--- a/graspy/subgraph/__init__.py
+++ b/graspy/subgraph/__init__.py
@@ -1,4 +1,7 @@
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
-from .sg import *
+
+from .sg import SignalSubgraph
+
+__all__ = [SignalSubgraph]
diff --git a/graspy/utils/__init__.py b/graspy/utils/__init__.py
--- a/graspy/utils/__init__.py
+++ b/graspy/utils/__init__.py
@@ -1,5 +1,42 @@
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
-from .utils import *
-from .ptr import *
+from .utils import (
+ import_graph,
+ import_edgelist,
+ is_symmetric,
+ is_loopless,
+ is_unweighted,
+ is_almost_symmetric,
+ symmetrize,
+ remove_loops,
+ to_laplace,
+ is_fully_connected,
+ get_lcc,
+ get_multigraph_union_lcc,
+ get_multigraph_intersect_lcc,
+ augment_diagonal,
+ binarize,
+ cartprod,
+)
+from .ptr import pass_to_ranks
+
+__all__ = [
+ import_graph,
+ import_edgelist,
+ is_symmetric,
+ is_loopless,
+ is_unweighted,
+ is_almost_symmetric,
+ symmetrize,
+ remove_loops,
+ to_laplace,
+ is_fully_connected,
+ get_lcc,
+ get_multigraph_union_lcc,
+ get_multigraph_intersect_lcc,
+ augment_diagonal,
+ binarize,
+ cartprod,
+ pass_to_ranks,
+]
| {"golden_diff": "diff --git a/graspy/subgraph/__init__.py b/graspy/subgraph/__init__.py\n--- a/graspy/subgraph/__init__.py\n+++ b/graspy/subgraph/__init__.py\n@@ -1,4 +1,7 @@\n # Copyright (c) Microsoft Corporation and contributors.\n # Licensed under the MIT License.\n \n-from .sg import *\n+\n+from .sg import SignalSubgraph\n+\n+__all__ = [SignalSubgraph]\ndiff --git a/graspy/utils/__init__.py b/graspy/utils/__init__.py\n--- a/graspy/utils/__init__.py\n+++ b/graspy/utils/__init__.py\n@@ -1,5 +1,42 @@\n # Copyright (c) Microsoft Corporation and contributors.\n # Licensed under the MIT License.\n \n-from .utils import *\n-from .ptr import *\n+from .utils import (\n+ import_graph,\n+ import_edgelist,\n+ is_symmetric,\n+ is_loopless,\n+ is_unweighted,\n+ is_almost_symmetric,\n+ symmetrize,\n+ remove_loops,\n+ to_laplace,\n+ is_fully_connected,\n+ get_lcc,\n+ get_multigraph_union_lcc,\n+ get_multigraph_intersect_lcc,\n+ augment_diagonal,\n+ binarize,\n+ cartprod,\n+)\n+from .ptr import pass_to_ranks\n+\n+__all__ = [\n+ import_graph,\n+ import_edgelist,\n+ is_symmetric,\n+ is_loopless,\n+ is_unweighted,\n+ is_almost_symmetric,\n+ symmetrize,\n+ remove_loops,\n+ to_laplace,\n+ is_fully_connected,\n+ get_lcc,\n+ get_multigraph_union_lcc,\n+ get_multigraph_intersect_lcc,\n+ augment_diagonal,\n+ binarize,\n+ cartprod,\n+ pass_to_ranks,\n+]\n", "issue": "Remove * imports in subgraph, utils\nthe` __init__.py` files in `graspy.subgraph` and `graspy.utils` shouldn't have import *.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nfrom .utils import *\nfrom .ptr import *\n", "path": "graspy/utils/__init__.py"}, {"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nfrom .sg import *\n", "path": "graspy/subgraph/__init__.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\nfrom .utils import (\n import_graph,\n import_edgelist,\n is_symmetric,\n is_loopless,\n is_unweighted,\n is_almost_symmetric,\n symmetrize,\n remove_loops,\n to_laplace,\n is_fully_connected,\n get_lcc,\n get_multigraph_union_lcc,\n get_multigraph_intersect_lcc,\n augment_diagonal,\n binarize,\n cartprod,\n)\nfrom .ptr import pass_to_ranks\n\n__all__ = [\n import_graph,\n import_edgelist,\n is_symmetric,\n is_loopless,\n is_unweighted,\n is_almost_symmetric,\n symmetrize,\n remove_loops,\n to_laplace,\n is_fully_connected,\n get_lcc,\n get_multigraph_union_lcc,\n get_multigraph_intersect_lcc,\n augment_diagonal,\n binarize,\n cartprod,\n pass_to_ranks,\n]\n", "path": "graspy/utils/__init__.py"}, {"content": "# Copyright (c) Microsoft Corporation and contributors.\n# Licensed under the MIT License.\n\n\nfrom .sg import SignalSubgraph\n\n__all__ = [SignalSubgraph]\n", "path": "graspy/subgraph/__init__.py"}]} | 382 | 429 |
gh_patches_debug_1928 | rasdani/github-patches | git_diff | goauthentik__authentik-3299 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Get username from mailcow source
**Is your feature request related to a problem? Please describe.**
I like to get a username from mailcow. With username the enrollment for new users is more simple.
**Describe the solution you'd like**
Set username to full_name provided by mailcow oauths source.
**Additional context**
For other sources the username is also set redundant to another attribute if there is no special source attribute:
azure_ad.py:
```
"username": info.get("displayName"),
"name": info.get("displayName"),
```
discord.py:
```
"username": info.get("username"),
"name": info.get("username"),
```
facebook.py:
```
"username": info.get("name"),
"name": info.get("name"),
```
reddit.py
```
"username": info.get("name"),
"name": info.get("name"),
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/sources/oauth/types/mailcow.py`
Content:
```
1 """Mailcow OAuth Views"""
2 from typing import Any, Optional
3
4 from requests.exceptions import RequestException
5 from structlog.stdlib import get_logger
6
7 from authentik.sources.oauth.clients.oauth2 import OAuth2Client
8 from authentik.sources.oauth.types.manager import MANAGER, SourceType
9 from authentik.sources.oauth.views.callback import OAuthCallback
10 from authentik.sources.oauth.views.redirect import OAuthRedirect
11
12 LOGGER = get_logger()
13
14
15 class MailcowOAuthRedirect(OAuthRedirect):
16 """Mailcow OAuth2 Redirect"""
17
18 def get_additional_parameters(self, source): # pragma: no cover
19 return {
20 "scope": ["profile"],
21 }
22
23
24 class MailcowOAuth2Client(OAuth2Client):
25 """MailcowOAuth2Client, for some reason, mailcow does not like the default headers"""
26
27 def get_profile_info(self, token: dict[str, str]) -> Optional[dict[str, Any]]:
28 "Fetch user profile information."
29 profile_url = self.source.type.profile_url or ""
30 if self.source.type.urls_customizable and self.source.profile_url:
31 profile_url = self.source.profile_url
32 try:
33 response = self.session.request(
34 "get",
35 f"{profile_url}?access_token={token['access_token']}",
36 )
37 response.raise_for_status()
38 except RequestException as exc:
39 LOGGER.warning("Unable to fetch user profile", exc=exc, body=response.text)
40 return None
41 else:
42 return response.json()
43
44
45 class MailcowOAuth2Callback(OAuthCallback):
46 """Mailcow OAuth2 Callback"""
47
48 client_class = MailcowOAuth2Client
49
50 def get_user_enroll_context(
51 self,
52 info: dict[str, Any],
53 ) -> dict[str, Any]:
54 return {
55 "email": info.get("email"),
56 "name": info.get("full_name"),
57 }
58
59
60 @MANAGER.type()
61 class MailcowType(SourceType):
62 """Mailcow Type definition"""
63
64 callback_view = MailcowOAuth2Callback
65 redirect_view = MailcowOAuthRedirect
66 name = "Mailcow"
67 slug = "mailcow"
68
69 urls_customizable = True
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/authentik/sources/oauth/types/mailcow.py b/authentik/sources/oauth/types/mailcow.py
--- a/authentik/sources/oauth/types/mailcow.py
+++ b/authentik/sources/oauth/types/mailcow.py
@@ -52,6 +52,7 @@
info: dict[str, Any],
) -> dict[str, Any]:
return {
+ "username": info.get("full_name"),
"email": info.get("email"),
"name": info.get("full_name"),
}
| {"golden_diff": "diff --git a/authentik/sources/oauth/types/mailcow.py b/authentik/sources/oauth/types/mailcow.py\n--- a/authentik/sources/oauth/types/mailcow.py\n+++ b/authentik/sources/oauth/types/mailcow.py\n@@ -52,6 +52,7 @@\n info: dict[str, Any],\n ) -> dict[str, Any]:\n return {\n+ \"username\": info.get(\"full_name\"),\n \"email\": info.get(\"email\"),\n \"name\": info.get(\"full_name\"),\n }\n", "issue": "Get username from mailcow source\n**Is your feature request related to a problem? Please describe.**\r\nI like to get a username from mailcow. With username the enrollment for new users is more simple.\r\n\r\n**Describe the solution you'd like**\r\nSet username to full_name provided by mailcow oauths source.\r\n\r\n**Additional context**\r\nFor other sources the username is also set redundant to another attribute if there is no special source attribute:\r\nazure_ad.py:\r\n```\r\n \"username\": info.get(\"displayName\"),\r\n \"name\": info.get(\"displayName\"),\r\n```\r\n\r\ndiscord.py:\r\n```\r\n \"username\": info.get(\"username\"),\r\n \"name\": info.get(\"username\"),\r\n```\r\n\r\nfacebook.py:\r\n```\r\n \"username\": info.get(\"name\"),\r\n \"name\": info.get(\"name\"),\r\n```\r\n\r\nreddit.py\r\n```\r\n \"username\": info.get(\"name\"),\r\n \"name\": info.get(\"name\"),\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Mailcow OAuth Views\"\"\"\nfrom typing import Any, Optional\n\nfrom requests.exceptions import RequestException\nfrom structlog.stdlib import get_logger\n\nfrom authentik.sources.oauth.clients.oauth2 import OAuth2Client\nfrom authentik.sources.oauth.types.manager import MANAGER, SourceType\nfrom authentik.sources.oauth.views.callback import OAuthCallback\nfrom authentik.sources.oauth.views.redirect import OAuthRedirect\n\nLOGGER = get_logger()\n\n\nclass MailcowOAuthRedirect(OAuthRedirect):\n \"\"\"Mailcow OAuth2 Redirect\"\"\"\n\n def get_additional_parameters(self, source): # pragma: no cover\n return {\n \"scope\": [\"profile\"],\n }\n\n\nclass MailcowOAuth2Client(OAuth2Client):\n \"\"\"MailcowOAuth2Client, for some reason, mailcow does not like the default headers\"\"\"\n\n def get_profile_info(self, token: dict[str, str]) -> Optional[dict[str, Any]]:\n \"Fetch user profile information.\"\n profile_url = self.source.type.profile_url or \"\"\n if self.source.type.urls_customizable and self.source.profile_url:\n profile_url = self.source.profile_url\n try:\n response = self.session.request(\n \"get\",\n f\"{profile_url}?access_token={token['access_token']}\",\n )\n response.raise_for_status()\n except RequestException as exc:\n LOGGER.warning(\"Unable to fetch user profile\", exc=exc, body=response.text)\n return None\n else:\n return response.json()\n\n\nclass MailcowOAuth2Callback(OAuthCallback):\n \"\"\"Mailcow OAuth2 Callback\"\"\"\n\n client_class = MailcowOAuth2Client\n\n def get_user_enroll_context(\n self,\n info: dict[str, Any],\n ) -> dict[str, Any]:\n return {\n \"email\": info.get(\"email\"),\n \"name\": info.get(\"full_name\"),\n }\n\n\[email protected]()\nclass MailcowType(SourceType):\n \"\"\"Mailcow Type definition\"\"\"\n\n callback_view = MailcowOAuth2Callback\n redirect_view = MailcowOAuthRedirect\n name = \"Mailcow\"\n slug = \"mailcow\"\n\n urls_customizable = True\n", "path": "authentik/sources/oauth/types/mailcow.py"}], "after_files": [{"content": "\"\"\"Mailcow OAuth Views\"\"\"\nfrom typing import Any, Optional\n\nfrom requests.exceptions import RequestException\nfrom structlog.stdlib import get_logger\n\nfrom 
authentik.sources.oauth.clients.oauth2 import OAuth2Client\nfrom authentik.sources.oauth.types.manager import MANAGER, SourceType\nfrom authentik.sources.oauth.views.callback import OAuthCallback\nfrom authentik.sources.oauth.views.redirect import OAuthRedirect\n\nLOGGER = get_logger()\n\n\nclass MailcowOAuthRedirect(OAuthRedirect):\n \"\"\"Mailcow OAuth2 Redirect\"\"\"\n\n def get_additional_parameters(self, source): # pragma: no cover\n return {\n \"scope\": [\"profile\"],\n }\n\n\nclass MailcowOAuth2Client(OAuth2Client):\n \"\"\"MailcowOAuth2Client, for some reason, mailcow does not like the default headers\"\"\"\n\n def get_profile_info(self, token: dict[str, str]) -> Optional[dict[str, Any]]:\n \"Fetch user profile information.\"\n profile_url = self.source.type.profile_url or \"\"\n if self.source.type.urls_customizable and self.source.profile_url:\n profile_url = self.source.profile_url\n try:\n response = self.session.request(\n \"get\",\n f\"{profile_url}?access_token={token['access_token']}\",\n )\n response.raise_for_status()\n except RequestException as exc:\n LOGGER.warning(\"Unable to fetch user profile\", exc=exc, body=response.text)\n return None\n else:\n return response.json()\n\n\nclass MailcowOAuth2Callback(OAuthCallback):\n \"\"\"Mailcow OAuth2 Callback\"\"\"\n\n client_class = MailcowOAuth2Client\n\n def get_user_enroll_context(\n self,\n info: dict[str, Any],\n ) -> dict[str, Any]:\n return {\n \"username\": info.get(\"full_name\"),\n \"email\": info.get(\"email\"),\n \"name\": info.get(\"full_name\"),\n }\n\n\[email protected]()\nclass MailcowType(SourceType):\n \"\"\"Mailcow Type definition\"\"\"\n\n callback_view = MailcowOAuth2Callback\n redirect_view = MailcowOAuthRedirect\n name = \"Mailcow\"\n slug = \"mailcow\"\n\n urls_customizable = True\n", "path": "authentik/sources/oauth/types/mailcow.py"}]} | 1,039 | 111 |
gh_patches_debug_3662 | rasdani/github-patches | git_diff | scikit-hep__awkward-2169 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
run cpp tests in CI
### Version of Awkward Array
2.0.6
### Description and code to reproduce
@agoose77 and @jpivarski - I think, we need to have at least one node to run the cpp tests.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dev/copy-cpp-headers.py`
Content:
```
1 """Copy the header-only cpp headers into the various package directories that they are required"""
2 import pathlib
3 import shutil
4
5 root_path = pathlib.Path(__file__).absolute().parents[1]
6 source_path = root_path / "header-only"
7 dest_paths = (
8 root_path / "awkward-cpp" / "header-only",
9 root_path / "src" / "awkward" / "_connect" / "header-only",
10 )
11
12 if __name__ == "__main__":
13 for path in dest_paths:
14 if path.exists():
15 shutil.rmtree(path)
16 shutil.copytree(source_path, path)
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dev/copy-cpp-headers.py b/dev/copy-cpp-headers.py
--- a/dev/copy-cpp-headers.py
+++ b/dev/copy-cpp-headers.py
@@ -3,7 +3,7 @@
import shutil
root_path = pathlib.Path(__file__).absolute().parents[1]
-source_path = root_path / "header-only"
+source_path = root_path / "header-only" / "include"
dest_paths = (
root_path / "awkward-cpp" / "header-only",
root_path / "src" / "awkward" / "_connect" / "header-only",
| {"golden_diff": "diff --git a/dev/copy-cpp-headers.py b/dev/copy-cpp-headers.py\n--- a/dev/copy-cpp-headers.py\n+++ b/dev/copy-cpp-headers.py\n@@ -3,7 +3,7 @@\n import shutil\n \n root_path = pathlib.Path(__file__).absolute().parents[1]\n-source_path = root_path / \"header-only\"\n+source_path = root_path / \"header-only\" / \"include\"\n dest_paths = (\n root_path / \"awkward-cpp\" / \"header-only\",\n root_path / \"src\" / \"awkward\" / \"_connect\" / \"header-only\",\n", "issue": "run cpp tests in CI \n### Version of Awkward Array\n\n2.0.6\n\n### Description and code to reproduce\n\n@agoose77 and @jpivarski - I think, we need to have at least one node to run the cpp tests.\n", "before_files": [{"content": "\"\"\"Copy the header-only cpp headers into the various package directories that they are required\"\"\"\nimport pathlib\nimport shutil\n\nroot_path = pathlib.Path(__file__).absolute().parents[1]\nsource_path = root_path / \"header-only\"\ndest_paths = (\n root_path / \"awkward-cpp\" / \"header-only\",\n root_path / \"src\" / \"awkward\" / \"_connect\" / \"header-only\",\n)\n\nif __name__ == \"__main__\":\n for path in dest_paths:\n if path.exists():\n shutil.rmtree(path)\n shutil.copytree(source_path, path)\n", "path": "dev/copy-cpp-headers.py"}], "after_files": [{"content": "\"\"\"Copy the header-only cpp headers into the various package directories that they are required\"\"\"\nimport pathlib\nimport shutil\n\nroot_path = pathlib.Path(__file__).absolute().parents[1]\nsource_path = root_path / \"header-only\" / \"include\"\ndest_paths = (\n root_path / \"awkward-cpp\" / \"header-only\",\n root_path / \"src\" / \"awkward\" / \"_connect\" / \"header-only\",\n)\n\nif __name__ == \"__main__\":\n for path in dest_paths:\n if path.exists():\n shutil.rmtree(path)\n shutil.copytree(source_path, path)\n", "path": "dev/copy-cpp-headers.py"}]} | 467 | 136 |
gh_patches_debug_42985 | rasdani/github-patches | git_diff | pytorch__vision-914 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
STL-10 Testing Protocol
Class STL10 does not support recommended testing protocol.
See STL-10 official page: https://cs.stanford.edu/~acoates/stl10/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/datasets/stl10.py`
Content:
```
1 from __future__ import print_function
2 from PIL import Image
3 import os
4 import os.path
5 import numpy as np
6 from .cifar import CIFAR10
7
8
9 class STL10(CIFAR10):
10 """`STL10 <https://cs.stanford.edu/~acoates/stl10/>`_ Dataset.
11
12 Args:
13 root (string): Root directory of dataset where directory
14 ``stl10_binary`` exists.
15 split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.
16 Accordingly dataset is selected.
17 transform (callable, optional): A function/transform that takes in an PIL image
18 and returns a transformed version. E.g, ``transforms.RandomCrop``
19 target_transform (callable, optional): A function/transform that takes in the
20 target and transforms it.
21 download (bool, optional): If true, downloads the dataset from the internet and
22 puts it in root directory. If dataset is already downloaded, it is not
23 downloaded again.
24
25 """
26 base_folder = 'stl10_binary'
27 url = "http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz"
28 filename = "stl10_binary.tar.gz"
29 tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'
30 class_names_file = 'class_names.txt'
31 train_list = [
32 ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],
33 ['train_y.bin', '5a34089d4802c674881badbb80307741'],
34 ['unlabeled_X.bin', '5242ba1fed5e4be9e1e742405eb56ca4']
35 ]
36
37 test_list = [
38 ['test_X.bin', '7f263ba9f9e0b06b93213547f721ac82'],
39 ['test_y.bin', '36f9794fa4beb8a2c72628de14fa638e']
40 ]
41 splits = ('train', 'train+unlabeled', 'unlabeled', 'test')
42
43 def __init__(self, root, split='train',
44 transform=None, target_transform=None, download=False):
45 if split not in self.splits:
46 raise ValueError('Split "{}" not found. Valid splits are: {}'.format(
47 split, ', '.join(self.splits),
48 ))
49 self.root = os.path.expanduser(root)
50 self.transform = transform
51 self.target_transform = target_transform
52 self.split = split # train/test/unlabeled set
53
54 if download:
55 self.download()
56
57 if not self._check_integrity():
58 raise RuntimeError(
59 'Dataset not found or corrupted. '
60 'You can use download=True to download it')
61
62 # now load the picked numpy arrays
63 if self.split == 'train':
64 self.data, self.labels = self.__loadfile(
65 self.train_list[0][0], self.train_list[1][0])
66 elif self.split == 'train+unlabeled':
67 self.data, self.labels = self.__loadfile(
68 self.train_list[0][0], self.train_list[1][0])
69 unlabeled_data, _ = self.__loadfile(self.train_list[2][0])
70 self.data = np.concatenate((self.data, unlabeled_data))
71 self.labels = np.concatenate(
72 (self.labels, np.asarray([-1] * unlabeled_data.shape[0])))
73
74 elif self.split == 'unlabeled':
75 self.data, _ = self.__loadfile(self.train_list[2][0])
76 self.labels = np.asarray([-1] * self.data.shape[0])
77 else: # self.split == 'test':
78 self.data, self.labels = self.__loadfile(
79 self.test_list[0][0], self.test_list[1][0])
80
81 class_file = os.path.join(
82 self.root, self.base_folder, self.class_names_file)
83 if os.path.isfile(class_file):
84 with open(class_file) as f:
85 self.classes = f.read().splitlines()
86
87 def __getitem__(self, index):
88 """
89 Args:
90 index (int): Index
91
92 Returns:
93 tuple: (image, target) where target is index of the target class.
94 """
95 if self.labels is not None:
96 img, target = self.data[index], int(self.labels[index])
97 else:
98 img, target = self.data[index], None
99
100 # doing this so that it is consistent with all other datasets
101 # to return a PIL Image
102 img = Image.fromarray(np.transpose(img, (1, 2, 0)))
103
104 if self.transform is not None:
105 img = self.transform(img)
106
107 if self.target_transform is not None:
108 target = self.target_transform(target)
109
110 return img, target
111
112 def __len__(self):
113 return self.data.shape[0]
114
115 def __loadfile(self, data_file, labels_file=None):
116 labels = None
117 if labels_file:
118 path_to_labels = os.path.join(
119 self.root, self.base_folder, labels_file)
120 with open(path_to_labels, 'rb') as f:
121 labels = np.fromfile(f, dtype=np.uint8) - 1 # 0-based
122
123 path_to_data = os.path.join(self.root, self.base_folder, data_file)
124 with open(path_to_data, 'rb') as f:
125 # read whole file in uint8 chunks
126 everything = np.fromfile(f, dtype=np.uint8)
127 images = np.reshape(everything, (-1, 3, 96, 96))
128 images = np.transpose(images, (0, 1, 3, 2))
129
130 return images, labels
131
132 def extra_repr(self):
133 return "Split: {split}".format(**self.__dict__)
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchvision/datasets/stl10.py b/torchvision/datasets/stl10.py
--- a/torchvision/datasets/stl10.py
+++ b/torchvision/datasets/stl10.py
@@ -14,6 +14,9 @@
``stl10_binary`` exists.
split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.
Accordingly dataset is selected.
+ folds (int, optional): One of {0-9} or None.
+ For training, loads one of the 10 pre-defined folds of 1k samples for the
+ standard evaluation procedure. If no value is passed, loads the 5k samples.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
@@ -28,6 +31,7 @@
filename = "stl10_binary.tar.gz"
tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'
class_names_file = 'class_names.txt'
+ folds_list_file = 'fold_indices.txt'
train_list = [
['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],
['train_y.bin', '5a34089d4802c674881badbb80307741'],
@@ -40,7 +44,7 @@
]
splits = ('train', 'train+unlabeled', 'unlabeled', 'test')
- def __init__(self, root, split='train',
+ def __init__(self, root, split='train', folds=None,
transform=None, target_transform=None, download=False):
if split not in self.splits:
raise ValueError('Split "{}" not found. Valid splits are: {}'.format(
@@ -50,6 +54,7 @@
self.transform = transform
self.target_transform = target_transform
self.split = split # train/test/unlabeled set
+ self.folds = folds # one of the 10 pre-defined folds or the full dataset
if download:
self.download()
@@ -63,9 +68,12 @@
if self.split == 'train':
self.data, self.labels = self.__loadfile(
self.train_list[0][0], self.train_list[1][0])
+ self.__load_folds(folds)
+
elif self.split == 'train+unlabeled':
self.data, self.labels = self.__loadfile(
self.train_list[0][0], self.train_list[1][0])
+ self.__load_folds(folds)
unlabeled_data, _ = self.__loadfile(self.train_list[2][0])
self.data = np.concatenate((self.data, unlabeled_data))
self.labels = np.concatenate(
@@ -131,3 +139,16 @@
def extra_repr(self):
return "Split: {split}".format(**self.__dict__)
+
+ def __load_folds(self, folds):
+ # loads one of the folds if specified
+ if isinstance(folds, int):
+ if folds >= 0 and folds < 10:
+ path_to_folds = os.path.join(
+ self.root, self.base_folder, self.folds_list_file)
+ with open(path_to_folds, 'r') as f:
+ str_idx = f.read().splitlines()[folds]
+ list_idx = np.fromstring(str_idx, dtype=np.uint8, sep=' ')
+ self.data, self.labels = self.data[list_idx, :, :, :], self.labels[list_idx]
+ else:
+ raise ValueError('Folds "{}" not found. Valid splits are: 0-9.'.format(folds))
| {"golden_diff": "diff --git a/torchvision/datasets/stl10.py b/torchvision/datasets/stl10.py\n--- a/torchvision/datasets/stl10.py\n+++ b/torchvision/datasets/stl10.py\n@@ -14,6 +14,9 @@\n ``stl10_binary`` exists.\n split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.\n Accordingly dataset is selected.\n+ folds (int, optional): One of {0-9} or None.\n+ For training, loads one of the 10 pre-defined folds of 1k samples for the\n+ standard evaluation procedure. If no value is passed, loads the 5k samples.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n@@ -28,6 +31,7 @@\n filename = \"stl10_binary.tar.gz\"\n tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'\n class_names_file = 'class_names.txt'\n+ folds_list_file = 'fold_indices.txt'\n train_list = [\n ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],\n ['train_y.bin', '5a34089d4802c674881badbb80307741'],\n@@ -40,7 +44,7 @@\n ]\n splits = ('train', 'train+unlabeled', 'unlabeled', 'test')\n \n- def __init__(self, root, split='train',\n+ def __init__(self, root, split='train', folds=None,\n transform=None, target_transform=None, download=False):\n if split not in self.splits:\n raise ValueError('Split \"{}\" not found. Valid splits are: {}'.format(\n@@ -50,6 +54,7 @@\n self.transform = transform\n self.target_transform = target_transform\n self.split = split # train/test/unlabeled set\n+ self.folds = folds # one of the 10 pre-defined folds or the full dataset\n \n if download:\n self.download()\n@@ -63,9 +68,12 @@\n if self.split == 'train':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n+ self.__load_folds(folds)\n+\n elif self.split == 'train+unlabeled':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n+ self.__load_folds(folds)\n unlabeled_data, _ = self.__loadfile(self.train_list[2][0])\n self.data = np.concatenate((self.data, unlabeled_data))\n self.labels = np.concatenate(\n@@ -131,3 +139,16 @@\n \n def extra_repr(self):\n return \"Split: {split}\".format(**self.__dict__)\n+\n+ def __load_folds(self, folds):\n+ # loads one of the folds if specified\n+ if isinstance(folds, int):\n+ if folds >= 0 and folds < 10:\n+ path_to_folds = os.path.join(\n+ self.root, self.base_folder, self.folds_list_file)\n+ with open(path_to_folds, 'r') as f:\n+ str_idx = f.read().splitlines()[folds]\n+ list_idx = np.fromstring(str_idx, dtype=np.uint8, sep=' ')\n+ self.data, self.labels = self.data[list_idx, :, :, :], self.labels[list_idx]\n+ else:\n+ raise ValueError('Folds \"{}\" not found. Valid splits are: 0-9.'.format(folds))\n", "issue": "STL-10 Testing Protocol\nClass STL10 does not support recommended testing protocol. \r\nSee STL-10 official page: https://cs.stanford.edu/~acoates/stl10/\n", "before_files": [{"content": "from __future__ import print_function\nfrom PIL import Image\nimport os\nimport os.path\nimport numpy as np\nfrom .cifar import CIFAR10\n\n\nclass STL10(CIFAR10):\n \"\"\"`STL10 <https://cs.stanford.edu/~acoates/stl10/>`_ Dataset.\n\n Args:\n root (string): Root directory of dataset where directory\n ``stl10_binary`` exists.\n split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.\n Accordingly dataset is selected.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. 
E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n\n \"\"\"\n base_folder = 'stl10_binary'\n url = \"http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz\"\n filename = \"stl10_binary.tar.gz\"\n tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'\n class_names_file = 'class_names.txt'\n train_list = [\n ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],\n ['train_y.bin', '5a34089d4802c674881badbb80307741'],\n ['unlabeled_X.bin', '5242ba1fed5e4be9e1e742405eb56ca4']\n ]\n\n test_list = [\n ['test_X.bin', '7f263ba9f9e0b06b93213547f721ac82'],\n ['test_y.bin', '36f9794fa4beb8a2c72628de14fa638e']\n ]\n splits = ('train', 'train+unlabeled', 'unlabeled', 'test')\n\n def __init__(self, root, split='train',\n transform=None, target_transform=None, download=False):\n if split not in self.splits:\n raise ValueError('Split \"{}\" not found. Valid splits are: {}'.format(\n split, ', '.join(self.splits),\n ))\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.target_transform = target_transform\n self.split = split # train/test/unlabeled set\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError(\n 'Dataset not found or corrupted. '\n 'You can use download=True to download it')\n\n # now load the picked numpy arrays\n if self.split == 'train':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n elif self.split == 'train+unlabeled':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n unlabeled_data, _ = self.__loadfile(self.train_list[2][0])\n self.data = np.concatenate((self.data, unlabeled_data))\n self.labels = np.concatenate(\n (self.labels, np.asarray([-1] * unlabeled_data.shape[0])))\n\n elif self.split == 'unlabeled':\n self.data, _ = self.__loadfile(self.train_list[2][0])\n self.labels = np.asarray([-1] * self.data.shape[0])\n else: # self.split == 'test':\n self.data, self.labels = self.__loadfile(\n self.test_list[0][0], self.test_list[1][0])\n\n class_file = os.path.join(\n self.root, self.base_folder, self.class_names_file)\n if os.path.isfile(class_file):\n with open(class_file) as f:\n self.classes = f.read().splitlines()\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n if self.labels is not None:\n img, target = self.data[index], int(self.labels[index])\n else:\n img, target = self.data[index], None\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(np.transpose(img, (1, 2, 0)))\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n return self.data.shape[0]\n\n def __loadfile(self, data_file, labels_file=None):\n labels = None\n if labels_file:\n path_to_labels = os.path.join(\n self.root, self.base_folder, labels_file)\n with open(path_to_labels, 'rb') as f:\n labels = np.fromfile(f, dtype=np.uint8) - 1 # 0-based\n\n path_to_data = os.path.join(self.root, self.base_folder, data_file)\n with open(path_to_data, 'rb') as f:\n # read whole file in uint8 chunks\n everything = 
np.fromfile(f, dtype=np.uint8)\n images = np.reshape(everything, (-1, 3, 96, 96))\n images = np.transpose(images, (0, 1, 3, 2))\n\n return images, labels\n\n def extra_repr(self):\n return \"Split: {split}\".format(**self.__dict__)\n", "path": "torchvision/datasets/stl10.py"}], "after_files": [{"content": "from __future__ import print_function\nfrom PIL import Image\nimport os\nimport os.path\nimport numpy as np\nfrom .cifar import CIFAR10\n\n\nclass STL10(CIFAR10):\n \"\"\"`STL10 <https://cs.stanford.edu/~acoates/stl10/>`_ Dataset.\n\n Args:\n root (string): Root directory of dataset where directory\n ``stl10_binary`` exists.\n split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}.\n Accordingly dataset is selected.\n folds (int, optional): One of {0-9} or None.\n For training, loads one of the 10 pre-defined folds of 1k samples for the\n standard evaluation procedure. If no value is passed, loads the 5k samples.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n\n \"\"\"\n base_folder = 'stl10_binary'\n url = \"http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz\"\n filename = \"stl10_binary.tar.gz\"\n tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb'\n class_names_file = 'class_names.txt'\n folds_list_file = 'fold_indices.txt'\n train_list = [\n ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'],\n ['train_y.bin', '5a34089d4802c674881badbb80307741'],\n ['unlabeled_X.bin', '5242ba1fed5e4be9e1e742405eb56ca4']\n ]\n\n test_list = [\n ['test_X.bin', '7f263ba9f9e0b06b93213547f721ac82'],\n ['test_y.bin', '36f9794fa4beb8a2c72628de14fa638e']\n ]\n splits = ('train', 'train+unlabeled', 'unlabeled', 'test')\n\n def __init__(self, root, split='train', folds=None,\n transform=None, target_transform=None, download=False):\n if split not in self.splits:\n raise ValueError('Split \"{}\" not found. Valid splits are: {}'.format(\n split, ', '.join(self.splits),\n ))\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.target_transform = target_transform\n self.split = split # train/test/unlabeled set\n self.folds = folds # one of the 10 pre-defined folds or the full dataset\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError(\n 'Dataset not found or corrupted. 
'\n 'You can use download=True to download it')\n\n # now load the picked numpy arrays\n if self.split == 'train':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n self.__load_folds(folds)\n\n elif self.split == 'train+unlabeled':\n self.data, self.labels = self.__loadfile(\n self.train_list[0][0], self.train_list[1][0])\n self.__load_folds(folds)\n unlabeled_data, _ = self.__loadfile(self.train_list[2][0])\n self.data = np.concatenate((self.data, unlabeled_data))\n self.labels = np.concatenate(\n (self.labels, np.asarray([-1] * unlabeled_data.shape[0])))\n\n elif self.split == 'unlabeled':\n self.data, _ = self.__loadfile(self.train_list[2][0])\n self.labels = np.asarray([-1] * self.data.shape[0])\n else: # self.split == 'test':\n self.data, self.labels = self.__loadfile(\n self.test_list[0][0], self.test_list[1][0])\n\n class_file = os.path.join(\n self.root, self.base_folder, self.class_names_file)\n if os.path.isfile(class_file):\n with open(class_file) as f:\n self.classes = f.read().splitlines()\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n if self.labels is not None:\n img, target = self.data[index], int(self.labels[index])\n else:\n img, target = self.data[index], None\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(np.transpose(img, (1, 2, 0)))\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n return self.data.shape[0]\n\n def __loadfile(self, data_file, labels_file=None):\n labels = None\n if labels_file:\n path_to_labels = os.path.join(\n self.root, self.base_folder, labels_file)\n with open(path_to_labels, 'rb') as f:\n labels = np.fromfile(f, dtype=np.uint8) - 1 # 0-based\n\n path_to_data = os.path.join(self.root, self.base_folder, data_file)\n with open(path_to_data, 'rb') as f:\n # read whole file in uint8 chunks\n everything = np.fromfile(f, dtype=np.uint8)\n images = np.reshape(everything, (-1, 3, 96, 96))\n images = np.transpose(images, (0, 1, 3, 2))\n\n return images, labels\n\n def extra_repr(self):\n return \"Split: {split}\".format(**self.__dict__)\n\n def __load_folds(self, folds):\n # loads one of the folds if specified\n if isinstance(folds, int):\n if folds >= 0 and folds < 10:\n path_to_folds = os.path.join(\n self.root, self.base_folder, self.folds_list_file)\n with open(path_to_folds, 'r') as f:\n str_idx = f.read().splitlines()[folds]\n list_idx = np.fromstring(str_idx, dtype=np.uint8, sep=' ')\n self.data, self.labels = self.data[list_idx, :, :, :], self.labels[list_idx]\n else:\n raise ValueError('Folds \"{}\" not found. Valid splits are: 0-9.'.format(folds))\n", "path": "torchvision/datasets/stl10.py"}]} | 1,951 | 910 |
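As a point of reference, here is a minimal usage sketch of the `folds` argument introduced by the patch above. The patched `torchvision.datasets.STL10` signature is taken from the diff; the local `./data` directory is an assumption for illustration only.

```python
from torchvision.datasets import STL10

# Assumes the patched STL10 with the `folds` argument: an int in 0-9 selects one of
# the ten pre-defined 1k-sample training folds, None loads all 5k labeled samples.
# download=True fetches the dataset archive on first use.
fold_0 = STL10(root="./data", split="train", folds=0, download=True)
full_train = STL10(root="./data", split="train", folds=None, download=True)

print(len(fold_0), len(full_train))  # roughly 1000 vs. 5000
```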
gh_patches_debug_11962 | rasdani/github-patches | git_diff | svthalia__concrexit-2591 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Updating promo requests
**Describe the bug**
It is not possible to update the promo request within a week of the publish date
**How to reproduce**
Create a promo request
Try to update the designer within a week of the publish date
**Expected behavior**
The request can only be created more than a week before the publishing date, but assigned to, status and drive folder can always be edited.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/promotion/forms.py`
Content:
```
1 from django import forms
2 from django.utils import timezone
3
4 from promotion.models import PromotionRequest
5 from thaliawebsite.settings import PROMO_PUBLISH_DATE_TIMEDELTA
6
7
8 class PromotionRequestForm(forms.ModelForm):
9 class Meta:
10 model = PromotionRequest
11 fields = [
12 "event",
13 "publish_date",
14 "channel",
15 "assigned_to",
16 "status",
17 "drive_folder",
18 "remarks",
19 ]
20
21 def clean_publish_date(self):
22 publish_date = self.cleaned_data.get("publish_date")
23 create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA
24 if timezone.localdate() > create_time_minimum:
25 raise forms.ValidationError(
26 "Publish date cannot be within a week from now."
27 )
28 if "publish_date" in self.changed_data:
29 create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA
30 if timezone.localdate() > create_time_minimum:
31 raise forms.ValidationError(
32 "Publish date cannot be within a week from now."
33 )
34 return publish_date
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/promotion/forms.py b/website/promotion/forms.py
--- a/website/promotion/forms.py
+++ b/website/promotion/forms.py
@@ -20,11 +20,6 @@
def clean_publish_date(self):
publish_date = self.cleaned_data.get("publish_date")
- create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA
- if timezone.localdate() > create_time_minimum:
- raise forms.ValidationError(
- "Publish date cannot be within a week from now."
- )
if "publish_date" in self.changed_data:
create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA
if timezone.localdate() > create_time_minimum:
| {"golden_diff": "diff --git a/website/promotion/forms.py b/website/promotion/forms.py\n--- a/website/promotion/forms.py\n+++ b/website/promotion/forms.py\n@@ -20,11 +20,6 @@\n \n def clean_publish_date(self):\n publish_date = self.cleaned_data.get(\"publish_date\")\n- create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n- if timezone.localdate() > create_time_minimum:\n- raise forms.ValidationError(\n- \"Publish date cannot be within a week from now.\"\n- )\n if \"publish_date\" in self.changed_data:\n create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n if timezone.localdate() > create_time_minimum:\n", "issue": "Updating promo requests \n**Describe the bug**\r\nIt is not possible to update the promo request within a week of the publish date \r\n\r\n**How to reproduce** \r\nCreate a promo request \r\nTry to update the designer within a week to publish date \r\n\r\n**Expected behavior** \r\nThe request can only be created more than a week before the publishing date, but assigned to, status and drive folder can always be edited. \n", "before_files": [{"content": "from django import forms\nfrom django.utils import timezone\n\nfrom promotion.models import PromotionRequest\nfrom thaliawebsite.settings import PROMO_PUBLISH_DATE_TIMEDELTA\n\n\nclass PromotionRequestForm(forms.ModelForm):\n class Meta:\n model = PromotionRequest\n fields = [\n \"event\",\n \"publish_date\",\n \"channel\",\n \"assigned_to\",\n \"status\",\n \"drive_folder\",\n \"remarks\",\n ]\n\n def clean_publish_date(self):\n publish_date = self.cleaned_data.get(\"publish_date\")\n create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n if timezone.localdate() > create_time_minimum:\n raise forms.ValidationError(\n \"Publish date cannot be within a week from now.\"\n )\n if \"publish_date\" in self.changed_data:\n create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n if timezone.localdate() > create_time_minimum:\n raise forms.ValidationError(\n \"Publish date cannot be within a week from now.\"\n )\n return publish_date\n", "path": "website/promotion/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.utils import timezone\n\nfrom promotion.models import PromotionRequest\nfrom thaliawebsite.settings import PROMO_PUBLISH_DATE_TIMEDELTA\n\n\nclass PromotionRequestForm(forms.ModelForm):\n class Meta:\n model = PromotionRequest\n fields = [\n \"event\",\n \"publish_date\",\n \"channel\",\n \"assigned_to\",\n \"status\",\n \"drive_folder\",\n \"remarks\",\n ]\n\n def clean_publish_date(self):\n publish_date = self.cleaned_data.get(\"publish_date\")\n if \"publish_date\" in self.changed_data:\n create_time_minimum = publish_date - PROMO_PUBLISH_DATE_TIMEDELTA\n if timezone.localdate() > create_time_minimum:\n raise forms.ValidationError(\n \"Publish date cannot be within a week from now.\"\n )\n return publish_date\n", "path": "website/promotion/forms.py"}]} | 624 | 160 |
gh_patches_debug_2192 | rasdani/github-patches | git_diff | LMFDB__lmfdb-5179 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PIP dependencies
We have several deprecated dependencies that we should fix ASAP
```
flask<=1.1.4
markupsafe<=2.0.1
itsdangerous<=2.0.1
```
in particular, this prevents using lmfdb in an environment with jupyterlab installed, which is something we would like to have working in the short term.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lmfdb/local_fields/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from lmfdb.app import app
3 from lmfdb.logger import make_logger
4 from flask import Blueprint, request, redirect
5
6 local_fields_page = Blueprint("local_fields", __name__, template_folder='templates', static_folder="static")
7 logger = make_logger(local_fields_page)
8
9
10 @local_fields_page.context_processor
11 def body_class():
12 return {'body_class': 'local_fields'}
13
14 from . import main
15 assert main
16
17 from urllib.parse import urlparse, urlunparse
18
19
20 @local_fields_page.before_request
21 def redirect_local():
22 urlparts = urlparse(request.url)
23 if 'LocalNumberField' in urlparts.path:
24 urlparts = urlparts._replace(path=urlparts.path.replace('LocalNumberField', 'padicField'))
25 return redirect(urlunparse(urlparts), 301)
26 return
27
28
29 app.register_blueprint(local_fields_page, url_prefix="/padicField")
30 app.register_blueprint(local_fields_page, url_prefix="/LocalNumberField")
31
32 # API2 has been disabled for now
33 #from lmfdb.api2.searchers import register_search_function
34 #register_search_function(
35 # "$p$-adic_fields",
36 # "$p$-adic fields",
37 # "Search over $p$-adic fields",
38 # auto_search = 'lf_fields'
39 #)
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lmfdb/local_fields/__init__.py b/lmfdb/local_fields/__init__.py
--- a/lmfdb/local_fields/__init__.py
+++ b/lmfdb/local_fields/__init__.py
@@ -27,7 +27,6 @@
app.register_blueprint(local_fields_page, url_prefix="/padicField")
-app.register_blueprint(local_fields_page, url_prefix="/LocalNumberField")
# API2 has been disabled for now
#from lmfdb.api2.searchers import register_search_function
| {"golden_diff": "diff --git a/lmfdb/local_fields/__init__.py b/lmfdb/local_fields/__init__.py\n--- a/lmfdb/local_fields/__init__.py\n+++ b/lmfdb/local_fields/__init__.py\n@@ -27,7 +27,6 @@\n \n \n app.register_blueprint(local_fields_page, url_prefix=\"/padicField\")\n-app.register_blueprint(local_fields_page, url_prefix=\"/LocalNumberField\")\n \n # API2 has been disabled for now\n #from lmfdb.api2.searchers import register_search_function\n", "issue": "PIP dependencies\nWe have several deprecated dependencies that we should fix ASAP\r\n```\r\nflask<=1.1.4\r\nmarkupsafe<=2.0.1\r\nitsdangerous<=2.0.1\r\n```\r\n\r\nin particular, this prevents using lmfdb in an environment with jupyterlab installed, which is something we would like to have working on a short time basis. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom lmfdb.app import app\nfrom lmfdb.logger import make_logger\nfrom flask import Blueprint, request, redirect\n\nlocal_fields_page = Blueprint(\"local_fields\", __name__, template_folder='templates', static_folder=\"static\")\nlogger = make_logger(local_fields_page)\n\n\n@local_fields_page.context_processor\ndef body_class():\n return {'body_class': 'local_fields'}\n\nfrom . import main\nassert main\n\nfrom urllib.parse import urlparse, urlunparse\n\n\n@local_fields_page.before_request\ndef redirect_local():\n urlparts = urlparse(request.url)\n if 'LocalNumberField' in urlparts.path:\n urlparts = urlparts._replace(path=urlparts.path.replace('LocalNumberField', 'padicField'))\n return redirect(urlunparse(urlparts), 301)\n return\n\n\napp.register_blueprint(local_fields_page, url_prefix=\"/padicField\")\napp.register_blueprint(local_fields_page, url_prefix=\"/LocalNumberField\")\n\n# API2 has been disabled for now\n#from lmfdb.api2.searchers import register_search_function\n#register_search_function(\n# \"$p$-adic_fields\",\n# \"$p$-adic fields\",\n# \"Search over $p$-adic fields\",\n# auto_search = 'lf_fields'\n#)\n", "path": "lmfdb/local_fields/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom lmfdb.app import app\nfrom lmfdb.logger import make_logger\nfrom flask import Blueprint, request, redirect\n\nlocal_fields_page = Blueprint(\"local_fields\", __name__, template_folder='templates', static_folder=\"static\")\nlogger = make_logger(local_fields_page)\n\n\n@local_fields_page.context_processor\ndef body_class():\n return {'body_class': 'local_fields'}\n\nfrom . import main\nassert main\n\nfrom urllib.parse import urlparse, urlunparse\n\n\n@local_fields_page.before_request\ndef redirect_local():\n urlparts = urlparse(request.url)\n if 'LocalNumberField' in urlparts.path:\n urlparts = urlparts._replace(path=urlparts.path.replace('LocalNumberField', 'padicField'))\n return redirect(urlunparse(urlparts), 301)\n return\n\n\napp.register_blueprint(local_fields_page, url_prefix=\"/padicField\")\n\n# API2 has been disabled for now\n#from lmfdb.api2.searchers import register_search_function\n#register_search_function(\n# \"$p$-adic_fields\",\n# \"$p$-adic fields\",\n# \"Search over $p$-adic fields\",\n# auto_search = 'lf_fields'\n#)\n", "path": "lmfdb/local_fields/__init__.py"}]} | 695 | 113 |
gh_patches_debug_30051 | rasdani/github-patches | git_diff | doccano__doccano-2228 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a function to filter labels
When I rechecked the labels of the annotated data, I had no way of filtering out the labels I wanted to see. For example, when I am doing a check of dichotomous annotations, I would like to filter the dataset to find out which labels are positive and which are negative, so that I can save time on the check. However, due to the lack of this function, I have to filter one by one from the dataset, which wastes a lot of time and manpower.
Thanks for every contributor!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/examples/filters.py`
Content:
```
1 from django.db.models import Count, Q
2 from django_filters.rest_framework import BooleanFilter, FilterSet
3
4 from .models import Example
5
6
7 class ExampleFilter(FilterSet):
8 confirmed = BooleanFilter(field_name="states", method="filter_by_state")
9
10 def filter_by_state(self, queryset, field_name, is_confirmed: bool):
11 queryset = queryset.annotate(
12 num_confirm=Count(
13 expression=field_name,
14 filter=Q(**{f"{field_name}__confirmed_by": self.request.user})
15 | Q(project__collaborative_annotation=True),
16 )
17 )
18 if is_confirmed:
19 queryset = queryset.filter(num_confirm__gte=1)
20 else:
21 queryset = queryset.filter(num_confirm__lte=0)
22 return queryset
23
24 class Meta:
25 model = Example
26 fields = ("project", "text", "created_at", "updated_at")
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/examples/filters.py b/backend/examples/filters.py
--- a/backend/examples/filters.py
+++ b/backend/examples/filters.py
@@ -1,11 +1,12 @@
-from django.db.models import Count, Q
-from django_filters.rest_framework import BooleanFilter, FilterSet
+from django.db.models import Count, Q, QuerySet
+from django_filters.rest_framework import BooleanFilter, CharFilter, FilterSet
from .models import Example
class ExampleFilter(FilterSet):
confirmed = BooleanFilter(field_name="states", method="filter_by_state")
+ label = CharFilter(method="filter_by_label")
def filter_by_state(self, queryset, field_name, is_confirmed: bool):
queryset = queryset.annotate(
@@ -21,6 +22,35 @@
queryset = queryset.filter(num_confirm__lte=0)
return queryset
+ def filter_by_label(self, queryset: QuerySet, field_name: str, label: str) -> QuerySet:
+ """Filter examples by a given label name.
+
+ This performs filtering on all of the following labels at once:
+ - categories
+ - spans
+ - relations
+ - bboxes
+ - segmentations
+
+ Todo: Consider project type to make filtering more efficient.
+
+ Args:
+ queryset (QuerySet): QuerySet to filter.
+ field_name (str): This equals to `label`.
+ label (str): The label name to filter.
+
+ Returns:
+ QuerySet: Filtered examples.
+ """
+ queryset = queryset.filter(
+ Q(categories__label__text=label)
+ | Q(spans__label__text=label)
+ | Q(relations__type__text=label)
+ | Q(bboxes__label__text=label)
+ | Q(segmentations__label__text=label)
+ )
+ return queryset
+
class Meta:
model = Example
- fields = ("project", "text", "created_at", "updated_at")
+ fields = ("project", "text", "created_at", "updated_at", "label")
| {"golden_diff": "diff --git a/backend/examples/filters.py b/backend/examples/filters.py\n--- a/backend/examples/filters.py\n+++ b/backend/examples/filters.py\n@@ -1,11 +1,12 @@\n-from django.db.models import Count, Q\n-from django_filters.rest_framework import BooleanFilter, FilterSet\n+from django.db.models import Count, Q, QuerySet\n+from django_filters.rest_framework import BooleanFilter, CharFilter, FilterSet\n \n from .models import Example\n \n \n class ExampleFilter(FilterSet):\n confirmed = BooleanFilter(field_name=\"states\", method=\"filter_by_state\")\n+ label = CharFilter(method=\"filter_by_label\")\n \n def filter_by_state(self, queryset, field_name, is_confirmed: bool):\n queryset = queryset.annotate(\n@@ -21,6 +22,35 @@\n queryset = queryset.filter(num_confirm__lte=0)\n return queryset\n \n+ def filter_by_label(self, queryset: QuerySet, field_name: str, label: str) -> QuerySet:\n+ \"\"\"Filter examples by a given label name.\n+\n+ This performs filtering on all of the following labels at once:\n+ - categories\n+ - spans\n+ - relations\n+ - bboxes\n+ - segmentations\n+\n+ Todo: Consider project type to make filtering more efficient.\n+\n+ Args:\n+ queryset (QuerySet): QuerySet to filter.\n+ field_name (str): This equals to `label`.\n+ label (str): The label name to filter.\n+\n+ Returns:\n+ QuerySet: Filtered examples.\n+ \"\"\"\n+ queryset = queryset.filter(\n+ Q(categories__label__text=label)\n+ | Q(spans__label__text=label)\n+ | Q(relations__type__text=label)\n+ | Q(bboxes__label__text=label)\n+ | Q(segmentations__label__text=label)\n+ )\n+ return queryset\n+\n class Meta:\n model = Example\n- fields = (\"project\", \"text\", \"created_at\", \"updated_at\")\n+ fields = (\"project\", \"text\", \"created_at\", \"updated_at\", \"label\")\n", "issue": "Add a function to filter labels\nWhen I rechecked the labels of the annotated data, I had no way of filtering out the labels I wanted to see. For example, when I am doing a check of dichotomous annotations, I would like to filter the data set to find out which labels are positive and which are negative, so that I can save time on the check. 
However, due to the lack of this function, I have to filter one by one from dataset, which wastes a lot of time and manpower.\r\n\r\nThanks for every contributor!\n", "before_files": [{"content": "from django.db.models import Count, Q\nfrom django_filters.rest_framework import BooleanFilter, FilterSet\n\nfrom .models import Example\n\n\nclass ExampleFilter(FilterSet):\n confirmed = BooleanFilter(field_name=\"states\", method=\"filter_by_state\")\n\n def filter_by_state(self, queryset, field_name, is_confirmed: bool):\n queryset = queryset.annotate(\n num_confirm=Count(\n expression=field_name,\n filter=Q(**{f\"{field_name}__confirmed_by\": self.request.user})\n | Q(project__collaborative_annotation=True),\n )\n )\n if is_confirmed:\n queryset = queryset.filter(num_confirm__gte=1)\n else:\n queryset = queryset.filter(num_confirm__lte=0)\n return queryset\n\n class Meta:\n model = Example\n fields = (\"project\", \"text\", \"created_at\", \"updated_at\")\n", "path": "backend/examples/filters.py"}], "after_files": [{"content": "from django.db.models import Count, Q, QuerySet\nfrom django_filters.rest_framework import BooleanFilter, CharFilter, FilterSet\n\nfrom .models import Example\n\n\nclass ExampleFilter(FilterSet):\n confirmed = BooleanFilter(field_name=\"states\", method=\"filter_by_state\")\n label = CharFilter(method=\"filter_by_label\")\n\n def filter_by_state(self, queryset, field_name, is_confirmed: bool):\n queryset = queryset.annotate(\n num_confirm=Count(\n expression=field_name,\n filter=Q(**{f\"{field_name}__confirmed_by\": self.request.user})\n | Q(project__collaborative_annotation=True),\n )\n )\n if is_confirmed:\n queryset = queryset.filter(num_confirm__gte=1)\n else:\n queryset = queryset.filter(num_confirm__lte=0)\n return queryset\n\n def filter_by_label(self, queryset: QuerySet, field_name: str, label: str) -> QuerySet:\n \"\"\"Filter examples by a given label name.\n\n This performs filtering on all of the following labels at once:\n - categories\n - spans\n - relations\n - bboxes\n - segmentations\n\n Todo: Consider project type to make filtering more efficient.\n\n Args:\n queryset (QuerySet): QuerySet to filter.\n field_name (str): This equals to `label`.\n label (str): The label name to filter.\n\n Returns:\n QuerySet: Filtered examples.\n \"\"\"\n queryset = queryset.filter(\n Q(categories__label__text=label)\n | Q(spans__label__text=label)\n | Q(relations__type__text=label)\n | Q(bboxes__label__text=label)\n | Q(segmentations__label__text=label)\n )\n return queryset\n\n class Meta:\n model = Example\n fields = (\"project\", \"text\", \"created_at\", \"updated_at\", \"label\")\n", "path": "backend/examples/filters.py"}]} | 603 | 463 |
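A short usage sketch of the `label` filter added above. The base URL, project id, and token are hypothetical placeholders; only the `label` (and the existing `confirmed`) query parameters correspond to the patched `ExampleFilter`.

```python
import requests

# Hypothetical doccano instance and project id, shown only to illustrate the new
# `label` query parameter handled by ExampleFilter.filter_by_label.
resp = requests.get(
    "http://localhost:8000/v1/projects/1/examples",
    params={"label": "positive", "confirmed": "true"},
    headers={"Authorization": "Token <api-token>"},
)
print(resp.status_code, len(resp.json().get("results", [])))
```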
gh_patches_debug_13543 | rasdani/github-patches | git_diff | pre-commit__pre-commit-33 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pre-commit -i does not install the file with +x
No executable = no run :'(
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/git.py`
Content:
```
1 import functools
2 import os
3 import os.path
4 import pkg_resources
5 import re
6 from plumbum import local
7
8 from pre_commit.util import memoize_by_cwd
9
10
11 def _get_root_new():
12 path = os.getcwd()
13 while len(path) > 1:
14 if os.path.exists(os.path.join(path, '.git')):
15 return path
16 else:
17 path = os.path.normpath(os.path.join(path, '../'))
18 raise AssertionError('called from outside of the gits')
19
20
21 @memoize_by_cwd
22 def get_root():
23 return _get_root_new()
24
25
26 @memoize_by_cwd
27 def get_pre_commit_path():
28 return os.path.join(get_root(), '.git/hooks/pre-commit')
29
30
31 def create_pre_commit():
32 path = get_pre_commit_path()
33 pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')
34 local.path(path).write(local.path(pre_commit_file).read())
35
36
37 def remove_pre_commit():
38 local.path(get_pre_commit_path()).delete()
39
40
41 def get_head_sha(git_repo_path):
42 with local.cwd(git_repo_path):
43 return local['git']['rev-parse', 'HEAD']().strip()
44
45
46 @memoize_by_cwd
47 def get_staged_files():
48 return local['git']['diff', '--staged', '--name-only']().splitlines()
49
50
51 @memoize_by_cwd
52 def get_all_files():
53 return local['git']['ls-files']().splitlines()
54
55
56 def get_files_matching(all_file_list_strategy):
57 @functools.wraps(all_file_list_strategy)
58 @memoize_by_cwd
59 def wrapper(expr):
60 regex = re.compile(expr)
61 return set(filter(os.path.exists, (
62 filename
63 for filename in all_file_list_strategy()
64 if regex.search(filename)
65 )))
66 return wrapper
67
68
69 get_staged_files_matching = get_files_matching(get_staged_files)
70 get_all_files_matching = get_files_matching(get_all_files)
71
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/git.py b/pre_commit/git.py
--- a/pre_commit/git.py
+++ b/pre_commit/git.py
@@ -3,6 +3,7 @@
import os.path
import pkg_resources
import re
+import stat
from plumbum import local
from pre_commit.util import memoize_by_cwd
@@ -32,6 +33,8 @@
path = get_pre_commit_path()
pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')
local.path(path).write(local.path(pre_commit_file).read())
+ original_mode = os.stat(path).st_mode
+ os.chmod(path, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
def remove_pre_commit():
| {"golden_diff": "diff --git a/pre_commit/git.py b/pre_commit/git.py\n--- a/pre_commit/git.py\n+++ b/pre_commit/git.py\n@@ -3,6 +3,7 @@\n import os.path\n import pkg_resources\n import re\n+import stat\n from plumbum import local\n \n from pre_commit.util import memoize_by_cwd\n@@ -32,6 +33,8 @@\n path = get_pre_commit_path()\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n local.path(path).write(local.path(pre_commit_file).read())\n+ original_mode = os.stat(path).st_mode\n+ os.chmod(path, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n \n \n def remove_pre_commit():\n", "issue": "pre-commit -i does not install the file with +x\nNo executable = no run :'(\n\n", "before_files": [{"content": "import functools\nimport os\nimport os.path\nimport pkg_resources\nimport re\nfrom plumbum import local\n\nfrom pre_commit.util import memoize_by_cwd\n\n\ndef _get_root_new():\n path = os.getcwd()\n while len(path) > 1:\n if os.path.exists(os.path.join(path, '.git')):\n return path\n else:\n path = os.path.normpath(os.path.join(path, '../'))\n raise AssertionError('called from outside of the gits')\n\n\n@memoize_by_cwd\ndef get_root():\n return _get_root_new()\n\n\n@memoize_by_cwd\ndef get_pre_commit_path():\n return os.path.join(get_root(), '.git/hooks/pre-commit')\n\n\ndef create_pre_commit():\n path = get_pre_commit_path()\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n local.path(path).write(local.path(pre_commit_file).read())\n\n\ndef remove_pre_commit():\n local.path(get_pre_commit_path()).delete()\n\n\ndef get_head_sha(git_repo_path):\n with local.cwd(git_repo_path):\n return local['git']['rev-parse', 'HEAD']().strip()\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return local['git']['diff', '--staged', '--name-only']().splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return local['git']['ls-files']().splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(expr):\n regex = re.compile(expr)\n return set(filter(os.path.exists, (\n filename\n for filename in all_file_list_strategy()\n if regex.search(filename)\n )))\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\n", "path": "pre_commit/git.py"}], "after_files": [{"content": "import functools\nimport os\nimport os.path\nimport pkg_resources\nimport re\nimport stat\nfrom plumbum import local\n\nfrom pre_commit.util import memoize_by_cwd\n\n\ndef _get_root_new():\n path = os.getcwd()\n while len(path) > 1:\n if os.path.exists(os.path.join(path, '.git')):\n return path\n else:\n path = os.path.normpath(os.path.join(path, '../'))\n raise AssertionError('called from outside of the gits')\n\n\n@memoize_by_cwd\ndef get_root():\n return _get_root_new()\n\n\n@memoize_by_cwd\ndef get_pre_commit_path():\n return os.path.join(get_root(), '.git/hooks/pre-commit')\n\n\ndef create_pre_commit():\n path = get_pre_commit_path()\n pre_commit_file = pkg_resources.resource_filename('pre_commit', 'resources/pre-commit.sh')\n local.path(path).write(local.path(pre_commit_file).read())\n original_mode = os.stat(path).st_mode\n os.chmod(path, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n\n\ndef remove_pre_commit():\n local.path(get_pre_commit_path()).delete()\n\n\ndef get_head_sha(git_repo_path):\n with local.cwd(git_repo_path):\n return 
local['git']['rev-parse', 'HEAD']().strip()\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return local['git']['diff', '--staged', '--name-only']().splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return local['git']['ls-files']().splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(expr):\n regex = re.compile(expr)\n return set(filter(os.path.exists, (\n filename\n for filename in all_file_list_strategy()\n if regex.search(filename)\n )))\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\n", "path": "pre_commit/git.py"}]} | 833 | 170 |
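For reference, a self-contained sketch of the technique the patch above applies — or-ing the execute bits onto a file's existing mode after writing it. The temporary file stands in for the real `.git/hooks/pre-commit` path so the example stays runnable anywhere.

```python
import os
import stat
import tempfile

# Stand-in for the hook written by create_pre_commit(); a temp file keeps the
# example self-contained.
with tempfile.NamedTemporaryFile("w", suffix="-pre-commit", delete=False) as f:
    f.write("#!/usr/bin/env bash\necho 'hook ran'\n")
    path = f.name

original_mode = os.stat(path).st_mode
os.chmod(path, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
print(oct(os.stat(path).st_mode & 0o777))  # the execute bits are now set
os.remove(path)
```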
gh_patches_debug_37725 | rasdani/github-patches | git_diff | pwndbg__pwndbg-291 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typesetting seems to be wrong

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/__init__.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from __future__ import absolute_import
4 from __future__ import division
5 from __future__ import print_function
6 from __future__ import unicode_literals
7
8 import gdb
9
10 import pwndbg.android
11 import pwndbg.arch
12 import pwndbg.arguments
13 import pwndbg.argv
14 import pwndbg.color
15 import pwndbg.commands
16 import pwndbg.commands.argv
17 import pwndbg.commands.aslr
18 import pwndbg.commands.auxv
19 import pwndbg.commands.checksec
20 import pwndbg.commands.config
21 import pwndbg.commands.context
22 import pwndbg.commands.cpsr
23 import pwndbg.commands.dt
24 import pwndbg.commands.dumpargs
25 import pwndbg.commands.elf
26 import pwndbg.commands.gdbinit
27 import pwndbg.commands.got
28 import pwndbg.commands.heap
29 import pwndbg.commands.hexdump
30 import pwndbg.commands.ida
31 import pwndbg.commands.misc
32 import pwndbg.commands.next
33 import pwndbg.commands.peda
34 import pwndbg.commands.procinfo
35 import pwndbg.commands.radare2
36 import pwndbg.commands.reload
37 import pwndbg.commands.rop
38 import pwndbg.commands.ropper
39 import pwndbg.commands.search
40 import pwndbg.commands.segments
41 import pwndbg.commands.shell
42 import pwndbg.commands.stack
43 import pwndbg.commands.start
44 import pwndbg.commands.telescope
45 import pwndbg.commands.theme
46 import pwndbg.commands.version
47 import pwndbg.commands.vmmap
48 import pwndbg.commands.windbg
49 import pwndbg.commands.xor
50 import pwndbg.constants
51 import pwndbg.disasm
52 import pwndbg.disasm.arm
53 import pwndbg.disasm.jump
54 import pwndbg.disasm.mips
55 import pwndbg.disasm.ppc
56 import pwndbg.disasm.sparc
57 import pwndbg.disasm.x86
58 import pwndbg.dt
59 import pwndbg.elf
60 import pwndbg.exception
61 import pwndbg.heap
62 import pwndbg.inthook
63 import pwndbg.memory
64 import pwndbg.net
65 import pwndbg.proc
66 import pwndbg.prompt
67 import pwndbg.regs
68 import pwndbg.stack
69 import pwndbg.typeinfo
70 import pwndbg.version
71 import pwndbg.vmmap
72 import pwndbg.wrappers
73
74 __version__ = pwndbg.version.__version__
75 version = __version__
76
77 try:
78 import unicorn
79 import pwndbg.emu
80 except:
81 pass
82
83 __all__ = [
84 'arch',
85 'auxv',
86 'chain',
87 'color',
88 'compat',
89 'disasm',
90 'dt',
91 'elf',
92 'enhance',
93 'events',
94 'file',
95 'function',
96 'heap',
97 'hexdump',
98 'ida',
99 'info',
100 'linkmap',
101 'malloc',
102 'memoize',
103 'memory',
104 'proc',
105 'regs',
106 'remote',
107 'search',
108 'stack',
109 'strings',
110 'symbol',
111 'typeinfo',
112 'ui',
113 'vmmap'
114 ]
115
116 prompt = "pwndbg> "
117 prompt = "\x02" + prompt + "\x01" # STX + prompt + SOH
118 prompt = pwndbg.color.red(prompt)
119 prompt = pwndbg.color.bold(prompt)
120 prompt = "\x01" + prompt + "\x02" # SOH + prompt + STX
121
122 pre_commands = """
123 set confirm off
124 set verbose off
125 set prompt %s
126 set pagination off
127 set height 0
128 set history expansion on
129 set history save on
130 set follow-fork-mode child
131 set backtrace past-main on
132 set step-mode on
133 set print pretty on
134 set width 0
135 set print elements 15
136 handle SIGALRM nostop print nopass
137 handle SIGBUS stop print nopass
138 handle SIGPIPE nostop print nopass
139 handle SIGSEGV stop print nopass
140 """.strip() % prompt
141
142 for line in pre_commands.strip().splitlines():
143 gdb.execute(line)
144
145 # This may throw an exception, see pwndbg/pwndbg#27
146 try:
147 gdb.execute("set disassembly-flavor intel")
148 except gdb.error:
149 pass
150
```
Path: `pwndbg/ui.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 A few helpers for making things print pretty-like.
5 """
6 from __future__ import absolute_import
7 from __future__ import division
8 from __future__ import print_function
9 from __future__ import unicode_literals
10
11 import fcntl
12 import struct
13 import sys
14 import termios
15
16 import pwndbg.arch
17 import pwndbg.color.context as C
18 import pwndbg.color.theme as theme
19 import pwndbg.config as config
20
21 theme.Parameter('banner-separator', '─', 'repeated banner separator character')
22
23 def banner(title):
24 title = title.upper()
25 try:
26 _height, width = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))
27 except:
28 width = 80
29 width -= 2
30 return C.banner(("[{:%s^%ss}]" % (config.banner_separator, width)).format(title))
31
32 def addrsz(address):
33 address = int(address) & pwndbg.arch.ptrmask
34 return "%{}x".format(2*pwndbg.arch.ptrsize) % address
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/__init__.py b/pwndbg/__init__.py
--- a/pwndbg/__init__.py
+++ b/pwndbg/__init__.py
@@ -5,6 +5,8 @@
from __future__ import print_function
from __future__ import unicode_literals
+import signal
+
import gdb
import pwndbg.android
@@ -67,6 +69,7 @@
import pwndbg.regs
import pwndbg.stack
import pwndbg.typeinfo
+import pwndbg.ui
import pwndbg.version
import pwndbg.vmmap
import pwndbg.wrappers
@@ -131,13 +134,13 @@
set backtrace past-main on
set step-mode on
set print pretty on
-set width 0
+set width %i
set print elements 15
handle SIGALRM nostop print nopass
handle SIGBUS stop print nopass
handle SIGPIPE nostop print nopass
handle SIGSEGV stop print nopass
-""".strip() % prompt
+""".strip() % (prompt, pwndbg.ui.get_window_size()[1])
for line in pre_commands.strip().splitlines():
gdb.execute(line)
@@ -147,3 +150,7 @@
gdb.execute("set disassembly-flavor intel")
except gdb.error:
pass
+
+
+# handle resize event to align width and completion
+signal.signal(signal.SIGWINCH, lambda signum, frame: gdb.execute("set width %i" % pwndbg.ui.get_window_size()[1]))
diff --git a/pwndbg/ui.py b/pwndbg/ui.py
--- a/pwndbg/ui.py
+++ b/pwndbg/ui.py
@@ -9,6 +9,7 @@
from __future__ import unicode_literals
import fcntl
+import os
import struct
import sys
import termios
@@ -22,13 +23,21 @@
def banner(title):
title = title.upper()
- try:
- _height, width = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))
- except:
- width = 80
+ _height, width = get_window_size()
width -= 2
return C.banner(("[{:%s^%ss}]" % (config.banner_separator, width)).format(title))
def addrsz(address):
address = int(address) & pwndbg.arch.ptrmask
return "%{}x".format(2*pwndbg.arch.ptrsize) % address
+
+def get_window_size():
+ fallback = (int(os.environ.get('LINES', 20)), int(os.environ.get('COLUMNS', 80)))
+ if not sys.stdin.isatty:
+ return fallback
+ try:
+ # get terminal size and force ret buffer len of 4 bytes for safe unpacking by passing equally long arg
+ rows, cols = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))
+ except:
+ rows, cols = fallback
+ return rows, cols
| {"golden_diff": "diff --git a/pwndbg/__init__.py b/pwndbg/__init__.py\n--- a/pwndbg/__init__.py\n+++ b/pwndbg/__init__.py\n@@ -5,6 +5,8 @@\n from __future__ import print_function\n from __future__ import unicode_literals\n \n+import signal\n+\n import gdb\n \n import pwndbg.android\n@@ -67,6 +69,7 @@\n import pwndbg.regs\n import pwndbg.stack\n import pwndbg.typeinfo\n+import pwndbg.ui\n import pwndbg.version\n import pwndbg.vmmap\n import pwndbg.wrappers\n@@ -131,13 +134,13 @@\n set backtrace past-main on\n set step-mode on\n set print pretty on\n-set width 0\n+set width %i\n set print elements 15\n handle SIGALRM nostop print nopass\n handle SIGBUS stop print nopass\n handle SIGPIPE nostop print nopass\n handle SIGSEGV stop print nopass\n-\"\"\".strip() % prompt\n+\"\"\".strip() % (prompt, pwndbg.ui.get_window_size()[1])\n \n for line in pre_commands.strip().splitlines():\n gdb.execute(line)\n@@ -147,3 +150,7 @@\n gdb.execute(\"set disassembly-flavor intel\")\n except gdb.error:\n pass\n+\n+\n+# handle resize event to align width and completion\n+signal.signal(signal.SIGWINCH, lambda signum, frame: gdb.execute(\"set width %i\" % pwndbg.ui.get_window_size()[1]))\ndiff --git a/pwndbg/ui.py b/pwndbg/ui.py\n--- a/pwndbg/ui.py\n+++ b/pwndbg/ui.py\n@@ -9,6 +9,7 @@\n from __future__ import unicode_literals\n \n import fcntl\n+import os\n import struct\n import sys\n import termios\n@@ -22,13 +23,21 @@\n \n def banner(title):\n title = title.upper()\n- try:\n- _height, width = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\n- except:\n- width = 80\n+ _height, width = get_window_size()\n width -= 2\n return C.banner((\"[{:%s^%ss}]\" % (config.banner_separator, width)).format(title))\n \n def addrsz(address):\n address = int(address) & pwndbg.arch.ptrmask\n return \"%{}x\".format(2*pwndbg.arch.ptrsize) % address\n+\n+def get_window_size():\n+ fallback = (int(os.environ.get('LINES', 20)), int(os.environ.get('COLUMNS', 80)))\n+ if not sys.stdin.isatty:\n+ return fallback\n+ try:\n+ # get terminal size and force ret buffer len of 4 bytes for safe unpacking by passing equally long arg\n+ rows, cols = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\n+ except:\n+ rows, cols = fallback\n+ return rows, cols\n", "issue": "Typesetting seems to be wrong\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport gdb\n\nimport pwndbg.android\nimport pwndbg.arch\nimport pwndbg.arguments\nimport pwndbg.argv\nimport pwndbg.color\nimport pwndbg.commands\nimport pwndbg.commands.argv\nimport pwndbg.commands.aslr\nimport pwndbg.commands.auxv\nimport pwndbg.commands.checksec\nimport pwndbg.commands.config\nimport pwndbg.commands.context\nimport pwndbg.commands.cpsr\nimport pwndbg.commands.dt\nimport pwndbg.commands.dumpargs\nimport pwndbg.commands.elf\nimport pwndbg.commands.gdbinit\nimport pwndbg.commands.got\nimport pwndbg.commands.heap\nimport pwndbg.commands.hexdump\nimport pwndbg.commands.ida\nimport pwndbg.commands.misc\nimport pwndbg.commands.next\nimport pwndbg.commands.peda\nimport pwndbg.commands.procinfo\nimport pwndbg.commands.radare2\nimport pwndbg.commands.reload\nimport pwndbg.commands.rop\nimport pwndbg.commands.ropper\nimport pwndbg.commands.search\nimport pwndbg.commands.segments\nimport pwndbg.commands.shell\nimport 
pwndbg.commands.stack\nimport pwndbg.commands.start\nimport pwndbg.commands.telescope\nimport pwndbg.commands.theme\nimport pwndbg.commands.version\nimport pwndbg.commands.vmmap\nimport pwndbg.commands.windbg\nimport pwndbg.commands.xor\nimport pwndbg.constants\nimport pwndbg.disasm\nimport pwndbg.disasm.arm\nimport pwndbg.disasm.jump\nimport pwndbg.disasm.mips\nimport pwndbg.disasm.ppc\nimport pwndbg.disasm.sparc\nimport pwndbg.disasm.x86\nimport pwndbg.dt\nimport pwndbg.elf\nimport pwndbg.exception\nimport pwndbg.heap\nimport pwndbg.inthook\nimport pwndbg.memory\nimport pwndbg.net\nimport pwndbg.proc\nimport pwndbg.prompt\nimport pwndbg.regs\nimport pwndbg.stack\nimport pwndbg.typeinfo\nimport pwndbg.version\nimport pwndbg.vmmap\nimport pwndbg.wrappers\n\n__version__ = pwndbg.version.__version__\nversion = __version__\n\ntry:\n import unicorn\n import pwndbg.emu\nexcept:\n pass\n\n__all__ = [\n'arch',\n'auxv',\n'chain',\n'color',\n'compat',\n'disasm',\n'dt',\n'elf',\n'enhance',\n'events',\n'file',\n'function',\n'heap',\n'hexdump',\n'ida',\n'info',\n'linkmap',\n'malloc',\n'memoize',\n'memory',\n'proc',\n'regs',\n'remote',\n'search',\n'stack',\n'strings',\n'symbol',\n'typeinfo',\n'ui',\n'vmmap'\n]\n\nprompt = \"pwndbg> \"\nprompt = \"\\x02\" + prompt + \"\\x01\" # STX + prompt + SOH\nprompt = pwndbg.color.red(prompt)\nprompt = pwndbg.color.bold(prompt)\nprompt = \"\\x01\" + prompt + \"\\x02\" # SOH + prompt + STX\n\npre_commands = \"\"\"\nset confirm off\nset verbose off\nset prompt %s\nset pagination off\nset height 0\nset history expansion on\nset history save on\nset follow-fork-mode child\nset backtrace past-main on\nset step-mode on\nset print pretty on\nset width 0\nset print elements 15\nhandle SIGALRM nostop print nopass\nhandle SIGBUS stop print nopass\nhandle SIGPIPE nostop print nopass\nhandle SIGSEGV stop print nopass\n\"\"\".strip() % prompt\n\nfor line in pre_commands.strip().splitlines():\n gdb.execute(line)\n\n# This may throw an exception, see pwndbg/pwndbg#27\ntry:\n gdb.execute(\"set disassembly-flavor intel\")\nexcept gdb.error:\n pass\n", "path": "pwndbg/__init__.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nA few helpers for making things print pretty-like.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport fcntl\nimport struct\nimport sys\nimport termios\n\nimport pwndbg.arch\nimport pwndbg.color.context as C\nimport pwndbg.color.theme as theme\nimport pwndbg.config as config\n\ntheme.Parameter('banner-separator', '\u2500', 'repeated banner separator character')\n\ndef banner(title):\n title = title.upper()\n try:\n _height, width = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\n except:\n width = 80\n width -= 2\n return C.banner((\"[{:%s^%ss}]\" % (config.banner_separator, width)).format(title))\n\ndef addrsz(address):\n address = int(address) & pwndbg.arch.ptrmask\n return \"%{}x\".format(2*pwndbg.arch.ptrsize) % address\n", "path": "pwndbg/ui.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport signal\n\nimport gdb\n\nimport pwndbg.android\nimport pwndbg.arch\nimport pwndbg.arguments\nimport pwndbg.argv\nimport pwndbg.color\nimport pwndbg.commands\nimport pwndbg.commands.argv\nimport 
pwndbg.commands.aslr\nimport pwndbg.commands.auxv\nimport pwndbg.commands.checksec\nimport pwndbg.commands.config\nimport pwndbg.commands.context\nimport pwndbg.commands.cpsr\nimport pwndbg.commands.dt\nimport pwndbg.commands.dumpargs\nimport pwndbg.commands.elf\nimport pwndbg.commands.gdbinit\nimport pwndbg.commands.got\nimport pwndbg.commands.heap\nimport pwndbg.commands.hexdump\nimport pwndbg.commands.ida\nimport pwndbg.commands.misc\nimport pwndbg.commands.next\nimport pwndbg.commands.peda\nimport pwndbg.commands.procinfo\nimport pwndbg.commands.radare2\nimport pwndbg.commands.reload\nimport pwndbg.commands.rop\nimport pwndbg.commands.ropper\nimport pwndbg.commands.search\nimport pwndbg.commands.segments\nimport pwndbg.commands.shell\nimport pwndbg.commands.stack\nimport pwndbg.commands.start\nimport pwndbg.commands.telescope\nimport pwndbg.commands.theme\nimport pwndbg.commands.version\nimport pwndbg.commands.vmmap\nimport pwndbg.commands.windbg\nimport pwndbg.commands.xor\nimport pwndbg.constants\nimport pwndbg.disasm\nimport pwndbg.disasm.arm\nimport pwndbg.disasm.jump\nimport pwndbg.disasm.mips\nimport pwndbg.disasm.ppc\nimport pwndbg.disasm.sparc\nimport pwndbg.disasm.x86\nimport pwndbg.dt\nimport pwndbg.elf\nimport pwndbg.exception\nimport pwndbg.heap\nimport pwndbg.inthook\nimport pwndbg.memory\nimport pwndbg.net\nimport pwndbg.proc\nimport pwndbg.prompt\nimport pwndbg.regs\nimport pwndbg.stack\nimport pwndbg.typeinfo\nimport pwndbg.ui\nimport pwndbg.version\nimport pwndbg.vmmap\nimport pwndbg.wrappers\n\n__version__ = pwndbg.version.__version__\nversion = __version__\n\ntry:\n import unicorn\n import pwndbg.emu\nexcept:\n pass\n\n__all__ = [\n'arch',\n'auxv',\n'chain',\n'color',\n'compat',\n'disasm',\n'dt',\n'elf',\n'enhance',\n'events',\n'file',\n'function',\n'heap',\n'hexdump',\n'ida',\n'info',\n'linkmap',\n'malloc',\n'memoize',\n'memory',\n'proc',\n'regs',\n'remote',\n'search',\n'stack',\n'strings',\n'symbol',\n'typeinfo',\n'ui',\n'vmmap'\n]\n\nprompt = \"pwndbg> \"\nprompt = \"\\x02\" + prompt + \"\\x01\" # STX + prompt + SOH\nprompt = pwndbg.color.red(prompt)\nprompt = pwndbg.color.bold(prompt)\nprompt = \"\\x01\" + prompt + \"\\x02\" # SOH + prompt + STX\n\npre_commands = \"\"\"\nset confirm off\nset verbose off\nset prompt %s\nset pagination off\nset height 0\nset history expansion on\nset history save on\nset follow-fork-mode child\nset backtrace past-main on\nset step-mode on\nset print pretty on\nset width %i\nset print elements 15\nhandle SIGALRM nostop print nopass\nhandle SIGBUS stop print nopass\nhandle SIGPIPE nostop print nopass\nhandle SIGSEGV stop print nopass\n\"\"\".strip() % (prompt, pwndbg.ui.get_window_size()[1])\n\nfor line in pre_commands.strip().splitlines():\n gdb.execute(line)\n\n# This may throw an exception, see pwndbg/pwndbg#27\ntry:\n gdb.execute(\"set disassembly-flavor intel\")\nexcept gdb.error:\n pass\n\n\n# handle resize event to align width and completion\nsignal.signal(signal.SIGWINCH, lambda signum, frame: gdb.execute(\"set width %i\" % pwndbg.ui.get_window_size()[1]))\n", "path": "pwndbg/__init__.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nA few helpers for making things print pretty-like.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport fcntl\nimport os\nimport struct\nimport sys\nimport termios\n\nimport pwndbg.arch\nimport pwndbg.color.context as C\nimport pwndbg.color.theme as 
theme\nimport pwndbg.config as config\n\ntheme.Parameter('banner-separator', '\u2500', 'repeated banner separator character')\n\ndef banner(title):\n title = title.upper()\n _height, width = get_window_size()\n width -= 2\n return C.banner((\"[{:%s^%ss}]\" % (config.banner_separator, width)).format(title))\n\ndef addrsz(address):\n address = int(address) & pwndbg.arch.ptrmask\n return \"%{}x\".format(2*pwndbg.arch.ptrsize) % address\n\ndef get_window_size():\n fallback = (int(os.environ.get('LINES', 20)), int(os.environ.get('COLUMNS', 80)))\n if not sys.stdin.isatty:\n return fallback\n try:\n # get terminal size and force ret buffer len of 4 bytes for safe unpacking by passing equally long arg\n rows, cols = struct.unpack('hh', fcntl.ioctl(sys.stdin.fileno(), termios.TIOCGWINSZ, '1234'))\n except:\n rows, cols = fallback\n return rows, cols\n", "path": "pwndbg/ui.py"}]} | 1,904 | 707 |