problem_id (stringlengths 18-22) | source (stringclasses 1) | task_type (stringclasses 1) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.71k-18.9k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 465-23.6k) | num_tokens_prompt (int64 556-4.1k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_65129 | rasdani/github-patches | git_diff | zulip__zulip-29386 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add clarification tooltip when settings can't be saved due to invalid Jitsi URL
In SETTINGS / ORGANIZATION SETTINGS > Other settings, we disable the "Save changes" button when the custom Jitsi URL is invalid. We should add a tooltip do the disabled button to explain why it is disabled: "Cannot save invalid Jitsi server URL."
<img width="809" alt="Screenshot 2023-11-02 at 10 31 14 PM" src="https://github.com/zulip/zulip/assets/2090066/b6bbb302-8b01-41ae-be98-1181497ecbf5">
</issue>
<code>
[start of tools/lib/capitalization.py]
1 import re
2 from typing import List, Match, Tuple
3
4 from bs4 import BeautifulSoup
5
6 # The phrases in this list will be ignored. The longest phrase is
7 # tried first; this removes the chance of smaller phrases changing
8 # the text before longer phrases are tried.
9 # The errors shown by `tools/check-capitalization` can be added to
10 # this list without any modification.
11 IGNORED_PHRASES = [
12 # Proper nouns and acronyms
13 r"API",
14 r"APNS",
15 r"Botserver",
16 r"Cookie Bot",
17 r"DevAuthBackend",
18 r"DSN",
19 r"Esc",
20 r"GCM",
21 r"GitHub",
22 r"Gravatar",
23 r"Help Center",
24 r"HTTP",
25 r"ID",
26 r"IDs",
27 r"Inbox",
28 r"IP",
29 r"JSON",
30 r"Kerberos",
31 r"LinkedIn",
32 r"LDAP",
33 r"Markdown",
34 r"OTP",
35 r"Pivotal",
36 r"Recent conversations",
37 r"DM",
38 r"DMs",
39 r"Slack",
40 r"Google",
41 r"Terms of Service",
42 r"Tuesday",
43 r"URL",
44 r"UUID",
45 r"Webathena",
46 r"WordPress",
47 r"Zephyr",
48 r"Zoom",
49 r"Zulip",
50 r"Zulip Server",
51 r"Zulip Account Security",
52 r"Zulip Security",
53 r"Zulip Cloud",
54 r"Zulip Cloud Standard",
55 r"Zulip Cloud Plus",
56 r"BigBlueButton",
57 # Code things
58 r"\.zuliprc",
59 # BeautifulSoup will remove <z-user> which is horribly confusing,
60 # so we need more of the sentence.
61 r"<z-user></z-user> will have the same role",
62 r"<z-user></z-user> will have the same properties",
63 # Things using "I"
64 r"I understand",
65 r"I'm",
66 r"I've",
67 r"Topics I participate in",
68 r"Topics I send a message to",
69 r"Topics I start",
70 # Specific short words
71 r"beta",
72 r"and",
73 r"bot",
74 r"e\.g\.",
75 r"enabled",
76 r"signups",
77 # Placeholders
78 r"keyword",
79 r"streamname",
80 r"user@example\.com",
81 r"example\.com",
82 r"acme",
83 # Fragments of larger strings
84 r"is …",
85 r"your subscriptions on your Streams page",
86 r"Add global time<br />Everyone sees global times in their own time zone\.",
87 r"user",
88 r"an unknown operating system",
89 r"Go to Settings",
90 r"find accounts for another email address",
91 # SPECIAL CASES
92 # Because topics usually are lower-case, this would look weird if it were capitalized
93 r"more topics",
94 # Used alone in a parenthetical where capitalized looks worse.
95 r"^deprecated$",
96 # We want the similar text in the Private Messages section to have the same capitalization.
97 r"more conversations",
98 r"back to streams",
99 # Capital 'i' looks weird in reminders popover
100 r"in 1 hour",
101 r"in 20 minutes",
102 r"in 3 hours",
103 # these are used as topics
104 r"^new streams$",
105 r"^stream events$",
106 # These are used as example short names (e.g. an uncapitalized context):
107 r"^marketing$",
108 r"^cookie$",
109 # Used to refer custom time limits
110 r"\bN\b",
111 # Capital c feels obtrusive in clear status option
112 r"clear",
113 r"group direct messages with \{recipient\}",
114 r"direct messages with \{recipient\}",
115 r"direct messages with yourself",
116 r"GIF",
117 # Emoji name placeholder
118 r"leafy green vegetable",
119 # Subdomain placeholder
120 r"your-organization-url",
121 # Used in invite modal
122 r"or",
123 # Used in GIPHY integration setting. GIFs Rating.
124 r"rated Y",
125 r"rated G",
126 r"rated PG",
127 r"rated PG13",
128 r"rated R",
129 # Used in GIPHY popover.
130 r"GIFs",
131 r"GIPHY",
132 # Used in our case studies
133 r"Technical University of Munich",
134 r"University of California San Diego",
135 # Used in stream creation form
136 r"email hidden",
137 # Use in compose box.
138 r"to send",
139 r"to add a new line",
140 # Used in showing Notification Bot read receipts message
141 "Notification Bot",
142 # Used in presence_enabled setting label
143 r"invisible mode off",
144 # Typeahead suggestions for "Pronouns" custom field type.
145 r"he/him",
146 r"she/her",
147 r"they/them",
148 # Used in message-move-time-limit setting label
149 r"does not apply to moderators and administrators",
150 # Used in message-delete-time-limit setting label
151 r"does not apply to administrators",
152 # Used as indicator with names for guest users.
153 r"guest",
154 # Used in pills for deactivated users.
155 r"deactivated",
156 # This is a reference to a setting/secret and should be lowercase.
157 r"zulip_org_id",
158 ]
159
160 # Sort regexes in descending order of their lengths. As a result, the
161 # longer phrases will be ignored first.
162 IGNORED_PHRASES.sort(key=len, reverse=True)
163
164 # Compile regexes to improve performance. This also extracts the
165 # text using BeautifulSoup and then removes extra whitespaces from
166 # it. This step enables us to add HTML in our regexes directly.
167 COMPILED_IGNORED_PHRASES = [
168 re.compile(" ".join(BeautifulSoup(regex, "lxml").text.split())) for regex in IGNORED_PHRASES
169 ]
170
171 SPLIT_BOUNDARY = "?.!" # Used to split string into sentences.
172 SPLIT_BOUNDARY_REGEX = re.compile(rf"[{SPLIT_BOUNDARY}]")
173
174 # Regexes which check capitalization in sentences.
175 DISALLOWED = [
176 r"^[a-z](?!\})", # Checks if the sentence starts with a lower case character.
177 r"^[A-Z][a-z]+[\sa-z0-9]+[A-Z]", # Checks if an upper case character exists
178 # after a lower case character when the first character is in upper case.
179 ]
180 DISALLOWED_REGEX = re.compile(r"|".join(DISALLOWED))
181
182 BANNED_WORDS = {
183 "realm": "The term realm should not appear in user-facing strings. Use organization instead.",
184 }
185
186
187 def get_safe_phrase(phrase: str) -> str:
188 """
189 Safe phrase is in lower case and doesn't contain characters which can
190 conflict with split boundaries. All conflicting characters are replaced
191 with low dash (_).
192 """
193 phrase = SPLIT_BOUNDARY_REGEX.sub("_", phrase)
194 return phrase.lower()
195
196
197 def replace_with_safe_phrase(matchobj: Match[str]) -> str:
198 """
199 The idea is to convert IGNORED_PHRASES into safe phrases, see
200 `get_safe_phrase()` function. The only exception is when the
201 IGNORED_PHRASE is at the start of the text or after a split
202 boundary; in this case, we change the first letter of the phrase
203 to upper case.
204 """
205 ignored_phrase = matchobj.group(0)
206 safe_string = get_safe_phrase(ignored_phrase)
207
208 start_index = matchobj.start()
209 complete_string = matchobj.string
210
211 is_string_start = start_index == 0
212 # We expect that there will be one space between split boundary
213 # and the next word.
214 punctuation = complete_string[max(start_index - 2, 0)]
215 is_after_split_boundary = punctuation in SPLIT_BOUNDARY
216 if is_string_start or is_after_split_boundary:
217 return safe_string.capitalize()
218
219 return safe_string
220
221
222 def get_safe_text(text: str) -> str:
223 """
224 This returns text which is rendered by BeautifulSoup and is in the
225 form that can be split easily and has all IGNORED_PHRASES processed.
226 """
227 soup = BeautifulSoup(text, "lxml")
228 text = " ".join(soup.text.split()) # Remove extra whitespaces.
229 for phrase_regex in COMPILED_IGNORED_PHRASES:
230 text = phrase_regex.sub(replace_with_safe_phrase, text)
231
232 return text
233
234
235 def is_capitalized(safe_text: str) -> bool:
236 sentences = SPLIT_BOUNDARY_REGEX.split(safe_text)
237 return not any(DISALLOWED_REGEX.search(sentence.strip()) for sentence in sentences)
238
239
240 def check_banned_words(text: str) -> List[str]:
241 lower_cased_text = text.lower()
242 errors = []
243 for word, reason in BANNED_WORDS.items():
244 if word in lower_cased_text:
245 # Hack: Should move this into BANNED_WORDS framework; for
246 # now, just hand-code the skips:
247 if (
248 "realm_name" in lower_cased_text
249 or "realm_uri" in lower_cased_text
250 or "remote_realm_host" in lower_cased_text
251 ):
252 continue
253 kwargs = dict(word=word, text=text, reason=reason)
254 msg = "{word} found in '{text}'. {reason}".format(**kwargs)
255 errors.append(msg)
256
257 return errors
258
259
260 def check_capitalization(strings: List[str]) -> Tuple[List[str], List[str], List[str]]:
261 errors = []
262 ignored = []
263 banned_word_errors = []
264 for text in strings:
265 text = " ".join(text.split()) # Remove extra whitespaces.
266 safe_text = get_safe_text(text)
267 has_ignored_phrase = text != safe_text
268 capitalized = is_capitalized(safe_text)
269 if not capitalized:
270 errors.append(text)
271 elif has_ignored_phrase:
272 ignored.append(text)
273
274 banned_word_errors.extend(check_banned_words(text))
275
276 return sorted(errors), sorted(ignored), sorted(banned_word_errors)
277
[end of tools/lib/capitalization.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py
--- a/tools/lib/capitalization.py
+++ b/tools/lib/capitalization.py
@@ -27,6 +27,7 @@
r"Inbox",
r"IP",
r"JSON",
+ r"Jitsi",
r"Kerberos",
r"LinkedIn",
r"LDAP",
| {"golden_diff": "diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py\n--- a/tools/lib/capitalization.py\n+++ b/tools/lib/capitalization.py\n@@ -27,6 +27,7 @@\n r\"Inbox\",\n r\"IP\",\n r\"JSON\",\n+ r\"Jitsi\",\n r\"Kerberos\",\n r\"LinkedIn\",\n r\"LDAP\",\n", "issue": "Add clarification tooltip when settings can't be saved due to invalid Jitsi URL\nIn SETTINGS / ORGANIZATION SETTINGS > Other settings, we disable the \"Save changes\" button when the custom Jitsi URL is invalid. We should add a tooltip do the disabled button to explain why it is disabled: \"Cannot save invalid Jitsi server URL.\"\r\n\r\n<img width=\"809\" alt=\"Screenshot 2023-11-02 at 10 31 14\u202fPM\" src=\"https://github.com/zulip/zulip/assets/2090066/b6bbb302-8b01-41ae-be98-1181497ecbf5\">\r\n\n", "before_files": [{"content": "import re\nfrom typing import List, Match, Tuple\n\nfrom bs4 import BeautifulSoup\n\n# The phrases in this list will be ignored. The longest phrase is\n# tried first; this removes the chance of smaller phrases changing\n# the text before longer phrases are tried.\n# The errors shown by `tools/check-capitalization` can be added to\n# this list without any modification.\nIGNORED_PHRASES = [\n # Proper nouns and acronyms\n r\"API\",\n r\"APNS\",\n r\"Botserver\",\n r\"Cookie Bot\",\n r\"DevAuthBackend\",\n r\"DSN\",\n r\"Esc\",\n r\"GCM\",\n r\"GitHub\",\n r\"Gravatar\",\n r\"Help Center\",\n r\"HTTP\",\n r\"ID\",\n r\"IDs\",\n r\"Inbox\",\n r\"IP\",\n r\"JSON\",\n r\"Kerberos\",\n r\"LinkedIn\",\n r\"LDAP\",\n r\"Markdown\",\n r\"OTP\",\n r\"Pivotal\",\n r\"Recent conversations\",\n r\"DM\",\n r\"DMs\",\n r\"Slack\",\n r\"Google\",\n r\"Terms of Service\",\n r\"Tuesday\",\n r\"URL\",\n r\"UUID\",\n r\"Webathena\",\n r\"WordPress\",\n r\"Zephyr\",\n r\"Zoom\",\n r\"Zulip\",\n r\"Zulip Server\",\n r\"Zulip Account Security\",\n r\"Zulip Security\",\n r\"Zulip Cloud\",\n r\"Zulip Cloud Standard\",\n r\"Zulip Cloud Plus\",\n r\"BigBlueButton\",\n # Code things\n r\"\\.zuliprc\",\n # BeautifulSoup will remove <z-user> which is horribly confusing,\n # so we need more of the sentence.\n r\"<z-user></z-user> will have the same role\",\n r\"<z-user></z-user> will have the same properties\",\n # Things using \"I\"\n r\"I understand\",\n r\"I'm\",\n r\"I've\",\n r\"Topics I participate in\",\n r\"Topics I send a message to\",\n r\"Topics I start\",\n # Specific short words\n r\"beta\",\n r\"and\",\n r\"bot\",\n r\"e\\.g\\.\",\n r\"enabled\",\n r\"signups\",\n # Placeholders\n r\"keyword\",\n r\"streamname\",\n r\"user@example\\.com\",\n r\"example\\.com\",\n r\"acme\",\n # Fragments of larger strings\n r\"is \u2026\",\n r\"your subscriptions on your Streams page\",\n r\"Add global time<br />Everyone sees global times in their own time zone\\.\",\n r\"user\",\n r\"an unknown operating system\",\n r\"Go to Settings\",\n r\"find accounts for another email address\",\n # SPECIAL CASES\n # Because topics usually are lower-case, this would look weird if it were capitalized\n r\"more topics\",\n # Used alone in a parenthetical where capitalized looks worse.\n r\"^deprecated$\",\n # We want the similar text in the Private Messages section to have the same capitalization.\n r\"more conversations\",\n r\"back to streams\",\n # Capital 'i' looks weird in reminders popover\n r\"in 1 hour\",\n r\"in 20 minutes\",\n r\"in 3 hours\",\n # these are used as topics\n r\"^new streams$\",\n r\"^stream events$\",\n # These are used as example short names (e.g. 
an uncapitalized context):\n r\"^marketing$\",\n r\"^cookie$\",\n # Used to refer custom time limits\n r\"\\bN\\b\",\n # Capital c feels obtrusive in clear status option\n r\"clear\",\n r\"group direct messages with \\{recipient\\}\",\n r\"direct messages with \\{recipient\\}\",\n r\"direct messages with yourself\",\n r\"GIF\",\n # Emoji name placeholder\n r\"leafy green vegetable\",\n # Subdomain placeholder\n r\"your-organization-url\",\n # Used in invite modal\n r\"or\",\n # Used in GIPHY integration setting. GIFs Rating.\n r\"rated Y\",\n r\"rated G\",\n r\"rated PG\",\n r\"rated PG13\",\n r\"rated R\",\n # Used in GIPHY popover.\n r\"GIFs\",\n r\"GIPHY\",\n # Used in our case studies\n r\"Technical University of Munich\",\n r\"University of California San Diego\",\n # Used in stream creation form\n r\"email hidden\",\n # Use in compose box.\n r\"to send\",\n r\"to add a new line\",\n # Used in showing Notification Bot read receipts message\n \"Notification Bot\",\n # Used in presence_enabled setting label\n r\"invisible mode off\",\n # Typeahead suggestions for \"Pronouns\" custom field type.\n r\"he/him\",\n r\"she/her\",\n r\"they/them\",\n # Used in message-move-time-limit setting label\n r\"does not apply to moderators and administrators\",\n # Used in message-delete-time-limit setting label\n r\"does not apply to administrators\",\n # Used as indicator with names for guest users.\n r\"guest\",\n # Used in pills for deactivated users.\n r\"deactivated\",\n # This is a reference to a setting/secret and should be lowercase.\n r\"zulip_org_id\",\n]\n\n# Sort regexes in descending order of their lengths. As a result, the\n# longer phrases will be ignored first.\nIGNORED_PHRASES.sort(key=len, reverse=True)\n\n# Compile regexes to improve performance. This also extracts the\n# text using BeautifulSoup and then removes extra whitespaces from\n# it. This step enables us to add HTML in our regexes directly.\nCOMPILED_IGNORED_PHRASES = [\n re.compile(\" \".join(BeautifulSoup(regex, \"lxml\").text.split())) for regex in IGNORED_PHRASES\n]\n\nSPLIT_BOUNDARY = \"?.!\" # Used to split string into sentences.\nSPLIT_BOUNDARY_REGEX = re.compile(rf\"[{SPLIT_BOUNDARY}]\")\n\n# Regexes which check capitalization in sentences.\nDISALLOWED = [\n r\"^[a-z](?!\\})\", # Checks if the sentence starts with a lower case character.\n r\"^[A-Z][a-z]+[\\sa-z0-9]+[A-Z]\", # Checks if an upper case character exists\n # after a lower case character when the first character is in upper case.\n]\nDISALLOWED_REGEX = re.compile(r\"|\".join(DISALLOWED))\n\nBANNED_WORDS = {\n \"realm\": \"The term realm should not appear in user-facing strings. Use organization instead.\",\n}\n\n\ndef get_safe_phrase(phrase: str) -> str:\n \"\"\"\n Safe phrase is in lower case and doesn't contain characters which can\n conflict with split boundaries. All conflicting characters are replaced\n with low dash (_).\n \"\"\"\n phrase = SPLIT_BOUNDARY_REGEX.sub(\"_\", phrase)\n return phrase.lower()\n\n\ndef replace_with_safe_phrase(matchobj: Match[str]) -> str:\n \"\"\"\n The idea is to convert IGNORED_PHRASES into safe phrases, see\n `get_safe_phrase()` function. 
The only exception is when the\n IGNORED_PHRASE is at the start of the text or after a split\n boundary; in this case, we change the first letter of the phrase\n to upper case.\n \"\"\"\n ignored_phrase = matchobj.group(0)\n safe_string = get_safe_phrase(ignored_phrase)\n\n start_index = matchobj.start()\n complete_string = matchobj.string\n\n is_string_start = start_index == 0\n # We expect that there will be one space between split boundary\n # and the next word.\n punctuation = complete_string[max(start_index - 2, 0)]\n is_after_split_boundary = punctuation in SPLIT_BOUNDARY\n if is_string_start or is_after_split_boundary:\n return safe_string.capitalize()\n\n return safe_string\n\n\ndef get_safe_text(text: str) -> str:\n \"\"\"\n This returns text which is rendered by BeautifulSoup and is in the\n form that can be split easily and has all IGNORED_PHRASES processed.\n \"\"\"\n soup = BeautifulSoup(text, \"lxml\")\n text = \" \".join(soup.text.split()) # Remove extra whitespaces.\n for phrase_regex in COMPILED_IGNORED_PHRASES:\n text = phrase_regex.sub(replace_with_safe_phrase, text)\n\n return text\n\n\ndef is_capitalized(safe_text: str) -> bool:\n sentences = SPLIT_BOUNDARY_REGEX.split(safe_text)\n return not any(DISALLOWED_REGEX.search(sentence.strip()) for sentence in sentences)\n\n\ndef check_banned_words(text: str) -> List[str]:\n lower_cased_text = text.lower()\n errors = []\n for word, reason in BANNED_WORDS.items():\n if word in lower_cased_text:\n # Hack: Should move this into BANNED_WORDS framework; for\n # now, just hand-code the skips:\n if (\n \"realm_name\" in lower_cased_text\n or \"realm_uri\" in lower_cased_text\n or \"remote_realm_host\" in lower_cased_text\n ):\n continue\n kwargs = dict(word=word, text=text, reason=reason)\n msg = \"{word} found in '{text}'. {reason}\".format(**kwargs)\n errors.append(msg)\n\n return errors\n\n\ndef check_capitalization(strings: List[str]) -> Tuple[List[str], List[str], List[str]]:\n errors = []\n ignored = []\n banned_word_errors = []\n for text in strings:\n text = \" \".join(text.split()) # Remove extra whitespaces.\n safe_text = get_safe_text(text)\n has_ignored_phrase = text != safe_text\n capitalized = is_capitalized(safe_text)\n if not capitalized:\n errors.append(text)\n elif has_ignored_phrase:\n ignored.append(text)\n\n banned_word_errors.extend(check_banned_words(text))\n\n return sorted(errors), sorted(ignored), sorted(banned_word_errors)\n", "path": "tools/lib/capitalization.py"}]} | 3,649 | 92 |
gh_patches_debug_40748 | rasdani/github-patches | git_diff | vacanza__python-holidays-639 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ireland considering UK as base class and hence not being a country itself
Issue also opened here:
home-assistant/core#67542
Looks like Ireland is being considered as being part of the UK which is wrong as not all the holidays in the UK exist, or necessarily exist in Ireland.
Take a reference on this comment: https://github.com/home-assistant/core/issues/67542#issuecomment-1058079650
</issue>
<code>
[start of holidays/countries/ireland.py]
1 # python-holidays
2 # ---------------
3 # A fast, efficient Python library for generating country, province and state
4 # specific sets of holidays on the fly. It aims to make determining whether a
5 # specific date is a holiday as fast and flexible as possible.
6 #
7 # Authors: dr-prodigy <[email protected]> (c) 2017-2022
8 # ryanss <[email protected]> (c) 2014-2017
9 # Website: https://github.com/dr-prodigy/python-holidays
10 # License: MIT (see LICENSE file)
11
12 from datetime import date
13
14 from dateutil.easter import easter
15 from dateutil.relativedelta import relativedelta as rd, MO
16
17 from holidays.constants import MAR, MAY, JUN, AUG, OCT, DEC
18 from holidays.constants import MON, TUE, WED, THU, FRI, SAT, SUN, WEEKEND
19 from holidays.holiday_base import HolidayBase
20 from .united_kingdom import UnitedKingdom
21
22
23 class Ireland(UnitedKingdom):
24 country = "IE"
25
26 def __init__(self, **kwargs):
27 HolidayBase.__init__(self, **kwargs)
28
29 def _country_specific(self, year):
30 # Ireland exclusive holidays
31
32 # St. Patrick's Day
33 name = "St. Patrick's Day"
34 self[date(year, MAR, 17)] = name
35 if self.observed and date(year, MAR, 17).weekday() in WEEKEND:
36 self[date(year, MAR, 17) + rd(weekday=MO)] = name + " (Observed)"
37
38 # Easter Monday
39 self[easter(year) + rd(weekday=MO)] = "Easter Monday"
40
41 # May Day bank holiday (first Monday in May)
42 if year >= 1978:
43 name = "May Day"
44 if year == 1995:
45 dt = date(year, MAY, 8)
46 else:
47 dt = date(year, MAY, 1)
48 if dt.weekday() == MON:
49 self[dt] = name
50 elif dt.weekday() == TUE:
51 self[dt + rd(days=+6)] = name
52 elif dt.weekday() == WED:
53 self[dt + rd(days=+5)] = name
54 elif dt.weekday() == THU:
55 self[dt + rd(days=+4)] = name
56 elif dt.weekday() == FRI:
57 self[dt + rd(days=+3)] = name
58 elif dt.weekday() == SAT:
59 self[dt + rd(days=+2)] = name
60 elif dt.weekday() == SUN:
61 self[dt + rd(days=+1)] = name
62
63 # June bank holiday (first Monday in June)
64 self[date(year, JUN, 1) + rd(weekday=MO)] = "June Bank Holiday"
65
66 # Summer bank holiday (first Monday in August)
67 self[date(year, AUG, 1) + rd(weekday=MO)] = "Summer Bank Holiday"
68
69 # October Bank Holiday (last Monday in October)
70 self[date(year, OCT, 31) + rd(weekday=MO(-1))] = "October Bank Holiday"
71
72 # St. Stephen's Day
73 name = "St. Stephen's Day"
74 self[date(year, DEC, 26)] = name
75 if self.observed and date(year, DEC, 26).weekday() == SAT:
76 self[date(year, DEC, 28)] = name + " (Observed)"
77 elif self.observed and date(year, DEC, 26).weekday() == SUN:
78 self[date(year, DEC, 28)] = name + " (Observed)"
79
80
81 class IE(Ireland):
82 pass
83
84
85 class IRL(Ireland):
86 pass
87
[end of holidays/countries/ireland.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/holidays/countries/ireland.py b/holidays/countries/ireland.py
--- a/holidays/countries/ireland.py
+++ b/holidays/countries/ireland.py
@@ -16,20 +16,37 @@
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, MO
-from holidays.constants import MAR, MAY, JUN, AUG, OCT, DEC
+from holidays.constants import FEB, MAR, MAY, JUN, AUG, OCT, DEC
from holidays.constants import MON, TUE, WED, THU, FRI, SAT, SUN, WEEKEND
from holidays.holiday_base import HolidayBase
-from .united_kingdom import UnitedKingdom
+class Ireland(HolidayBase):
+ """
+ Official holidays in Ireland, as declared in the Citizen's Information
+ bulletin:
+ https://www.citizensinformation.ie/en/employment/employment_rights_and_conditions/leave_and_holidays/public_holidays_in_ireland.html
+ """
-class Ireland(UnitedKingdom):
country = "IE"
+ subdivisions = []
def __init__(self, **kwargs):
HolidayBase.__init__(self, **kwargs)
- def _country_specific(self, year):
- # Ireland exclusive holidays
+ def _populate(self, year):
+ self[date(year, JAN, 1)] = "New Year's Day"
+
+ # St. Brigid's Day
+ if year >= 2023:
+ dt = date(year, FEB, 1)
+ self[dt] = "St. Brigid's Day"
+
+ if self.observed and dt.weekday() != FRI:
+ self[date(year, FEB, 1) + rd(weekday=MO)] = "St. Brigid's Day (Observed)"
+
+ # One-off day of rememberance and recognition
+ if year == 2022:
+ self[date(year, MAR, 18)] = "Day of Rememberance and Recognition"
# St. Patrick's Day
name = "St. Patrick's Day"
@@ -40,7 +57,7 @@
# Easter Monday
self[easter(year) + rd(weekday=MO)] = "Easter Monday"
- # May Day bank holiday (first Monday in May)
+ # May bank holiday (first Monday in May)
if year >= 1978:
name = "May Day"
if year == 1995:
@@ -66,18 +83,24 @@
self[date(year, JUN, 1) + rd(weekday=MO)] = "June Bank Holiday"
# Summer bank holiday (first Monday in August)
- self[date(year, AUG, 1) + rd(weekday=MO)] = "Summer Bank Holiday"
+ self[date(year, AUG, 1) + rd(weekday=MO)] = "August Bank Holiday"
# October Bank Holiday (last Monday in October)
self[date(year, OCT, 31) + rd(weekday=MO(-1))] = "October Bank Holiday"
+ # Christmas Day
+ name = "Christmas Day"
+ self[date(year, DEC, 25)] = "Christmas Day"
+ if self.observed and date(year, DEC, 25).weekday() in WEEKEND:
+ self[date(year, DEC, 25) + rd(weekday=MON)] = name + " (Observed)"
+
# St. Stephen's Day
name = "St. Stephen's Day"
self[date(year, DEC, 26)] = name
if self.observed and date(year, DEC, 26).weekday() == SAT:
- self[date(year, DEC, 28)] = name + " (Observed)"
+ self[date(year, DEC, 26) + rd(weekday=MON)] = name + " (Observed)"
elif self.observed and date(year, DEC, 26).weekday() == SUN:
- self[date(year, DEC, 28)] = name + " (Observed)"
+ self[date(year, DEC, 26) + rd(weekday=TUE)] = name + " (Observed)"
class IE(Ireland):
| {"golden_diff": "diff --git a/holidays/countries/ireland.py b/holidays/countries/ireland.py\n--- a/holidays/countries/ireland.py\n+++ b/holidays/countries/ireland.py\n@@ -16,20 +16,37 @@\n from dateutil.easter import easter\n from dateutil.relativedelta import relativedelta as rd, MO\n \n-from holidays.constants import MAR, MAY, JUN, AUG, OCT, DEC\n+from holidays.constants import FEB, MAR, MAY, JUN, AUG, OCT, DEC\n from holidays.constants import MON, TUE, WED, THU, FRI, SAT, SUN, WEEKEND\n from holidays.holiday_base import HolidayBase\n-from .united_kingdom import UnitedKingdom\n \n+class Ireland(HolidayBase):\n+ \"\"\"\n+ Official holidays in Ireland, as declared in the Citizen's Information\n+ bulletin:\n+ https://www.citizensinformation.ie/en/employment/employment_rights_and_conditions/leave_and_holidays/public_holidays_in_ireland.html\n+ \"\"\"\n \n-class Ireland(UnitedKingdom):\n country = \"IE\"\n+ subdivisions = []\n \n def __init__(self, **kwargs):\n HolidayBase.__init__(self, **kwargs)\n \n- def _country_specific(self, year):\n- # Ireland exclusive holidays\n+ def _populate(self, year):\n+ self[date(year, JAN, 1)] = \"New Year's Day\"\n+\n+ # St. Brigid's Day\n+ if year >= 2023:\n+ dt = date(year, FEB, 1)\n+ self[dt] = \"St. Brigid's Day\"\n+\n+ if self.observed and dt.weekday() != FRI:\n+ self[date(year, FEB, 1) + rd(weekday=MO)] = \"St. Brigid's Day (Observed)\"\n+\n+ # One-off day of rememberance and recognition\n+ if year == 2022:\n+ self[date(year, MAR, 18)] = \"Day of Rememberance and Recognition\"\n \n # St. Patrick's Day\n name = \"St. Patrick's Day\"\n@@ -40,7 +57,7 @@\n # Easter Monday\n self[easter(year) + rd(weekday=MO)] = \"Easter Monday\"\n \n- # May Day bank holiday (first Monday in May)\n+ # May bank holiday (first Monday in May)\n if year >= 1978:\n name = \"May Day\"\n if year == 1995:\n@@ -66,18 +83,24 @@\n self[date(year, JUN, 1) + rd(weekday=MO)] = \"June Bank Holiday\"\n \n # Summer bank holiday (first Monday in August)\n- self[date(year, AUG, 1) + rd(weekday=MO)] = \"Summer Bank Holiday\"\n+ self[date(year, AUG, 1) + rd(weekday=MO)] = \"August Bank Holiday\"\n \n # October Bank Holiday (last Monday in October)\n self[date(year, OCT, 31) + rd(weekday=MO(-1))] = \"October Bank Holiday\"\n \n+ # Christmas Day\n+ name = \"Christmas Day\"\n+ self[date(year, DEC, 25)] = \"Christmas Day\"\n+ if self.observed and date(year, DEC, 25).weekday() in WEEKEND:\n+ self[date(year, DEC, 25) + rd(weekday=MON)] = name + \" (Observed)\"\n+\n # St. Stephen's Day\n name = \"St. 
Stephen's Day\"\n self[date(year, DEC, 26)] = name\n if self.observed and date(year, DEC, 26).weekday() == SAT:\n- self[date(year, DEC, 28)] = name + \" (Observed)\"\n+ self[date(year, DEC, 26) + rd(weekday=MON)] = name + \" (Observed)\"\n elif self.observed and date(year, DEC, 26).weekday() == SUN:\n- self[date(year, DEC, 28)] = name + \" (Observed)\"\n+ self[date(year, DEC, 26) + rd(weekday=TUE)] = name + \" (Observed)\"\n \n \n class IE(Ireland):\n", "issue": "Ireland considering UK as base class and hence not being a country itself\nIssue also opened here:\r\nhome-assistant/core#67542\r\n\r\nLooks like Ireland is being considered as being part of the UK which is wrong as not all the holidays in the UK exist, or necessarily exist in Ireland.\r\n\r\nTake a reference on this comment: https://github.com/home-assistant/core/issues/67542#issuecomment-1058079650\n", "before_files": [{"content": "# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Authors: dr-prodigy <[email protected]> (c) 2017-2022\n# ryanss <[email protected]> (c) 2014-2017\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd, MO\n\nfrom holidays.constants import MAR, MAY, JUN, AUG, OCT, DEC\nfrom holidays.constants import MON, TUE, WED, THU, FRI, SAT, SUN, WEEKEND\nfrom holidays.holiday_base import HolidayBase\nfrom .united_kingdom import UnitedKingdom\n\n\nclass Ireland(UnitedKingdom):\n country = \"IE\"\n\n def __init__(self, **kwargs):\n HolidayBase.__init__(self, **kwargs)\n\n def _country_specific(self, year):\n # Ireland exclusive holidays\n\n # St. Patrick's Day\n name = \"St. Patrick's Day\"\n self[date(year, MAR, 17)] = name\n if self.observed and date(year, MAR, 17).weekday() in WEEKEND:\n self[date(year, MAR, 17) + rd(weekday=MO)] = name + \" (Observed)\"\n\n # Easter Monday\n self[easter(year) + rd(weekday=MO)] = \"Easter Monday\"\n\n # May Day bank holiday (first Monday in May)\n if year >= 1978:\n name = \"May Day\"\n if year == 1995:\n dt = date(year, MAY, 8)\n else:\n dt = date(year, MAY, 1)\n if dt.weekday() == MON:\n self[dt] = name\n elif dt.weekday() == TUE:\n self[dt + rd(days=+6)] = name\n elif dt.weekday() == WED:\n self[dt + rd(days=+5)] = name\n elif dt.weekday() == THU:\n self[dt + rd(days=+4)] = name\n elif dt.weekday() == FRI:\n self[dt + rd(days=+3)] = name\n elif dt.weekday() == SAT:\n self[dt + rd(days=+2)] = name\n elif dt.weekday() == SUN:\n self[dt + rd(days=+1)] = name\n\n # June bank holiday (first Monday in June)\n self[date(year, JUN, 1) + rd(weekday=MO)] = \"June Bank Holiday\"\n\n # Summer bank holiday (first Monday in August)\n self[date(year, AUG, 1) + rd(weekday=MO)] = \"Summer Bank Holiday\"\n\n # October Bank Holiday (last Monday in October)\n self[date(year, OCT, 31) + rd(weekday=MO(-1))] = \"October Bank Holiday\"\n\n # St. Stephen's Day\n name = \"St. 
Stephen's Day\"\n self[date(year, DEC, 26)] = name\n if self.observed and date(year, DEC, 26).weekday() == SAT:\n self[date(year, DEC, 28)] = name + \" (Observed)\"\n elif self.observed and date(year, DEC, 26).weekday() == SUN:\n self[date(year, DEC, 28)] = name + \" (Observed)\"\n\n\nclass IE(Ireland):\n pass\n\n\nclass IRL(Ireland):\n pass\n", "path": "holidays/countries/ireland.py"}]} | 1,668 | 957 |
gh_patches_debug_17361 | rasdani/github-patches | git_diff | certbot__certbot-8895 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nginx parser cannot handle empty file
We have some simple tooling that empties a vhosts config file to disable it. However certbot throws errors in the debug log about it.
latest python-certbot-nginx on ubuntu 18.04 PPA
```
2019-05-25 16:18:38,333:DEBUG:certbot_nginx.parser:Could not parse file: /etc/nginx/sites-generated/xdomain due to Expected {Group:({[<SPC><TAB><CR><LF>] "#" rest of line}) | Group:(Forward: ...) | Group:({[<SPC><TAB><CR><LF>] {Combine:({{quoted string, starting with " ending with " | quoted string, starting with ' ending with '} ")" [Re:('(\\$\\{)|[^{;\\s]')]...}) | Combine:({Re:('(\\$\\{)|[^{};\\s\'\\"]') [Re:('(\\$\\{)|[^{;\\s]')]...}) | quoted string, starting with " ending with " | quoted string, starting with ' ending with '} [{<SPC><TAB><CR><LF> {Combine:({{quoted string, starting with " ending with " | quoted string, starting with ' ending with '} ")" [Re:('(\\$\\{)|[^{;\\s]')]...}) | Combine:({Re:('(\\$\\{)|[^{};\\s\'\\"]') [Re:('(\\$\\{)|[^{;\\s]')]...}) | quoted string, starting with " ending with " | quoted string, starting with ' ending with '}}]... [<SPC><TAB><CR><LF>] Suppress:(";")})} (at char 1), (line:2, col:1)
```
In this case `/etc/nginx/sites-generated/xdomain` would be pointing to an empty file, nginx is ok with that, the certbot parser isn't.
</issue>
<code>
[start of certbot-nginx/certbot_nginx/_internal/nginxparser.py]
1 """Very low-level nginx config parser based on pyparsing."""
2 # Forked from https://github.com/fatiherikli/nginxparser (MIT Licensed)
3 import copy
4 import logging
5 from typing import Any
6 from typing import IO
7
8 from pyparsing import Combine
9 from pyparsing import Forward
10 from pyparsing import Group
11 from pyparsing import Literal
12 from pyparsing import OneOrMore
13 from pyparsing import Optional
14 from pyparsing import QuotedString
15 from pyparsing import Regex
16 from pyparsing import restOfLine
17 from pyparsing import stringEnd
18 from pyparsing import White
19 from pyparsing import ZeroOrMore
20
21 logger = logging.getLogger(__name__)
22
23
24 class RawNginxParser:
25 # pylint: disable=pointless-statement
26 """A class that parses nginx configuration with pyparsing."""
27
28 # constants
29 space = Optional(White()).leaveWhitespace()
30 required_space = White().leaveWhitespace()
31
32 left_bracket = Literal("{").suppress()
33 right_bracket = space + Literal("}").suppress()
34 semicolon = Literal(";").suppress()
35 dquoted = QuotedString('"', multiline=True, unquoteResults=False, escChar='\\')
36 squoted = QuotedString("'", multiline=True, unquoteResults=False, escChar='\\')
37 quoted = dquoted | squoted
38 head_tokenchars = Regex(r"(\$\{)|[^{};\s'\"]") # if (last_space)
39 tail_tokenchars = Regex(r"(\$\{)|[^{;\s]") # else
40 tokenchars = Combine(head_tokenchars + ZeroOrMore(tail_tokenchars))
41 paren_quote_extend = Combine(quoted + Literal(')') + ZeroOrMore(tail_tokenchars))
42 # note: ')' allows extension, but then we fall into else, not last_space.
43
44 token = paren_quote_extend | tokenchars | quoted
45
46 whitespace_token_group = space + token + ZeroOrMore(required_space + token) + space
47 assignment = whitespace_token_group + semicolon
48
49 comment = space + Literal('#') + restOfLine
50
51 block = Forward()
52
53 # order matters! see issue 518, and also http { # server { \n}
54 contents = Group(comment) | Group(block) | Group(assignment)
55
56 block_begin = Group(whitespace_token_group)
57 block_innards = Group(ZeroOrMore(contents) + space).leaveWhitespace()
58 block << block_begin + left_bracket + block_innards + right_bracket
59
60 script = OneOrMore(contents) + space + stringEnd
61 script.parseWithTabs().leaveWhitespace()
62
63 def __init__(self, source):
64 self.source = source
65
66 def parse(self):
67 """Returns the parsed tree."""
68 return self.script.parseString(self.source)
69
70 def as_list(self):
71 """Returns the parsed tree as a list."""
72 return self.parse().asList()
73
74 class RawNginxDumper:
75 """A class that dumps nginx configuration from the provided tree."""
76 def __init__(self, blocks):
77 self.blocks = blocks
78
79 def __iter__(self, blocks=None):
80 """Iterates the dumped nginx content."""
81 blocks = blocks or self.blocks
82 for b0 in blocks:
83 if isinstance(b0, str):
84 yield b0
85 continue
86 item = copy.deepcopy(b0)
87 if spacey(item[0]):
88 yield item.pop(0) # indentation
89 if not item:
90 continue
91
92 if isinstance(item[0], list): # block
93 yield "".join(item.pop(0)) + '{'
94 for parameter in item.pop(0):
95 for line in self.__iter__([parameter]): # negate "for b0 in blocks"
96 yield line
97 yield '}'
98 else: # not a block - list of strings
99 semicolon = ";"
100 if isinstance(item[0], str) and item[0].strip() == '#': # comment
101 semicolon = ""
102 yield "".join(item) + semicolon
103
104 def __str__(self):
105 """Return the parsed block as a string."""
106 return ''.join(self)
107
108
109 spacey = lambda x: (isinstance(x, str) and x.isspace()) or x == ''
110
111
112 class UnspacedList(list):
113 """Wrap a list [of lists], making any whitespace entries magically invisible"""
114
115 def __init__(self, list_source):
116 # ensure our argument is not a generator, and duplicate any sublists
117 self.spaced = copy.deepcopy(list(list_source))
118 self.dirty = False
119
120 # Turn self into a version of the source list that has spaces removed
121 # and all sub-lists also UnspacedList()ed
122 list.__init__(self, list_source)
123 for i, entry in reversed(list(enumerate(self))):
124 if isinstance(entry, list):
125 sublist = UnspacedList(entry)
126 list.__setitem__(self, i, sublist)
127 self.spaced[i] = sublist.spaced
128 elif spacey(entry):
129 # don't delete comments
130 if "#" not in self[:i]:
131 list.__delitem__(self, i)
132
133 def _coerce(self, inbound):
134 """
135 Coerce some inbound object to be appropriately usable in this object
136
137 :param inbound: string or None or list or UnspacedList
138 :returns: (coerced UnspacedList or string or None, spaced equivalent)
139 :rtype: tuple
140
141 """
142 if not isinstance(inbound, list): # str or None
143 return inbound, inbound
144 else:
145 if not hasattr(inbound, "spaced"):
146 inbound = UnspacedList(inbound)
147 return inbound, inbound.spaced
148
149 def insert(self, i, x):
150 item, spaced_item = self._coerce(x)
151 slicepos = self._spaced_position(i) if i < len(self) else len(self.spaced)
152 self.spaced.insert(slicepos, spaced_item)
153 if not spacey(item):
154 list.insert(self, i, item)
155 self.dirty = True
156
157 def append(self, x):
158 item, spaced_item = self._coerce(x)
159 self.spaced.append(spaced_item)
160 if not spacey(item):
161 list.append(self, item)
162 self.dirty = True
163
164 def extend(self, x):
165 item, spaced_item = self._coerce(x)
166 self.spaced.extend(spaced_item)
167 list.extend(self, item)
168 self.dirty = True
169
170 def __add__(self, other):
171 l = copy.deepcopy(self)
172 l.extend(other)
173 l.dirty = True
174 return l
175
176 def pop(self, _i=None):
177 raise NotImplementedError("UnspacedList.pop() not yet implemented")
178 def remove(self, _):
179 raise NotImplementedError("UnspacedList.remove() not yet implemented")
180 def reverse(self):
181 raise NotImplementedError("UnspacedList.reverse() not yet implemented")
182 def sort(self, _cmp=None, _key=None, _Rev=None):
183 raise NotImplementedError("UnspacedList.sort() not yet implemented")
184 def __setslice__(self, _i, _j, _newslice):
185 raise NotImplementedError("Slice operations on UnspacedLists not yet implemented")
186
187 def __setitem__(self, i, value):
188 if isinstance(i, slice):
189 raise NotImplementedError("Slice operations on UnspacedLists not yet implemented")
190 item, spaced_item = self._coerce(value)
191 self.spaced.__setitem__(self._spaced_position(i), spaced_item)
192 if not spacey(item):
193 list.__setitem__(self, i, item)
194 self.dirty = True
195
196 def __delitem__(self, i):
197 self.spaced.__delitem__(self._spaced_position(i))
198 list.__delitem__(self, i)
199 self.dirty = True
200
201 def __deepcopy__(self, memo):
202 new_spaced = copy.deepcopy(self.spaced, memo=memo)
203 l = UnspacedList(new_spaced)
204 l.dirty = self.dirty
205 return l
206
207 def is_dirty(self):
208 """Recurse through the parse tree to figure out if any sublists are dirty"""
209 if self.dirty:
210 return True
211 return any((isinstance(x, UnspacedList) and x.is_dirty() for x in self))
212
213 def _spaced_position(self, idx):
214 "Convert from indexes in the unspaced list to positions in the spaced one"
215 pos = spaces = 0
216 # Normalize indexes like list[-1] etc, and save the result
217 if idx < 0:
218 idx = len(self) + idx
219 if not 0 <= idx < len(self):
220 raise IndexError("list index out of range")
221 idx0 = idx
222 # Count the number of spaces in the spaced list before idx in the unspaced one
223 while idx != -1:
224 if spacey(self.spaced[pos]):
225 spaces += 1
226 else:
227 idx -= 1
228 pos += 1
229 return idx0 + spaces
230
231
232 # Shortcut functions to respect Python's serialization interface
233 # (like pyyaml, picker or json)
234
235 def loads(source):
236 """Parses from a string.
237
238 :param str source: The string to parse
239 :returns: The parsed tree
240 :rtype: list
241
242 """
243 return UnspacedList(RawNginxParser(source).as_list())
244
245
246 def load(_file):
247 """Parses from a file.
248
249 :param file _file: The file to parse
250 :returns: The parsed tree
251 :rtype: list
252
253 """
254 return loads(_file.read())
255
256
257 def dumps(blocks: UnspacedList) -> str:
258 """Dump to a Unicode string.
259
260 :param UnspacedList block: The parsed tree
261 :rtype: six.text_type
262
263 """
264 return str(RawNginxDumper(blocks.spaced))
265
266
267 def dump(blocks: UnspacedList, _file: IO[Any]) -> None:
268 """Dump to a file.
269
270 :param UnspacedList block: The parsed tree
271 :param IO[Any] _file: The file stream to dump to. It must be opened with
272 Unicode encoding.
273 :rtype: None
274
275 """
276 _file.write(dumps(blocks))
277
[end of certbot-nginx/certbot_nginx/_internal/nginxparser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/certbot-nginx/certbot_nginx/_internal/nginxparser.py b/certbot-nginx/certbot_nginx/_internal/nginxparser.py
--- a/certbot-nginx/certbot_nginx/_internal/nginxparser.py
+++ b/certbot-nginx/certbot_nginx/_internal/nginxparser.py
@@ -9,7 +9,6 @@
from pyparsing import Forward
from pyparsing import Group
from pyparsing import Literal
-from pyparsing import OneOrMore
from pyparsing import Optional
from pyparsing import QuotedString
from pyparsing import Regex
@@ -57,7 +56,7 @@
block_innards = Group(ZeroOrMore(contents) + space).leaveWhitespace()
block << block_begin + left_bracket + block_innards + right_bracket
- script = OneOrMore(contents) + space + stringEnd
+ script = ZeroOrMore(contents) + space + stringEnd
script.parseWithTabs().leaveWhitespace()
def __init__(self, source):
| {"golden_diff": "diff --git a/certbot-nginx/certbot_nginx/_internal/nginxparser.py b/certbot-nginx/certbot_nginx/_internal/nginxparser.py\n--- a/certbot-nginx/certbot_nginx/_internal/nginxparser.py\n+++ b/certbot-nginx/certbot_nginx/_internal/nginxparser.py\n@@ -9,7 +9,6 @@\n from pyparsing import Forward\n from pyparsing import Group\n from pyparsing import Literal\n-from pyparsing import OneOrMore\n from pyparsing import Optional\n from pyparsing import QuotedString\n from pyparsing import Regex\n@@ -57,7 +56,7 @@\n block_innards = Group(ZeroOrMore(contents) + space).leaveWhitespace()\n block << block_begin + left_bracket + block_innards + right_bracket\n \n- script = OneOrMore(contents) + space + stringEnd\n+ script = ZeroOrMore(contents) + space + stringEnd\n script.parseWithTabs().leaveWhitespace()\n \n def __init__(self, source):\n", "issue": "nginx parser cannot handle empty file\nWe have some simple tooling that empties a vhosts config file to disable it. However certbot throws errors in the debug log about it.\r\n\r\nlatest python-certbot-nginx on ubuntu 18.04 PPA\r\n\r\n```\r\n2019-05-25 16:18:38,333:DEBUG:certbot_nginx.parser:Could not parse file: /etc/nginx/sites-generated/xdomain due to Expected {Group:({[<SPC><TAB><CR><LF>] \"#\" rest of line}) | Group:(Forward: ...) | Group:({[<SPC><TAB><CR><LF>] {Combine:({{quoted string, starting with \" ending with \" | quoted string, starting with ' ending with '} \")\" [Re:('(\\\\$\\\\{)|[^{;\\\\s]')]...}) | Combine:({Re:('(\\\\$\\\\{)|[^{};\\\\s\\'\\\\\"]') [Re:('(\\\\$\\\\{)|[^{;\\\\s]')]...}) | quoted string, starting with \" ending with \" | quoted string, starting with ' ending with '} [{<SPC><TAB><CR><LF> {Combine:({{quoted string, starting with \" ending with \" | quoted string, starting with ' ending with '} \")\" [Re:('(\\\\$\\\\{)|[^{;\\\\s]')]...}) | Combine:({Re:('(\\\\$\\\\{)|[^{};\\\\s\\'\\\\\"]') [Re:('(\\\\$\\\\{)|[^{;\\\\s]')]...}) | quoted string, starting with \" ending with \" | quoted string, starting with ' ending with '}}]... 
[<SPC><TAB><CR><LF>] Suppress:(\";\")})} (at char 1), (line:2, col:1)\r\n```\r\n\r\nIn this case `/etc/nginx/sites-generated/xdomain` would be pointing to an empty file, nginx is ok with that, the certbot parser isn't.\n", "before_files": [{"content": "\"\"\"Very low-level nginx config parser based on pyparsing.\"\"\"\n# Forked from https://github.com/fatiherikli/nginxparser (MIT Licensed)\nimport copy\nimport logging\nfrom typing import Any\nfrom typing import IO\n\nfrom pyparsing import Combine\nfrom pyparsing import Forward\nfrom pyparsing import Group\nfrom pyparsing import Literal\nfrom pyparsing import OneOrMore\nfrom pyparsing import Optional\nfrom pyparsing import QuotedString\nfrom pyparsing import Regex\nfrom pyparsing import restOfLine\nfrom pyparsing import stringEnd\nfrom pyparsing import White\nfrom pyparsing import ZeroOrMore\n\nlogger = logging.getLogger(__name__)\n\n\nclass RawNginxParser:\n # pylint: disable=pointless-statement\n \"\"\"A class that parses nginx configuration with pyparsing.\"\"\"\n\n # constants\n space = Optional(White()).leaveWhitespace()\n required_space = White().leaveWhitespace()\n\n left_bracket = Literal(\"{\").suppress()\n right_bracket = space + Literal(\"}\").suppress()\n semicolon = Literal(\";\").suppress()\n dquoted = QuotedString('\"', multiline=True, unquoteResults=False, escChar='\\\\')\n squoted = QuotedString(\"'\", multiline=True, unquoteResults=False, escChar='\\\\')\n quoted = dquoted | squoted\n head_tokenchars = Regex(r\"(\\$\\{)|[^{};\\s'\\\"]\") # if (last_space)\n tail_tokenchars = Regex(r\"(\\$\\{)|[^{;\\s]\") # else\n tokenchars = Combine(head_tokenchars + ZeroOrMore(tail_tokenchars))\n paren_quote_extend = Combine(quoted + Literal(')') + ZeroOrMore(tail_tokenchars))\n # note: ')' allows extension, but then we fall into else, not last_space.\n\n token = paren_quote_extend | tokenchars | quoted\n\n whitespace_token_group = space + token + ZeroOrMore(required_space + token) + space\n assignment = whitespace_token_group + semicolon\n\n comment = space + Literal('#') + restOfLine\n\n block = Forward()\n\n # order matters! 
see issue 518, and also http { # server { \\n}\n contents = Group(comment) | Group(block) | Group(assignment)\n\n block_begin = Group(whitespace_token_group)\n block_innards = Group(ZeroOrMore(contents) + space).leaveWhitespace()\n block << block_begin + left_bracket + block_innards + right_bracket\n\n script = OneOrMore(contents) + space + stringEnd\n script.parseWithTabs().leaveWhitespace()\n\n def __init__(self, source):\n self.source = source\n\n def parse(self):\n \"\"\"Returns the parsed tree.\"\"\"\n return self.script.parseString(self.source)\n\n def as_list(self):\n \"\"\"Returns the parsed tree as a list.\"\"\"\n return self.parse().asList()\n\nclass RawNginxDumper:\n \"\"\"A class that dumps nginx configuration from the provided tree.\"\"\"\n def __init__(self, blocks):\n self.blocks = blocks\n\n def __iter__(self, blocks=None):\n \"\"\"Iterates the dumped nginx content.\"\"\"\n blocks = blocks or self.blocks\n for b0 in blocks:\n if isinstance(b0, str):\n yield b0\n continue\n item = copy.deepcopy(b0)\n if spacey(item[0]):\n yield item.pop(0) # indentation\n if not item:\n continue\n\n if isinstance(item[0], list): # block\n yield \"\".join(item.pop(0)) + '{'\n for parameter in item.pop(0):\n for line in self.__iter__([parameter]): # negate \"for b0 in blocks\"\n yield line\n yield '}'\n else: # not a block - list of strings\n semicolon = \";\"\n if isinstance(item[0], str) and item[0].strip() == '#': # comment\n semicolon = \"\"\n yield \"\".join(item) + semicolon\n\n def __str__(self):\n \"\"\"Return the parsed block as a string.\"\"\"\n return ''.join(self)\n\n\nspacey = lambda x: (isinstance(x, str) and x.isspace()) or x == ''\n\n\nclass UnspacedList(list):\n \"\"\"Wrap a list [of lists], making any whitespace entries magically invisible\"\"\"\n\n def __init__(self, list_source):\n # ensure our argument is not a generator, and duplicate any sublists\n self.spaced = copy.deepcopy(list(list_source))\n self.dirty = False\n\n # Turn self into a version of the source list that has spaces removed\n # and all sub-lists also UnspacedList()ed\n list.__init__(self, list_source)\n for i, entry in reversed(list(enumerate(self))):\n if isinstance(entry, list):\n sublist = UnspacedList(entry)\n list.__setitem__(self, i, sublist)\n self.spaced[i] = sublist.spaced\n elif spacey(entry):\n # don't delete comments\n if \"#\" not in self[:i]:\n list.__delitem__(self, i)\n\n def _coerce(self, inbound):\n \"\"\"\n Coerce some inbound object to be appropriately usable in this object\n\n :param inbound: string or None or list or UnspacedList\n :returns: (coerced UnspacedList or string or None, spaced equivalent)\n :rtype: tuple\n\n \"\"\"\n if not isinstance(inbound, list): # str or None\n return inbound, inbound\n else:\n if not hasattr(inbound, \"spaced\"):\n inbound = UnspacedList(inbound)\n return inbound, inbound.spaced\n\n def insert(self, i, x):\n item, spaced_item = self._coerce(x)\n slicepos = self._spaced_position(i) if i < len(self) else len(self.spaced)\n self.spaced.insert(slicepos, spaced_item)\n if not spacey(item):\n list.insert(self, i, item)\n self.dirty = True\n\n def append(self, x):\n item, spaced_item = self._coerce(x)\n self.spaced.append(spaced_item)\n if not spacey(item):\n list.append(self, item)\n self.dirty = True\n\n def extend(self, x):\n item, spaced_item = self._coerce(x)\n self.spaced.extend(spaced_item)\n list.extend(self, item)\n self.dirty = True\n\n def __add__(self, other):\n l = copy.deepcopy(self)\n l.extend(other)\n l.dirty = True\n return l\n\n def 
pop(self, _i=None):\n raise NotImplementedError(\"UnspacedList.pop() not yet implemented\")\n def remove(self, _):\n raise NotImplementedError(\"UnspacedList.remove() not yet implemented\")\n def reverse(self):\n raise NotImplementedError(\"UnspacedList.reverse() not yet implemented\")\n def sort(self, _cmp=None, _key=None, _Rev=None):\n raise NotImplementedError(\"UnspacedList.sort() not yet implemented\")\n def __setslice__(self, _i, _j, _newslice):\n raise NotImplementedError(\"Slice operations on UnspacedLists not yet implemented\")\n\n def __setitem__(self, i, value):\n if isinstance(i, slice):\n raise NotImplementedError(\"Slice operations on UnspacedLists not yet implemented\")\n item, spaced_item = self._coerce(value)\n self.spaced.__setitem__(self._spaced_position(i), spaced_item)\n if not spacey(item):\n list.__setitem__(self, i, item)\n self.dirty = True\n\n def __delitem__(self, i):\n self.spaced.__delitem__(self._spaced_position(i))\n list.__delitem__(self, i)\n self.dirty = True\n\n def __deepcopy__(self, memo):\n new_spaced = copy.deepcopy(self.spaced, memo=memo)\n l = UnspacedList(new_spaced)\n l.dirty = self.dirty\n return l\n\n def is_dirty(self):\n \"\"\"Recurse through the parse tree to figure out if any sublists are dirty\"\"\"\n if self.dirty:\n return True\n return any((isinstance(x, UnspacedList) and x.is_dirty() for x in self))\n\n def _spaced_position(self, idx):\n \"Convert from indexes in the unspaced list to positions in the spaced one\"\n pos = spaces = 0\n # Normalize indexes like list[-1] etc, and save the result\n if idx < 0:\n idx = len(self) + idx\n if not 0 <= idx < len(self):\n raise IndexError(\"list index out of range\")\n idx0 = idx\n # Count the number of spaces in the spaced list before idx in the unspaced one\n while idx != -1:\n if spacey(self.spaced[pos]):\n spaces += 1\n else:\n idx -= 1\n pos += 1\n return idx0 + spaces\n\n\n# Shortcut functions to respect Python's serialization interface\n# (like pyyaml, picker or json)\n\ndef loads(source):\n \"\"\"Parses from a string.\n\n :param str source: The string to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return UnspacedList(RawNginxParser(source).as_list())\n\n\ndef load(_file):\n \"\"\"Parses from a file.\n\n :param file _file: The file to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return loads(_file.read())\n\n\ndef dumps(blocks: UnspacedList) -> str:\n \"\"\"Dump to a Unicode string.\n\n :param UnspacedList block: The parsed tree\n :rtype: six.text_type\n\n \"\"\"\n return str(RawNginxDumper(blocks.spaced))\n\n\ndef dump(blocks: UnspacedList, _file: IO[Any]) -> None:\n \"\"\"Dump to a file.\n\n :param UnspacedList block: The parsed tree\n :param IO[Any] _file: The file stream to dump to. It must be opened with\n Unicode encoding.\n :rtype: None\n\n \"\"\"\n _file.write(dumps(blocks))\n", "path": "certbot-nginx/certbot_nginx/_internal/nginxparser.py"}]} | 3,953 | 234 |
gh_patches_debug_6921 | rasdani/github-patches | git_diff | plotly__dash-2513 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Exception when property of patched_fig is viewed
I know that it is currently not supported to view properties of `patch_fig=Patch()` but when e.g. iterating over trace names like so:
```
for trace in patched_fig['data']:
print(trace['name'])
```
no exception or error message is thrown but an endless stream of
```
...
<dash._patch.Patch object at 0x7f3b89a89b80>
<dash._patch.Patch object at 0x7f3b8305c0a0>
<dash._patch.Patch object at 0x7f3b89a89b80>
<dash._patch.Patch object at 0x7f3b8305c0a0>
<dash._patch.Patch object at 0x7f3b89a89b80>
<dash._patch.Patch object at 0x7f3b8305c0a0>
...
```
This is not exactly intended, right?
I got there by trying to delete a trace of patched_fig by its name, which otherwise appears not to be possible (or is it?)
</issue>
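The endless stream of reprs is consistent with the class shown below: `Patch.__getitem__` returns a fresh `Patch` for any key and never raises `IndexError`, so Python's legacy sequence-iteration fallback (`__getitem__(0)`, `__getitem__(1)`, …) never terminates, and `trace['name']` is itself just another `Patch` proxy whose default repr is what gets printed. A minimal, self-contained sketch of that fallback (the class here is a toy stand-in, not the Dash source):
```
class WriteOnlyProxy:
    """Toy stand-in for dash._patch.Patch: indexing always 'succeeds'."""

    def __getitem__(self, key):
        # A real Patch records the access path and returns a child proxy.
        # Because this never raises IndexError, iteration built on the
        # __getitem__ fallback can run forever.
        return WriteOnlyProxy()


proxy = WriteOnlyProxy()
items = iter(proxy)                  # legal: Python falls back to __getitem__
print(next(items), next(items))      # an unbounded supply of proxy objects
# `for item in proxy: ...` would therefore never finish.
```
Defining `__iter__` to fail fast (as the accepted patch further down does) is the usual way to opt a write-only proxy out of that fallback.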
<code>
[start of dash/_patch.py]
1 def _operation(name, location, **kwargs):
2 return {"operation": name, "location": location, "params": dict(**kwargs)}
3
4
5 _noop = object()
6
7
8 def validate_slice(obj):
9 if isinstance(obj, slice):
10 raise TypeError("a slice is not a valid index for patch")
11
12
13 class Patch:
14 """
15 Patch a callback output value
16
17 Act like a proxy of the output prop value on the frontend.
18
19 Supported prop types: Dictionaries and lists.
20 """
21
22 def __init__(self, location=None, parent=None):
23 if location is not None:
24 self._location = location
25 else:
26 # pylint: disable=consider-using-ternary
27 self._location = (parent and parent._location) or []
28 if parent is not None:
29 self._operations = parent._operations
30 else:
31 self._operations = []
32
33 def __getstate__(self):
34 return vars(self)
35
36 def __setstate__(self, state):
37 vars(self).update(state)
38
39 def __getitem__(self, item):
40 validate_slice(item)
41 return Patch(location=self._location + [item], parent=self)
42
43 def __getattr__(self, item):
44 if item == "tolist":
45 # to_json fix
46 raise AttributeError
47 if item == "_location":
48 return self._location
49 if item == "_operations":
50 return self._operations
51 return self.__getitem__(item)
52
53 def __setattr__(self, key, value):
54 if key in ("_location", "_operations"):
55 self.__dict__[key] = value
56 else:
57 self.__setitem__(key, value)
58
59 def __delattr__(self, item):
60 self.__delitem__(item)
61
62 def __setitem__(self, key, value):
63 validate_slice(key)
64 if value is _noop:
65 # The += set themselves.
66 return
67 self._operations.append(
68 _operation(
69 "Assign",
70 self._location + [key],
71 value=value,
72 )
73 )
74
75 def __delitem__(self, key):
76 validate_slice(key)
77 self._operations.append(_operation("Delete", self._location + [key]))
78
79 def __iadd__(self, other):
80 if isinstance(other, (list, tuple)):
81 self.extend(other)
82 else:
83 self._operations.append(_operation("Add", self._location, value=other))
84 return _noop
85
86 def __isub__(self, other):
87 self._operations.append(_operation("Sub", self._location, value=other))
88 return _noop
89
90 def __imul__(self, other):
91 self._operations.append(_operation("Mul", self._location, value=other))
92 return _noop
93
94 def __itruediv__(self, other):
95 self._operations.append(_operation("Div", self._location, value=other))
96 return _noop
97
98 def __ior__(self, other):
99 self.update(E=other)
100 return _noop
101
102 def append(self, item):
103 """Add the item to the end of a list"""
104 self._operations.append(_operation("Append", self._location, value=item))
105
106 def prepend(self, item):
107 """Add the item to the start of a list"""
108 self._operations.append(_operation("Prepend", self._location, value=item))
109
110 def insert(self, index, item):
111 """Add the item at the index of a list"""
112 self._operations.append(
113 _operation("Insert", self._location, value=item, index=index)
114 )
115
116 def clear(self):
117 """Remove all items in a list"""
118 self._operations.append(_operation("Clear", self._location))
119
120 def reverse(self):
121 """Reversal of the order of items in a list"""
122 self._operations.append(_operation("Reverse", self._location))
123
124 def extend(self, item):
125 """Add all the items to the end of a list"""
126 if not isinstance(item, (list, tuple)):
127 raise TypeError(f"{item} should be a list or tuple")
128 self._operations.append(_operation("Extend", self._location, value=item))
129
130 def remove(self, item):
131 """filter the item out of a list on the frontend"""
132 self._operations.append(_operation("Remove", self._location, value=item))
133
134 def update(self, E=None, **F):
135 """Merge a dict or keyword arguments with another dictionary"""
136 value = E or {}
137 value.update(F)
138 self._operations.append(_operation("Merge", self._location, value=value))
139
140 # pylint: disable=no-self-use
141 def sort(self):
142 raise KeyError(
143 "sort is reserved for future use, use brackets to access this key on your object"
144 )
145
146 def to_plotly_json(self):
147 return {
148 "__dash_patch_update": "__dash_patch_update",
149 "operations": self._operations,
150 }
151
[end of dash/_patch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dash/_patch.py b/dash/_patch.py
--- a/dash/_patch.py
+++ b/dash/_patch.py
@@ -99,6 +99,12 @@
self.update(E=other)
return _noop
+ def __iter__(self):
+ raise TypeError("Patch objects are write-only, you cannot iterate them.")
+
+ def __repr__(self):
+ return f"<write-only dash.Patch object at {self._location}>"
+
def append(self, item):
"""Add the item to the end of a list"""
self._operations.append(_operation("Append", self._location, value=item))
| {"golden_diff": "diff --git a/dash/_patch.py b/dash/_patch.py\n--- a/dash/_patch.py\n+++ b/dash/_patch.py\n@@ -99,6 +99,12 @@\n self.update(E=other)\n return _noop\n \n+ def __iter__(self):\n+ raise TypeError(\"Patch objects are write-only, you cannot iterate them.\")\n+\n+ def __repr__(self):\n+ return f\"<write-only dash.Patch object at {self._location}>\"\n+\n def append(self, item):\n \"\"\"Add the item to the end of a list\"\"\"\n self._operations.append(_operation(\"Append\", self._location, value=item))\n", "issue": "[BUG] Exception when property of patched_fig is viewed\nI know that it is currently not supported to view properties of `patch_fig=Patch()` but when e.g. iterating over trace names like so:\r\n```\r\n for trace in patched_fig['data']:\r\n print(trace['name'])\r\n```\r\nno exception or error message is thrown but an endless stream of \r\n\r\n```\r\n...\r\n<dash._patch.Patch object at 0x7f3b89a89b80>\r\n<dash._patch.Patch object at 0x7f3b8305c0a0>\r\n<dash._patch.Patch object at 0x7f3b89a89b80>\r\n<dash._patch.Patch object at 0x7f3b8305c0a0>\r\n<dash._patch.Patch object at 0x7f3b89a89b80>\r\n<dash._patch.Patch object at 0x7f3b8305c0a0>\r\n...\r\n```\r\nThis is not exactly intended right?\r\n\r\nI got there by trying to delete a trace of patched_fig by its name which otherwise appears not be possible (or is it?)\r\n\n", "before_files": [{"content": "def _operation(name, location, **kwargs):\n return {\"operation\": name, \"location\": location, \"params\": dict(**kwargs)}\n\n\n_noop = object()\n\n\ndef validate_slice(obj):\n if isinstance(obj, slice):\n raise TypeError(\"a slice is not a valid index for patch\")\n\n\nclass Patch:\n \"\"\"\n Patch a callback output value\n\n Act like a proxy of the output prop value on the frontend.\n\n Supported prop types: Dictionaries and lists.\n \"\"\"\n\n def __init__(self, location=None, parent=None):\n if location is not None:\n self._location = location\n else:\n # pylint: disable=consider-using-ternary\n self._location = (parent and parent._location) or []\n if parent is not None:\n self._operations = parent._operations\n else:\n self._operations = []\n\n def __getstate__(self):\n return vars(self)\n\n def __setstate__(self, state):\n vars(self).update(state)\n\n def __getitem__(self, item):\n validate_slice(item)\n return Patch(location=self._location + [item], parent=self)\n\n def __getattr__(self, item):\n if item == \"tolist\":\n # to_json fix\n raise AttributeError\n if item == \"_location\":\n return self._location\n if item == \"_operations\":\n return self._operations\n return self.__getitem__(item)\n\n def __setattr__(self, key, value):\n if key in (\"_location\", \"_operations\"):\n self.__dict__[key] = value\n else:\n self.__setitem__(key, value)\n\n def __delattr__(self, item):\n self.__delitem__(item)\n\n def __setitem__(self, key, value):\n validate_slice(key)\n if value is _noop:\n # The += set themselves.\n return\n self._operations.append(\n _operation(\n \"Assign\",\n self._location + [key],\n value=value,\n )\n )\n\n def __delitem__(self, key):\n validate_slice(key)\n self._operations.append(_operation(\"Delete\", self._location + [key]))\n\n def __iadd__(self, other):\n if isinstance(other, (list, tuple)):\n self.extend(other)\n else:\n self._operations.append(_operation(\"Add\", self._location, value=other))\n return _noop\n\n def __isub__(self, other):\n self._operations.append(_operation(\"Sub\", self._location, value=other))\n return _noop\n\n def __imul__(self, other):\n 
self._operations.append(_operation(\"Mul\", self._location, value=other))\n return _noop\n\n def __itruediv__(self, other):\n self._operations.append(_operation(\"Div\", self._location, value=other))\n return _noop\n\n def __ior__(self, other):\n self.update(E=other)\n return _noop\n\n def append(self, item):\n \"\"\"Add the item to the end of a list\"\"\"\n self._operations.append(_operation(\"Append\", self._location, value=item))\n\n def prepend(self, item):\n \"\"\"Add the item to the start of a list\"\"\"\n self._operations.append(_operation(\"Prepend\", self._location, value=item))\n\n def insert(self, index, item):\n \"\"\"Add the item at the index of a list\"\"\"\n self._operations.append(\n _operation(\"Insert\", self._location, value=item, index=index)\n )\n\n def clear(self):\n \"\"\"Remove all items in a list\"\"\"\n self._operations.append(_operation(\"Clear\", self._location))\n\n def reverse(self):\n \"\"\"Reversal of the order of items in a list\"\"\"\n self._operations.append(_operation(\"Reverse\", self._location))\n\n def extend(self, item):\n \"\"\"Add all the items to the end of a list\"\"\"\n if not isinstance(item, (list, tuple)):\n raise TypeError(f\"{item} should be a list or tuple\")\n self._operations.append(_operation(\"Extend\", self._location, value=item))\n\n def remove(self, item):\n \"\"\"filter the item out of a list on the frontend\"\"\"\n self._operations.append(_operation(\"Remove\", self._location, value=item))\n\n def update(self, E=None, **F):\n \"\"\"Merge a dict or keyword arguments with another dictionary\"\"\"\n value = E or {}\n value.update(F)\n self._operations.append(_operation(\"Merge\", self._location, value=value))\n\n # pylint: disable=no-self-use\n def sort(self):\n raise KeyError(\n \"sort is reserved for future use, use brackets to access this key on your object\"\n )\n\n def to_plotly_json(self):\n return {\n \"__dash_patch_update\": \"__dash_patch_update\",\n \"operations\": self._operations,\n }\n", "path": "dash/_patch.py"}]} | 2,199 | 145 |
gh_patches_debug_5255 | rasdani/github-patches | git_diff | wagtail__wagtail-1666 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError when passwords do not match on user creation form
Wagtail will crash with the following error if the two passwords don't match on the user creation form:
```
AttributeError at /admin/users/add/
'UserCreationForm' object has no attribute 'error_messages'
```
On this line: https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailusers/forms.py#L92
</issue>
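The line referenced above is in `clean_password2`, which looks up `self.error_messages['password_mismatch']`; in the listing below only `UserEditForm` and `GroupForm` define an `error_messages` class attribute, so on `UserCreationForm` the attribute lookup fails before a validation message can even be built. A plain-Python sketch of that mechanism (hypothetical class names, not the Wagtail source):
```
class FormWithMessages:
    # A class-level dict lets instances resolve self.error_messages.
    error_messages = {"password_mismatch": "The two password fields didn't match."}

    def clean_password2(self, password1, password2):
        if password1 != password2:
            raise ValueError(self.error_messages["password_mismatch"])


class FormWithoutMessages:
    # No class attribute and nothing set in __init__, so the lookup below
    # raises AttributeError as soon as the passwords differ.
    def clean_password2(self, password1, password2):
        if password1 != password2:
            raise ValueError(self.error_messages["password_mismatch"])


FormWithMessages().clean_password2("a", "a")            # passes silently
try:
    FormWithoutMessages().clean_password2("a", "b")
except AttributeError as exc:
    print(exc)   # 'FormWithoutMessages' object has no attribute 'error_messages'
```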
<code>
[start of wagtail/wagtailusers/forms.py]
1 from django import forms
2 from django.contrib.auth import get_user_model
3 from django.utils.translation import ugettext_lazy as _
4 from django.contrib.auth.models import Group, Permission
5 from django.forms.models import inlineformset_factory
6
7 from wagtail.wagtailcore import hooks
8 from wagtail.wagtailadmin.widgets import AdminPageChooser
9 from wagtail.wagtailusers.models import UserProfile
10 from wagtail.wagtailcore.models import Page, UserPagePermissionsProxy, GroupPagePermission
11
12
13 User = get_user_model()
14
15 # The standard fields each user model is expected to have, as a minimum.
16 standard_fields = set(['email', 'first_name', 'last_name', 'is_superuser', 'groups'])
17
18
19 class UsernameForm(forms.ModelForm):
20 """
21 Intelligently sets up the username field if it is infact a username. If the
22 User model has been swapped out, and the username field is an email or
23 something else, dont touch it.
24 """
25 def __init__(self, *args, **kwargs):
26 super(UsernameForm, self).__init__(*args, **kwargs)
27 if User.USERNAME_FIELD == 'username':
28 field = self.fields['username']
29 field.regex = r"^[\w.@+-]+$"
30 field.help_text = _("Required. 30 characters or fewer. Letters, "
31 "digits and @/./+/-/_ only.")
32 field.error_messages = field.error_messages.copy()
33 field.error_messages.update({
34 'invalid': _("This value may contain only letters, numbers "
35 "and @/./+/-/_ characters.")})
36
37 @property
38 def username_field(self):
39 return self[User.USERNAME_FIELD]
40
41 def separate_username_field(self):
42 return User.USERNAME_FIELD not in standard_fields
43
44
45 class UserCreationForm(UsernameForm):
46
47 required_css_class = "required"
48 is_superuser = forms.BooleanField(
49 label=_("Administrator"),
50 required=False,
51 help_text=_("If ticked, this user has the ability to manage user accounts.")
52 )
53
54 password1 = forms.CharField(
55 label=_("Password"),
56 required=False,
57 widget=forms.PasswordInput,
58 help_text=_("Leave blank if not changing."))
59 password2 = forms.CharField(
60 label=_("Password confirmation"), required=False,
61 widget=forms.PasswordInput,
62 help_text=_("Enter the same password as above, for verification."))
63
64 email = forms.EmailField(required=True, label=_("Email"))
65 first_name = forms.CharField(required=True, label=_("First Name"))
66 last_name = forms.CharField(required=True, label=_("Last Name"))
67
68 class Meta:
69 model = User
70 fields = set([User.USERNAME_FIELD]) | standard_fields
71 widgets = {
72 'groups': forms.CheckboxSelectMultiple
73 }
74
75 def clean_username(self):
76 username_field = User.USERNAME_FIELD
77 username = self.cleaned_data[username_field]
78 try:
79 User._default_manager.get(**{username_field: username})
80 except User.DoesNotExist:
81 return username
82 raise forms.ValidationError(
83 self.error_messages['duplicate_username'],
84 code='duplicate_username',
85 )
86
87 def clean_password2(self):
88 password1 = self.cleaned_data.get("password1")
89 password2 = self.cleaned_data.get("password2")
90 if password1 and password2 and password1 != password2:
91 raise forms.ValidationError(
92 self.error_messages['password_mismatch'],
93 code='password_mismatch',
94 )
95 return password2
96
97 def save(self, commit=True):
98 user = super(UserCreationForm, self).save(commit=False)
99 user.set_password(self.cleaned_data["password1"])
100
101 # users can access django-admin iff they are a superuser
102 user.is_staff = user.is_superuser
103
104 if commit:
105 user.save()
106 self.save_m2m()
107 return user
108
109
110 # Largely the same as django.contrib.auth.forms.UserCreationForm, but with enough subtle changes
111 # (to make password non-required) that it isn't worth inheriting...
112 class UserEditForm(UsernameForm):
113 required_css_class = "required"
114
115 error_messages = {
116 'duplicate_username': _("A user with that username already exists."),
117 'password_mismatch': _("The two password fields didn't match."),
118 }
119
120 email = forms.EmailField(required=True, label=_("Email"))
121 first_name = forms.CharField(required=True, label=_("First Name"))
122 last_name = forms.CharField(required=True, label=_("Last Name"))
123
124 password1 = forms.CharField(
125 label=_("Password"),
126 required=False,
127 widget=forms.PasswordInput,
128 help_text=_("Leave blank if not changing."))
129 password2 = forms.CharField(
130 label=_("Password confirmation"), required=False,
131 widget=forms.PasswordInput,
132 help_text=_("Enter the same password as above, for verification."))
133
134 is_superuser = forms.BooleanField(
135 label=_("Administrator"),
136 required=False,
137 help_text=_("Administrators have the ability to manage user accounts.")
138 )
139
140 class Meta:
141 model = User
142 fields = set([User.USERNAME_FIELD, "is_active"]) | standard_fields
143 widgets = {
144 'groups': forms.CheckboxSelectMultiple
145 }
146
147 def clean_username(self):
148 # Since User.username is unique, this check is redundant,
149 # but it sets a nicer error message than the ORM. See #13147.
150 username = self.cleaned_data["username"]
151 username_field = User.USERNAME_FIELD
152 try:
153 User._default_manager.exclude(id=self.instance.id).get(**{
154 username_field: username})
155 except User.DoesNotExist:
156 return username
157 raise forms.ValidationError(self.error_messages['duplicate_username'])
158
159 def clean_password2(self):
160 password1 = self.cleaned_data.get("password1")
161 password2 = self.cleaned_data.get("password2")
162 if password1 != password2:
163 raise forms.ValidationError(
164 self.error_messages['password_mismatch'])
165 return password2
166
167 def save(self, commit=True):
168 user = super(UserEditForm, self).save(commit=False)
169
170 # users can access django-admin iff they are a superuser
171 user.is_staff = user.is_superuser
172
173 if self.cleaned_data["password1"]:
174 user.set_password(self.cleaned_data["password1"])
175 if commit:
176 user.save()
177 self.save_m2m()
178 return user
179
180
181 class GroupForm(forms.ModelForm):
182 def __init__(self, *args, **kwargs):
183 super(GroupForm, self).__init__(*args, **kwargs)
184 self.registered_permissions = Permission.objects.none()
185 for fn in hooks.get_hooks('register_permissions'):
186 self.registered_permissions = self.registered_permissions | fn()
187 self.fields['permissions'].queryset = self.registered_permissions
188
189 required_css_class = "required"
190
191 error_messages = {
192 'duplicate_name': _("A group with that name already exists."),
193 }
194
195 is_superuser = forms.BooleanField(
196 label=_("Administrator"),
197 required=False,
198 help_text=_("Administrators have the ability to manage user accounts.")
199 )
200
201 class Meta:
202 model = Group
203 fields = ("name", "permissions", )
204
205 def clean_name(self):
206 # Since Group.name is unique, this check is redundant,
207 # but it sets a nicer error message than the ORM. See #13147.
208 name = self.cleaned_data["name"]
209 try:
210 Group._default_manager.exclude(id=self.instance.id).get(name=name)
211 except Group.DoesNotExist:
212 return name
213 raise forms.ValidationError(self.error_messages['duplicate_name'])
214
215 def save(self):
216 # We go back to the object to read (in order to reapply) the
217 # permissions which were set on this group, but which are not
218 # accessible in the wagtail admin interface, as otherwise these would
219 # be clobbered by this form.
220 try:
221 untouchable_permissions = self.instance.permissions.exclude(pk__in=self.registered_permissions)
222 bool(untouchable_permissions) # force this to be evaluated, as it's about to change
223 except ValueError:
224 # this form is not bound; we're probably creating a new group
225 untouchable_permissions = []
226 group = super(GroupForm, self).save()
227 group.permissions.add(*untouchable_permissions)
228 return group
229
230
231 class GroupPagePermissionForm(forms.ModelForm):
232 page = forms.ModelChoiceField(queryset=Page.objects.all(),
233 widget=AdminPageChooser(show_edit_link=False))
234
235 class Meta:
236 model = GroupPagePermission
237 fields = ('page', 'permission_type')
238
239
240 class BaseGroupPagePermissionFormSet(forms.models.BaseInlineFormSet):
241 def __init__(self, *args, **kwargs):
242 super(BaseGroupPagePermissionFormSet, self).__init__(*args, **kwargs)
243 self.form = GroupPagePermissionForm
244 for form in self.forms:
245 form.fields['DELETE'].widget = forms.HiddenInput()
246
247 @property
248 def empty_form(self):
249 empty_form = super(BaseGroupPagePermissionFormSet, self).empty_form
250 empty_form.fields['DELETE'].widget = forms.HiddenInput()
251 return empty_form
252
253
254 GroupPagePermissionFormSet = inlineformset_factory(
255 Group,
256 GroupPagePermission,
257 formset=BaseGroupPagePermissionFormSet,
258 extra=0,
259 fields=('page', 'permission_type'),
260 )
261
262
263 class NotificationPreferencesForm(forms.ModelForm):
264 def __init__(self, *args, **kwargs):
265 super(NotificationPreferencesForm, self).__init__(*args, **kwargs)
266 user_perms = UserPagePermissionsProxy(self.instance.user)
267 if not user_perms.can_publish_pages():
268 del self.fields['submitted_notifications']
269 if not user_perms.can_edit_pages():
270 del self.fields['approved_notifications']
271 del self.fields['rejected_notifications']
272
273 class Meta:
274 model = UserProfile
275 fields = ("submitted_notifications", "approved_notifications", "rejected_notifications")
276
[end of wagtail/wagtailusers/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/wagtailusers/forms.py b/wagtail/wagtailusers/forms.py
--- a/wagtail/wagtailusers/forms.py
+++ b/wagtail/wagtailusers/forms.py
@@ -43,8 +43,13 @@
class UserCreationForm(UsernameForm):
-
required_css_class = "required"
+
+ error_messages = {
+ 'duplicate_username': _("A user with that username already exists."),
+ 'password_mismatch': _("The two password fields didn't match."),
+ }
+
is_superuser = forms.BooleanField(
label=_("Administrator"),
required=False,
| {"golden_diff": "diff --git a/wagtail/wagtailusers/forms.py b/wagtail/wagtailusers/forms.py\n--- a/wagtail/wagtailusers/forms.py\n+++ b/wagtail/wagtailusers/forms.py\n@@ -43,8 +43,13 @@\n \n \n class UserCreationForm(UsernameForm):\n-\n required_css_class = \"required\"\n+\n+ error_messages = {\n+ 'duplicate_username': _(\"A user with that username already exists.\"),\n+ 'password_mismatch': _(\"The two password fields didn't match.\"),\n+ }\n+\n is_superuser = forms.BooleanField(\n label=_(\"Administrator\"),\n required=False,\n", "issue": "AttributeError when passwords do not match on user creation form\nWagtail will crash with the following error if the two passwords don't match on the user creation form:\n\n```\nAttributeError at /admin/users/add/\n\n'UserCreationForm' object has no attribute 'error_messages'\n```\n\nOn this line: https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailusers/forms.py#L92\n\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.models import Group, Permission\nfrom django.forms.models import inlineformset_factory\n\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailadmin.widgets import AdminPageChooser\nfrom wagtail.wagtailusers.models import UserProfile\nfrom wagtail.wagtailcore.models import Page, UserPagePermissionsProxy, GroupPagePermission\n\n\nUser = get_user_model()\n\n# The standard fields each user model is expected to have, as a minimum.\nstandard_fields = set(['email', 'first_name', 'last_name', 'is_superuser', 'groups'])\n\n\nclass UsernameForm(forms.ModelForm):\n \"\"\"\n Intelligently sets up the username field if it is infact a username. If the\n User model has been swapped out, and the username field is an email or\n something else, dont touch it.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(UsernameForm, self).__init__(*args, **kwargs)\n if User.USERNAME_FIELD == 'username':\n field = self.fields['username']\n field.regex = r\"^[\\w.@+-]+$\"\n field.help_text = _(\"Required. 30 characters or fewer. 
Letters, \"\n \"digits and @/./+/-/_ only.\")\n field.error_messages = field.error_messages.copy()\n field.error_messages.update({\n 'invalid': _(\"This value may contain only letters, numbers \"\n \"and @/./+/-/_ characters.\")})\n\n @property\n def username_field(self):\n return self[User.USERNAME_FIELD]\n\n def separate_username_field(self):\n return User.USERNAME_FIELD not in standard_fields\n\n\nclass UserCreationForm(UsernameForm):\n\n required_css_class = \"required\"\n is_superuser = forms.BooleanField(\n label=_(\"Administrator\"),\n required=False,\n help_text=_(\"If ticked, this user has the ability to manage user accounts.\")\n )\n\n password1 = forms.CharField(\n label=_(\"Password\"),\n required=False,\n widget=forms.PasswordInput,\n help_text=_(\"Leave blank if not changing.\"))\n password2 = forms.CharField(\n label=_(\"Password confirmation\"), required=False,\n widget=forms.PasswordInput,\n help_text=_(\"Enter the same password as above, for verification.\"))\n\n email = forms.EmailField(required=True, label=_(\"Email\"))\n first_name = forms.CharField(required=True, label=_(\"First Name\"))\n last_name = forms.CharField(required=True, label=_(\"Last Name\"))\n\n class Meta:\n model = User\n fields = set([User.USERNAME_FIELD]) | standard_fields\n widgets = {\n 'groups': forms.CheckboxSelectMultiple\n }\n\n def clean_username(self):\n username_field = User.USERNAME_FIELD\n username = self.cleaned_data[username_field]\n try:\n User._default_manager.get(**{username_field: username})\n except User.DoesNotExist:\n return username\n raise forms.ValidationError(\n self.error_messages['duplicate_username'],\n code='duplicate_username',\n )\n\n def clean_password2(self):\n password1 = self.cleaned_data.get(\"password1\")\n password2 = self.cleaned_data.get(\"password2\")\n if password1 and password2 and password1 != password2:\n raise forms.ValidationError(\n self.error_messages['password_mismatch'],\n code='password_mismatch',\n )\n return password2\n\n def save(self, commit=True):\n user = super(UserCreationForm, self).save(commit=False)\n user.set_password(self.cleaned_data[\"password1\"])\n\n # users can access django-admin iff they are a superuser\n user.is_staff = user.is_superuser\n\n if commit:\n user.save()\n self.save_m2m()\n return user\n\n\n# Largely the same as django.contrib.auth.forms.UserCreationForm, but with enough subtle changes\n# (to make password non-required) that it isn't worth inheriting...\nclass UserEditForm(UsernameForm):\n required_css_class = \"required\"\n\n error_messages = {\n 'duplicate_username': _(\"A user with that username already exists.\"),\n 'password_mismatch': _(\"The two password fields didn't match.\"),\n }\n\n email = forms.EmailField(required=True, label=_(\"Email\"))\n first_name = forms.CharField(required=True, label=_(\"First Name\"))\n last_name = forms.CharField(required=True, label=_(\"Last Name\"))\n\n password1 = forms.CharField(\n label=_(\"Password\"),\n required=False,\n widget=forms.PasswordInput,\n help_text=_(\"Leave blank if not changing.\"))\n password2 = forms.CharField(\n label=_(\"Password confirmation\"), required=False,\n widget=forms.PasswordInput,\n help_text=_(\"Enter the same password as above, for verification.\"))\n\n is_superuser = forms.BooleanField(\n label=_(\"Administrator\"),\n required=False,\n help_text=_(\"Administrators have the ability to manage user accounts.\")\n )\n\n class Meta:\n model = User\n fields = set([User.USERNAME_FIELD, \"is_active\"]) | standard_fields\n widgets = {\n 'groups': 
forms.CheckboxSelectMultiple\n }\n\n def clean_username(self):\n # Since User.username is unique, this check is redundant,\n # but it sets a nicer error message than the ORM. See #13147.\n username = self.cleaned_data[\"username\"]\n username_field = User.USERNAME_FIELD\n try:\n User._default_manager.exclude(id=self.instance.id).get(**{\n username_field: username})\n except User.DoesNotExist:\n return username\n raise forms.ValidationError(self.error_messages['duplicate_username'])\n\n def clean_password2(self):\n password1 = self.cleaned_data.get(\"password1\")\n password2 = self.cleaned_data.get(\"password2\")\n if password1 != password2:\n raise forms.ValidationError(\n self.error_messages['password_mismatch'])\n return password2\n\n def save(self, commit=True):\n user = super(UserEditForm, self).save(commit=False)\n\n # users can access django-admin iff they are a superuser\n user.is_staff = user.is_superuser\n\n if self.cleaned_data[\"password1\"]:\n user.set_password(self.cleaned_data[\"password1\"])\n if commit:\n user.save()\n self.save_m2m()\n return user\n\n\nclass GroupForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(GroupForm, self).__init__(*args, **kwargs)\n self.registered_permissions = Permission.objects.none()\n for fn in hooks.get_hooks('register_permissions'):\n self.registered_permissions = self.registered_permissions | fn()\n self.fields['permissions'].queryset = self.registered_permissions\n\n required_css_class = \"required\"\n\n error_messages = {\n 'duplicate_name': _(\"A group with that name already exists.\"),\n }\n\n is_superuser = forms.BooleanField(\n label=_(\"Administrator\"),\n required=False,\n help_text=_(\"Administrators have the ability to manage user accounts.\")\n )\n\n class Meta:\n model = Group\n fields = (\"name\", \"permissions\", )\n\n def clean_name(self):\n # Since Group.name is unique, this check is redundant,\n # but it sets a nicer error message than the ORM. 
See #13147.\n name = self.cleaned_data[\"name\"]\n try:\n Group._default_manager.exclude(id=self.instance.id).get(name=name)\n except Group.DoesNotExist:\n return name\n raise forms.ValidationError(self.error_messages['duplicate_name'])\n\n def save(self):\n # We go back to the object to read (in order to reapply) the\n # permissions which were set on this group, but which are not\n # accessible in the wagtail admin interface, as otherwise these would\n # be clobbered by this form.\n try:\n untouchable_permissions = self.instance.permissions.exclude(pk__in=self.registered_permissions)\n bool(untouchable_permissions) # force this to be evaluated, as it's about to change\n except ValueError:\n # this form is not bound; we're probably creating a new group\n untouchable_permissions = []\n group = super(GroupForm, self).save()\n group.permissions.add(*untouchable_permissions)\n return group\n\n\nclass GroupPagePermissionForm(forms.ModelForm):\n page = forms.ModelChoiceField(queryset=Page.objects.all(),\n widget=AdminPageChooser(show_edit_link=False))\n\n class Meta:\n model = GroupPagePermission\n fields = ('page', 'permission_type')\n\n\nclass BaseGroupPagePermissionFormSet(forms.models.BaseInlineFormSet):\n def __init__(self, *args, **kwargs):\n super(BaseGroupPagePermissionFormSet, self).__init__(*args, **kwargs)\n self.form = GroupPagePermissionForm\n for form in self.forms:\n form.fields['DELETE'].widget = forms.HiddenInput()\n\n @property\n def empty_form(self):\n empty_form = super(BaseGroupPagePermissionFormSet, self).empty_form\n empty_form.fields['DELETE'].widget = forms.HiddenInput()\n return empty_form\n\n\nGroupPagePermissionFormSet = inlineformset_factory(\n Group,\n GroupPagePermission,\n formset=BaseGroupPagePermissionFormSet,\n extra=0,\n fields=('page', 'permission_type'),\n)\n\n\nclass NotificationPreferencesForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(NotificationPreferencesForm, self).__init__(*args, **kwargs)\n user_perms = UserPagePermissionsProxy(self.instance.user)\n if not user_perms.can_publish_pages():\n del self.fields['submitted_notifications']\n if not user_perms.can_edit_pages():\n del self.fields['approved_notifications']\n del self.fields['rejected_notifications']\n\n class Meta:\n model = UserProfile\n fields = (\"submitted_notifications\", \"approved_notifications\", \"rejected_notifications\")\n", "path": "wagtail/wagtailusers/forms.py"}]} | 3,441 | 137 |
gh_patches_debug_28460 | rasdani/github-patches | git_diff | mindsdb__mindsdb-2678 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Fix scylladb error when connecting with secure bundle
When connecting with `secure_connect_bundle`, users get an unknown `secure_connect_bundle` path error.
</issue>
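In the handler code below, the bundle path is read into a local variable (`secure_connect_bundle = self.connection_args.get(...)`) but the file check and the `cloud` dict then reference a non-existent attribute `self.secure_connect_bundle`, which is what surfaces as the confusing path error; the accepted diff further down switches to the local name and also makes `protocol_version` and `keyspace` optional via `.get`. A minimal sketch of the name mix-up, with a made-up handler class for illustration:
```
import os


class Handler:
    def __init__(self, **connection_data):
        self.connection_args = connection_data   # no per-key attributes are set

    def connect(self):
        secure_connect_bundle = self.connection_args.get("secure_connect_bundle")
        if secure_connect_bundle is not None:
            # Buggy: the value only exists as a local variable, so
            # os.path.isfile(self.secure_connect_bundle) raises AttributeError.
            # Intended: validate and pass along the local value instead.
            if not os.path.isfile(secure_connect_bundle):
                raise Exception("'secure_connect_bundle' must be a path to the file")
            return {"cloud": {"secure_connect_bundle": secure_connect_bundle}}
        return {"contact_points": [self.connection_args["host"]]}


print(Handler(host="127.0.0.1").connect())   # no bundle: falls back to the host
```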
<code>
[start of mindsdb/integrations/handlers/scylla_handler/scylla_handler.py]
1 import os
2 from mindsdb.integrations.libs.base_handler import DatabaseHandler
3 from mindsdb_sql import parse_sql
4 from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
5 from cassandra.cluster import Cluster
6 from cassandra.auth import PlainTextAuthProvider
7 from mindsdb.integrations.libs.response import (
8 HandlerStatusResponse as StatusResponse,
9 HandlerResponse as Response,
10 RESPONSE_TYPE
11 )
12 from mindsdb.utilities.log import log
13 import pandas as pd
14 from mindsdb_sql.parser.ast.base import ASTNode
15
16
17 class ScyllaHandler(DatabaseHandler):
18 """
19 This handler handles connection and execution of the Scylla statements.
20 """
21 name = 'scylla'
22
23 def __init__(self, name=None, **kwargs):
24 super().__init__(name)
25 self.parser = parse_sql
26 self.connection_args = kwargs.get('connection_data')
27 self.session = None
28 self.is_connected = False
29
30 def connect(self):
31 """
32 Handles the connection to a Scylla keystore.
33 """
34 if self.is_connected is True:
35 return self.session
36
37 auth_provider = PlainTextAuthProvider(
38 username=self.connection_args['user'], password=self.connection_args['password']
39 )
40
41 connection_props = {
42 'auth_provider': auth_provider
43 }
44
45 if self.connection_args['protocol_version'] is not None:
46 connection_props['protocol_version'] = self.connection_args['protocol_version']
47
48 secure_connect_bundle = self.connection_args.get('secure_connect_bundle')
49
50 if secure_connect_bundle is not None:
51 if os.path.isfile(self.secure_connect_bundle) is False:
52 raise Exception("Secure_connect_bundle' must be path to the file")
53 connection_props['cloud'] = {
54 'secure_connect_bundle': self.secure_connect_bundle
55 }
56 else:
57 connection_props['contact_points'] = [self.connection_args['host']]
58 connection_props['port'] = int(self.connection_args['port'])
59
60 cluster = Cluster(**connection_props)
61 session = cluster.connect(self.connection_args['keyspace'])
62
63 self.is_connected = True
64 self.session = session
65 return self.session
66
67 def check_connection(self) -> StatusResponse:
68 """
69 Check the connection of the Scylla database
70 :return: success status and error message if error occurs
71 """
72 response = StatusResponse(False)
73
74 try:
75 session = self.connect()
76 # TODO: change the healthcheck
77 session.execute('SELECT release_version FROM system.local').one()
78 response.success = True
79 except Exception as e:
80 log.error(f'Error connecting to Scylla {self.connection_args["keyspace"]}, {e}!')
81 response.error_message = e
82
83 if response.success is False and self.is_connected is True:
84 self.is_connected = False
85
86 return response
87
88 def native_query(self, query: str) -> Response:
89 """
90 Receive SQL query and runs it
91 :param query: The SQL query to run in MySQL
92 :return: returns the records from the current recordset
93 """
94 session = self.connect()
95 try:
96 resp = session.execute(query).all()
97 if resp:
98 response = Response(
99 RESPONSE_TYPE.TABLE,
100 pd.DataFrame(
101 resp
102 )
103 )
104 else:
105 response = Response(RESPONSE_TYPE.OK)
106 except Exception as e:
107 log.error(f'Error running query: {query} on {self.connection_args["keyspace"]}!')
108 response = Response(
109 RESPONSE_TYPE.ERROR,
110 error_message=str(e)
111 )
112 return response
113
114 def query(self, query: ASTNode) -> Response:
115 """
116 Retrieve the data from the SQL statement.
117 """
118 renderer = SqlalchemyRender('mysql')
119 query_str = renderer.get_string(query, with_failback=True)
120 return self.native_query(query_str)
121
122 def get_tables(self) -> Response:
123 """
124 Get a list with all of the tabels in MySQL
125 """
126 q = "DESCRIBE TABLES;"
127 result = self.native_query(q)
128 df = result.data_frame
129 result.data_frame = df.rename(columns={df.columns[0]: 'table_name'})
130 return result
131
132 def get_columns(self, table_name) -> Response:
133 """
134 Show details about the table
135 """
136 q = f"DESCRIBE {table_name};"
137 result = self.native_query(q)
138 return result
139
[end of mindsdb/integrations/handlers/scylla_handler/scylla_handler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py b/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py
--- a/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py
+++ b/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py
@@ -41,24 +41,21 @@
connection_props = {
'auth_provider': auth_provider
}
-
- if self.connection_args['protocol_version'] is not None:
- connection_props['protocol_version'] = self.connection_args['protocol_version']
-
+ connection_props['protocol_version'] = self.connection_args.get('protocol_version', 4)
secure_connect_bundle = self.connection_args.get('secure_connect_bundle')
if secure_connect_bundle is not None:
- if os.path.isfile(self.secure_connect_bundle) is False:
+ if os.path.isfile(secure_connect_bundle) is False:
raise Exception("Secure_connect_bundle' must be path to the file")
connection_props['cloud'] = {
- 'secure_connect_bundle': self.secure_connect_bundle
+ 'secure_connect_bundle': secure_connect_bundle
}
else:
connection_props['contact_points'] = [self.connection_args['host']]
connection_props['port'] = int(self.connection_args['port'])
cluster = Cluster(**connection_props)
- session = cluster.connect(self.connection_args['keyspace'])
+ session = cluster.connect(self.connection_args.get('keyspace'))
self.is_connected = True
self.session = session
| {"golden_diff": "diff --git a/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py b/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py\n--- a/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py\n+++ b/mindsdb/integrations/handlers/scylla_handler/scylla_handler.py\n@@ -41,24 +41,21 @@\n connection_props = {\n 'auth_provider': auth_provider\n }\n-\n- if self.connection_args['protocol_version'] is not None:\n- connection_props['protocol_version'] = self.connection_args['protocol_version']\n- \n+ connection_props['protocol_version'] = self.connection_args.get('protocol_version', 4)\n secure_connect_bundle = self.connection_args.get('secure_connect_bundle')\n \n if secure_connect_bundle is not None:\n- if os.path.isfile(self.secure_connect_bundle) is False:\n+ if os.path.isfile(secure_connect_bundle) is False:\n raise Exception(\"Secure_connect_bundle' must be path to the file\")\n connection_props['cloud'] = {\n- 'secure_connect_bundle': self.secure_connect_bundle\n+ 'secure_connect_bundle': secure_connect_bundle\n }\n else:\n connection_props['contact_points'] = [self.connection_args['host']]\n connection_props['port'] = int(self.connection_args['port'])\n \n cluster = Cluster(**connection_props)\n- session = cluster.connect(self.connection_args['keyspace'])\n+ session = cluster.connect(self.connection_args.get('keyspace'))\n \n self.is_connected = True\n self.session = session\n", "issue": "[BUG] Fix scylladb error when connecting with secure bundle\nWhen connecting with `secure_connect_bundle` users got unknown secure_connect_bundle path error.\n", "before_files": [{"content": "import os\nfrom mindsdb.integrations.libs.base_handler import DatabaseHandler\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\nfrom cassandra.cluster import Cluster\nfrom cassandra.auth import PlainTextAuthProvider\nfrom mindsdb.integrations.libs.response import (\n HandlerStatusResponse as StatusResponse,\n HandlerResponse as Response,\n RESPONSE_TYPE\n)\nfrom mindsdb.utilities.log import log\nimport pandas as pd\nfrom mindsdb_sql.parser.ast.base import ASTNode\n\n\nclass ScyllaHandler(DatabaseHandler):\n \"\"\"\n This handler handles connection and execution of the Scylla statements.\n \"\"\"\n name = 'scylla'\n\n def __init__(self, name=None, **kwargs):\n super().__init__(name)\n self.parser = parse_sql\n self.connection_args = kwargs.get('connection_data')\n self.session = None\n self.is_connected = False\n\n def connect(self):\n \"\"\"\n Handles the connection to a Scylla keystore.\n \"\"\"\n if self.is_connected is True:\n return self.session\n\n auth_provider = PlainTextAuthProvider(\n username=self.connection_args['user'], password=self.connection_args['password']\n )\n\n connection_props = {\n 'auth_provider': auth_provider\n }\n\n if self.connection_args['protocol_version'] is not None:\n connection_props['protocol_version'] = self.connection_args['protocol_version']\n \n secure_connect_bundle = self.connection_args.get('secure_connect_bundle')\n\n if secure_connect_bundle is not None:\n if os.path.isfile(self.secure_connect_bundle) is False:\n raise Exception(\"Secure_connect_bundle' must be path to the file\")\n connection_props['cloud'] = {\n 'secure_connect_bundle': self.secure_connect_bundle\n }\n else:\n connection_props['contact_points'] = [self.connection_args['host']]\n connection_props['port'] = int(self.connection_args['port'])\n\n cluster = Cluster(**connection_props)\n session = 
cluster.connect(self.connection_args['keyspace'])\n\n self.is_connected = True\n self.session = session\n return self.session\n\n def check_connection(self) -> StatusResponse:\n \"\"\"\n Check the connection of the Scylla database\n :return: success status and error message if error occurs\n \"\"\"\n response = StatusResponse(False)\n\n try:\n session = self.connect()\n # TODO: change the healthcheck\n session.execute('SELECT release_version FROM system.local').one()\n response.success = True\n except Exception as e:\n log.error(f'Error connecting to Scylla {self.connection_args[\"keyspace\"]}, {e}!')\n response.error_message = e\n\n if response.success is False and self.is_connected is True:\n self.is_connected = False\n\n return response\n\n def native_query(self, query: str) -> Response:\n \"\"\"\n Receive SQL query and runs it\n :param query: The SQL query to run in MySQL\n :return: returns the records from the current recordset\n \"\"\"\n session = self.connect()\n try:\n resp = session.execute(query).all()\n if resp:\n response = Response(\n RESPONSE_TYPE.TABLE,\n pd.DataFrame(\n resp\n )\n )\n else:\n response = Response(RESPONSE_TYPE.OK)\n except Exception as e:\n log.error(f'Error running query: {query} on {self.connection_args[\"keyspace\"]}!')\n response = Response(\n RESPONSE_TYPE.ERROR,\n error_message=str(e)\n )\n return response\n\n def query(self, query: ASTNode) -> Response:\n \"\"\"\n Retrieve the data from the SQL statement.\n \"\"\"\n renderer = SqlalchemyRender('mysql')\n query_str = renderer.get_string(query, with_failback=True)\n return self.native_query(query_str)\n\n def get_tables(self) -> Response:\n \"\"\"\n Get a list with all of the tabels in MySQL\n \"\"\"\n q = \"DESCRIBE TABLES;\"\n result = self.native_query(q)\n df = result.data_frame\n result.data_frame = df.rename(columns={df.columns[0]: 'table_name'})\n return result\n\n def get_columns(self, table_name) -> Response:\n \"\"\"\n Show details about the table\n \"\"\"\n q = f\"DESCRIBE {table_name};\"\n result = self.native_query(q)\n return result\n", "path": "mindsdb/integrations/handlers/scylla_handler/scylla_handler.py"}]} | 1,833 | 350 |
gh_patches_debug_31481 | rasdani/github-patches | git_diff | bids-standard__pybids-1023 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BIDSValidation error message doesn't provide path of erroneous `dataset_description.json`
When creating a layout object or adding derivative directories, the [`BIDSDerivativesValidationError`](https://github.com/bids-standard/pybids/blob/f0d198da950371e64c6b2911627a549d78f62df3/bids/layout/validation.py#L130-L132) does not provide the file on which it errored out. This makes it hard for the user to debug the issue.
</issue>
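The accepted diff below fixes this by interpolating the resolved path of the offending `dataset_description.json` into each message. A generic sketch of the same pattern, with a stand-in exception class rather than the pybids ones:
```
import json
from pathlib import Path


class ValidationError(Exception):
    """Stand-in for BIDSValidationError / BIDSDerivativesValidationError."""


def load_description(dataset_root):
    target = Path(dataset_root) / "dataset_description.json"
    try:
        return json.loads(target.read_text(encoding="utf-8"))
    except (json.JSONDecodeError, UnicodeDecodeError) as err:
        # Naming target.resolve() tells the user exactly which file to fix.
        raise ValidationError(
            f"'dataset_description.json' is not valid JSON at {target.resolve()}."
        ) from err
```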
<code>
[start of bids/layout/validation.py]
1 """Functionality related to validation of BIDSLayouts and BIDS projects."""
2
3 from pathlib import Path
4 import json
5 import re
6 import warnings
7
8 from ..utils import listify
9 from ..exceptions import BIDSValidationError, BIDSDerivativesValidationError
10
11
12 MANDATORY_BIDS_FIELDS = {
13 "Name": {"Name": "Example dataset"},
14 "BIDSVersion": {"BIDSVersion": "1.0.2"},
15 }
16
17
18 MANDATORY_DERIVATIVES_FIELDS = {
19 **MANDATORY_BIDS_FIELDS,
20 "GeneratedBy": {
21 "GeneratedBy": [{"Name": "Example pipeline"}]
22 },
23 }
24
25 EXAMPLE_BIDS_DESCRIPTION = {
26 k: val[k] for val in MANDATORY_BIDS_FIELDS.values() for k in val}
27
28
29 EXAMPLE_DERIVATIVES_DESCRIPTION = {
30 k: val[k] for val in MANDATORY_DERIVATIVES_FIELDS.values() for k in val}
31
32
33 DEFAULT_LOCATIONS_TO_IGNORE = {
34 re.compile(r"^/(code|models|sourcedata|stimuli)"),
35 re.compile(r'/\.'),
36 }
37
38 def absolute_path_deprecation_warning():
39 warnings.warn("The absolute_paths argument will be removed from PyBIDS "
40 "in 0.14. You can easily access the relative path of "
41 "BIDSFile objects via the .relpath attribute (instead of "
42 ".path). Switching to this pattern is strongly encouraged, "
43 "as the current implementation of relative path handling "
44 "is known to produce query failures in certain edge cases.")
45
46
47 def indexer_arg_deprecation_warning():
48 warnings.warn("The ability to pass arguments to BIDSLayout that control "
49 "indexing is likely to be removed in future; possibly as "
50 "early as PyBIDS 0.14. This includes the `config_filename`, "
51 "`ignore`, `force_index`, and `index_metadata` arguments. "
52 "The recommended usage pattern is to initialize a new "
53 "BIDSLayoutIndexer with these arguments, and pass it to "
54 "the BIDSLayout via the `indexer` argument.")
55
56
57 def validate_root(root, validate):
58 # Validate root argument and make sure it contains mandatory info
59 try:
60 root = Path(root)
61 except TypeError:
62 raise TypeError("root argument must be a pathlib.Path (or a type that "
63 "supports casting to pathlib.Path, such as "
64 "string) specifying the directory "
65 "containing the BIDS dataset.")
66
67 root = root.absolute()
68
69 if not root.exists():
70 raise ValueError("BIDS root does not exist: %s" % root)
71
72 target = root / 'dataset_description.json'
73 if not target.exists():
74 if validate:
75 raise BIDSValidationError(
76 "'dataset_description.json' is missing from project root."
77 " Every valid BIDS dataset must have this file."
78 "\nExample contents of 'dataset_description.json': \n%s" %
79 json.dumps(EXAMPLE_BIDS_DESCRIPTION)
80 )
81 else:
82 description = None
83 else:
84 err = None
85 try:
86 with open(target, 'r', encoding='utf-8') as desc_fd:
87 description = json.load(desc_fd)
88 except (UnicodeDecodeError, json.JSONDecodeError) as e:
89 description = None
90 err = e
91 if validate:
92
93 if description is None:
94 raise BIDSValidationError(
95 "'dataset_description.json' is not a valid json file."
96 " There is likely a typo in your 'dataset_description.json'."
97 "\nExample contents of 'dataset_description.json': \n%s" %
98 json.dumps(EXAMPLE_BIDS_DESCRIPTION)
99 ) from err
100
101 for k in MANDATORY_BIDS_FIELDS:
102 if k not in description:
103 raise BIDSValidationError(
104 "Mandatory %r field missing from "
105 "'dataset_description.json'."
106 "\nExample: %s" % (k, MANDATORY_BIDS_FIELDS[k])
107 )
108
109 return root, description
110
111
112 def validate_derivative_path(path, **kwargs):
113 # Collect all paths that contain a dataset_description.json
114 dd = Path(path) / 'dataset_description.json'
115 description = json.loads(dd.read_text(encoding='utf-8'))
116 pipeline_names = [pipeline["Name"]
117 for pipeline in description.get("GeneratedBy", [])
118 if "Name" in pipeline]
119 if pipeline_names:
120 pipeline_name = pipeline_names[0]
121 elif "PipelineDescription" in description:
122 warnings.warn("The PipelineDescription field was superseded "
123 "by GeneratedBy in BIDS 1.4.0. You can use "
124 "``pybids upgrade`` to update your derivative "
125 "dataset.")
126 pipeline_name = description["PipelineDescription"].get("Name")
127 else:
128 pipeline_name = None
129 if pipeline_name is None:
130 raise BIDSDerivativesValidationError(
131 "Every valid BIDS-derivatives dataset must "
132 "have a GeneratedBy.Name field set "
133 "inside 'dataset_description.json'. "
134 f"\nExample: {MANDATORY_DERIVATIVES_FIELDS['GeneratedBy']}"
135 )
136 return pipeline_name
137
138
139 def _sort_patterns(patterns, root):
140 """Return sorted patterns, from more specific to more general."""
141 regexes = [patt for patt in patterns if hasattr(patt, "search")]
142
143 paths = [
144 str((root / patt).absolute())
145 for patt in listify(patterns)
146 if not hasattr(patt, "search")
147 ]
148 # Sort patterns from general to specific
149 paths.sort(key=len)
150
151 # Combine and return (note path patterns are reversed, specific first)
152 return [Path(p) for p in reversed(paths)] + regexes
153
154
155 def validate_indexing_args(ignore, force_index, root):
156 if ignore is None:
157 ignore = list(
158 DEFAULT_LOCATIONS_TO_IGNORE - set(force_index or [])
159 )
160
161 # root has already been validated to be a directory
162 ignore = _sort_patterns(ignore, root)
163 force_index = _sort_patterns(force_index or [], root)
164
165 # Derivatives get special handling; they shouldn't be indexed normally
166 for entry in force_index:
167 condi = (isinstance(entry, str) and
168 str(entry.resolve()).startswith('derivatives'))
169 if condi:
170 msg = ("Do not pass 'derivatives' in the force_index "
171 "list. To index derivatives, either set "
172 "derivatives=True, or use add_derivatives().")
173 raise ValueError(msg)
174
175 return ignore, force_index
176
[end of bids/layout/validation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bids/layout/validation.py b/bids/layout/validation.py
--- a/bids/layout/validation.py
+++ b/bids/layout/validation.py
@@ -93,7 +93,8 @@
if description is None:
raise BIDSValidationError(
"'dataset_description.json' is not a valid json file."
- " There is likely a typo in your 'dataset_description.json'."
+ " There is likely a typo in your 'dataset_description.json' at "
+ f"{target.resolve()}. "
"\nExample contents of 'dataset_description.json': \n%s" %
json.dumps(EXAMPLE_BIDS_DESCRIPTION)
) from err
@@ -102,7 +103,8 @@
if k not in description:
raise BIDSValidationError(
"Mandatory %r field missing from "
- "'dataset_description.json'."
+ "'dataset_description.json' at "
+ f"{target.resolve()}. "
"\nExample: %s" % (k, MANDATORY_BIDS_FIELDS[k])
)
@@ -130,7 +132,8 @@
raise BIDSDerivativesValidationError(
"Every valid BIDS-derivatives dataset must "
"have a GeneratedBy.Name field set "
- "inside 'dataset_description.json'. "
+ "inside 'dataset_description.json', "
+ f"here {dd.resolve()}. "
f"\nExample: {MANDATORY_DERIVATIVES_FIELDS['GeneratedBy']}"
)
return pipeline_name
| {"golden_diff": "diff --git a/bids/layout/validation.py b/bids/layout/validation.py\n--- a/bids/layout/validation.py\n+++ b/bids/layout/validation.py\n@@ -93,7 +93,8 @@\n if description is None:\n raise BIDSValidationError(\n \"'dataset_description.json' is not a valid json file.\"\n- \" There is likely a typo in your 'dataset_description.json'.\"\n+ \" There is likely a typo in your 'dataset_description.json' at \"\n+ f\"{target.resolve()}. \"\n \"\\nExample contents of 'dataset_description.json': \\n%s\" %\n json.dumps(EXAMPLE_BIDS_DESCRIPTION)\n ) from err\n@@ -102,7 +103,8 @@\n if k not in description:\n raise BIDSValidationError(\n \"Mandatory %r field missing from \"\n- \"'dataset_description.json'.\"\n+ \"'dataset_description.json' at \"\n+ f\"{target.resolve()}. \"\n \"\\nExample: %s\" % (k, MANDATORY_BIDS_FIELDS[k])\n )\n \n@@ -130,7 +132,8 @@\n raise BIDSDerivativesValidationError(\n \"Every valid BIDS-derivatives dataset must \"\n \"have a GeneratedBy.Name field set \"\n- \"inside 'dataset_description.json'. \"\n+ \"inside 'dataset_description.json', \"\n+ f\"here {dd.resolve()}. \"\n f\"\\nExample: {MANDATORY_DERIVATIVES_FIELDS['GeneratedBy']}\"\n )\n return pipeline_name\n", "issue": "BIDSValidation error message doesn't provide path of erroneous `dataset_description.json` \nWhen creating a layout object or adding derivative directories, the [`BIDSDerivativesValidationError`](https://github.com/bids-standard/pybids/blob/f0d198da950371e64c6b2911627a549d78f62df3/bids/layout/validation.py#L130-L132) does not provide the file on which it errored out. This makes it hard for the user to debug the issue.\n", "before_files": [{"content": "\"\"\"Functionality related to validation of BIDSLayouts and BIDS projects.\"\"\"\n\nfrom pathlib import Path\nimport json\nimport re\nimport warnings\n\nfrom ..utils import listify\nfrom ..exceptions import BIDSValidationError, BIDSDerivativesValidationError\n\n\nMANDATORY_BIDS_FIELDS = {\n \"Name\": {\"Name\": \"Example dataset\"},\n \"BIDSVersion\": {\"BIDSVersion\": \"1.0.2\"},\n}\n\n\nMANDATORY_DERIVATIVES_FIELDS = {\n **MANDATORY_BIDS_FIELDS,\n \"GeneratedBy\": {\n \"GeneratedBy\": [{\"Name\": \"Example pipeline\"}]\n },\n}\n\nEXAMPLE_BIDS_DESCRIPTION = {\n k: val[k] for val in MANDATORY_BIDS_FIELDS.values() for k in val}\n\n\nEXAMPLE_DERIVATIVES_DESCRIPTION = {\n k: val[k] for val in MANDATORY_DERIVATIVES_FIELDS.values() for k in val}\n\n\nDEFAULT_LOCATIONS_TO_IGNORE = {\n re.compile(r\"^/(code|models|sourcedata|stimuli)\"),\n re.compile(r'/\\.'),\n}\n\ndef absolute_path_deprecation_warning():\n warnings.warn(\"The absolute_paths argument will be removed from PyBIDS \"\n \"in 0.14. You can easily access the relative path of \"\n \"BIDSFile objects via the .relpath attribute (instead of \"\n \".path). Switching to this pattern is strongly encouraged, \"\n \"as the current implementation of relative path handling \"\n \"is known to produce query failures in certain edge cases.\")\n\n\ndef indexer_arg_deprecation_warning():\n warnings.warn(\"The ability to pass arguments to BIDSLayout that control \"\n \"indexing is likely to be removed in future; possibly as \"\n \"early as PyBIDS 0.14. This includes the `config_filename`, \"\n \"`ignore`, `force_index`, and `index_metadata` arguments. 
\"\n \"The recommended usage pattern is to initialize a new \"\n \"BIDSLayoutIndexer with these arguments, and pass it to \"\n \"the BIDSLayout via the `indexer` argument.\")\n\n\ndef validate_root(root, validate):\n # Validate root argument and make sure it contains mandatory info\n try:\n root = Path(root)\n except TypeError:\n raise TypeError(\"root argument must be a pathlib.Path (or a type that \"\n \"supports casting to pathlib.Path, such as \"\n \"string) specifying the directory \"\n \"containing the BIDS dataset.\")\n\n root = root.absolute()\n\n if not root.exists():\n raise ValueError(\"BIDS root does not exist: %s\" % root)\n\n target = root / 'dataset_description.json'\n if not target.exists():\n if validate:\n raise BIDSValidationError(\n \"'dataset_description.json' is missing from project root.\"\n \" Every valid BIDS dataset must have this file.\"\n \"\\nExample contents of 'dataset_description.json': \\n%s\" %\n json.dumps(EXAMPLE_BIDS_DESCRIPTION)\n )\n else:\n description = None\n else:\n err = None\n try:\n with open(target, 'r', encoding='utf-8') as desc_fd:\n description = json.load(desc_fd)\n except (UnicodeDecodeError, json.JSONDecodeError) as e:\n description = None\n err = e\n if validate:\n\n if description is None:\n raise BIDSValidationError(\n \"'dataset_description.json' is not a valid json file.\"\n \" There is likely a typo in your 'dataset_description.json'.\"\n \"\\nExample contents of 'dataset_description.json': \\n%s\" %\n json.dumps(EXAMPLE_BIDS_DESCRIPTION)\n ) from err\n\n for k in MANDATORY_BIDS_FIELDS:\n if k not in description:\n raise BIDSValidationError(\n \"Mandatory %r field missing from \"\n \"'dataset_description.json'.\"\n \"\\nExample: %s\" % (k, MANDATORY_BIDS_FIELDS[k])\n )\n\n return root, description\n\n\ndef validate_derivative_path(path, **kwargs):\n # Collect all paths that contain a dataset_description.json\n dd = Path(path) / 'dataset_description.json'\n description = json.loads(dd.read_text(encoding='utf-8'))\n pipeline_names = [pipeline[\"Name\"]\n for pipeline in description.get(\"GeneratedBy\", [])\n if \"Name\" in pipeline]\n if pipeline_names:\n pipeline_name = pipeline_names[0]\n elif \"PipelineDescription\" in description:\n warnings.warn(\"The PipelineDescription field was superseded \"\n \"by GeneratedBy in BIDS 1.4.0. You can use \"\n \"``pybids upgrade`` to update your derivative \"\n \"dataset.\")\n pipeline_name = description[\"PipelineDescription\"].get(\"Name\")\n else:\n pipeline_name = None\n if pipeline_name is None:\n raise BIDSDerivativesValidationError(\n \"Every valid BIDS-derivatives dataset must \"\n \"have a GeneratedBy.Name field set \"\n \"inside 'dataset_description.json'. 
\"\n f\"\\nExample: {MANDATORY_DERIVATIVES_FIELDS['GeneratedBy']}\"\n )\n return pipeline_name\n\n\ndef _sort_patterns(patterns, root):\n \"\"\"Return sorted patterns, from more specific to more general.\"\"\"\n regexes = [patt for patt in patterns if hasattr(patt, \"search\")]\n\n paths = [\n str((root / patt).absolute())\n for patt in listify(patterns)\n if not hasattr(patt, \"search\")\n ]\n # Sort patterns from general to specific\n paths.sort(key=len)\n\n # Combine and return (note path patterns are reversed, specific first)\n return [Path(p) for p in reversed(paths)] + regexes\n\n\ndef validate_indexing_args(ignore, force_index, root):\n if ignore is None:\n ignore = list(\n DEFAULT_LOCATIONS_TO_IGNORE - set(force_index or [])\n )\n\n # root has already been validated to be a directory\n ignore = _sort_patterns(ignore, root)\n force_index = _sort_patterns(force_index or [], root)\n\n # Derivatives get special handling; they shouldn't be indexed normally\n for entry in force_index:\n condi = (isinstance(entry, str) and\n str(entry.resolve()).startswith('derivatives'))\n if condi:\n msg = (\"Do not pass 'derivatives' in the force_index \"\n \"list. To index derivatives, either set \"\n \"derivatives=True, or use add_derivatives().\")\n raise ValueError(msg)\n\n return ignore, force_index\n", "path": "bids/layout/validation.py"}]} | 2,480 | 330 |
gh_patches_debug_11687 | rasdani/github-patches | git_diff | pypa__setuptools-2907 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`distutils` submodules being loaded from the stdlib
It seems the issue is that `distutils.sysconfig` is being loaded from the stdlib, even though [the distutils hack has an explicit check that submodules are loaded from the locally-bundled copy](https://github.com/pypa/setuptools/blob/dd5a2cec373ffe7eefc087c1cd06fb4e491a7e88/_distutils_hack/__init__.py#L55-L57).
_Originally posted by @jaraco in https://github.com/pypa/distutils/issues/16#issuecomment-980043534_
</issue>
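A quick way to reproduce the report is to check which files Python actually resolved `distutils` and `distutils.sysconfig` from; this is only a diagnostic sketch, not the fix.

```python
# Diagnostic sketch: with SETUPTOOLS_USE_DISTUTILS=local, both paths should point
# into setuptools/_distutils; the report is that the submodule still comes from the stdlib.
import distutils
import distutils.sysconfig

print(distutils.__file__)
print(distutils.sysconfig.__file__)
```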
<code>
[start of _distutils_hack/__init__.py]
1 import sys
2 import os
3 import re
4 import importlib
5 import warnings
6
7
8 is_pypy = '__pypy__' in sys.builtin_module_names
9
10
11 warnings.filterwarnings('ignore',
12 r'.+ distutils\b.+ deprecated',
13 DeprecationWarning)
14
15
16 def warn_distutils_present():
17 if 'distutils' not in sys.modules:
18 return
19 if is_pypy and sys.version_info < (3, 7):
20 # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
21 # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
22 return
23 warnings.warn(
24 "Distutils was imported before Setuptools, but importing Setuptools "
25 "also replaces the `distutils` module in `sys.modules`. This may lead "
26 "to undesirable behaviors or errors. To avoid these issues, avoid "
27 "using distutils directly, ensure that setuptools is installed in the "
28 "traditional way (e.g. not an editable install), and/or make sure "
29 "that setuptools is always imported before distutils.")
30
31
32 def clear_distutils():
33 if 'distutils' not in sys.modules:
34 return
35 warnings.warn("Setuptools is replacing distutils.")
36 mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
37 for name in mods:
38 del sys.modules[name]
39
40
41 def enabled():
42 """
43 Allow selection of distutils by environment variable.
44 """
45 which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
46 return which == 'local'
47
48
49 def ensure_local_distutils():
50 clear_distutils()
51 distutils = importlib.import_module('setuptools._distutils')
52 distutils.__name__ = 'distutils'
53 sys.modules['distutils'] = distutils
54
55 # sanity check that submodules load as expected
56 core = importlib.import_module('distutils.core')
57 assert '_distutils' in core.__file__, core.__file__
58
59
60 def do_override():
61 """
62 Ensure that the local copy of distutils is preferred over stdlib.
63
64 See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
65 for more motivation.
66 """
67 if enabled():
68 warn_distutils_present()
69 ensure_local_distutils()
70
71
72 class DistutilsMetaFinder:
73 def find_spec(self, fullname, path, target=None):
74 if path is not None:
75 return
76
77 method_name = 'spec_for_{fullname}'.format(**locals())
78 method = getattr(self, method_name, lambda: None)
79 return method()
80
81 def spec_for_distutils(self):
82 import importlib.abc
83 import importlib.util
84
85 class DistutilsLoader(importlib.abc.Loader):
86
87 def create_module(self, spec):
88 return importlib.import_module('setuptools._distutils')
89
90 def exec_module(self, module):
91 pass
92
93 return importlib.util.spec_from_loader('distutils', DistutilsLoader())
94
95 def spec_for_pip(self):
96 """
97 Ensure stdlib distutils when running under pip.
98 See pypa/pip#8761 for rationale.
99 """
100 if self.pip_imported_during_build():
101 return
102 clear_distutils()
103 self.spec_for_distutils = lambda: None
104
105 @staticmethod
106 def pip_imported_during_build():
107 """
108 Detect if pip is being imported in a build script. Ref #2355.
109 """
110 import traceback
111 return any(
112 frame.f_globals['__file__'].endswith('setup.py')
113 for frame, line in traceback.walk_stack(None)
114 )
115
116
117 DISTUTILS_FINDER = DistutilsMetaFinder()
118
119
120 def add_shim():
121 sys.meta_path.insert(0, DISTUTILS_FINDER)
122
123
124 def remove_shim():
125 try:
126 sys.meta_path.remove(DISTUTILS_FINDER)
127 except ValueError:
128 pass
129
[end of _distutils_hack/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py
--- a/_distutils_hack/__init__.py
+++ b/_distutils_hack/__init__.py
@@ -48,11 +48,15 @@
def ensure_local_distutils():
clear_distutils()
- distutils = importlib.import_module('setuptools._distutils')
- distutils.__name__ = 'distutils'
- sys.modules['distutils'] = distutils
- # sanity check that submodules load as expected
+ # With the DistutilsMetaFinder in place,
+ # perform an import to cause distutils to be
+ # loaded from setuptools._distutils. Ref #2906.
+ add_shim()
+ importlib.import_module('distutils')
+ remove_shim()
+
+ # check that submodules load as expected
core = importlib.import_module('distutils.core')
assert '_distutils' in core.__file__, core.__file__
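A rough way to sanity-check the patched behaviour, assuming setuptools (with its bundled `_distutils_hack`) is importable and the environment variable is honoured as in the code above:

```python
import os
os.environ["SETUPTOOLS_USE_DISTUTILS"] = "local"  # opt in to the bundled copy

import _distutils_hack
_distutils_hack.do_override()

import distutils.sysconfig
# Submodules should now resolve against setuptools._distutils as well.
assert "_distutils" in distutils.sysconfig.__file__, distutils.sysconfig.__file__
```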
| {"golden_diff": "diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py\n--- a/_distutils_hack/__init__.py\n+++ b/_distutils_hack/__init__.py\n@@ -48,11 +48,15 @@\n \n def ensure_local_distutils():\n clear_distutils()\n- distutils = importlib.import_module('setuptools._distutils')\n- distutils.__name__ = 'distutils'\n- sys.modules['distutils'] = distutils\n \n- # sanity check that submodules load as expected\n+ # With the DistutilsMetaFinder in place,\n+ # perform an import to cause distutils to be\n+ # loaded from setuptools._distutils. Ref #2906.\n+ add_shim()\n+ importlib.import_module('distutils')\n+ remove_shim()\n+\n+ # check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n", "issue": "`distutils` submodules being loaded from the stdlib\nIt seems the issue is that `distutils.sysconfig` is being loaded from the stdlib, even though [the distutils hack has an explicit check that submodules are loaded from the locally-bundled copy](https://github.com/pypa/setuptools/blob/dd5a2cec373ffe7eefc087c1cd06fb4e491a7e88/_distutils_hack/__init__.py#L55-L57).\r\n\r\n_Originally posted by @jaraco in https://github.com/pypa/distutils/issues/16#issuecomment-980043534_\n`distutils` submodules being loaded from the stdlib\nIt seems the issue is that `distutils.sysconfig` is being loaded from the stdlib, even though [the distutils hack has an explicit check that submodules are loaded from the locally-bundled copy](https://github.com/pypa/setuptools/blob/dd5a2cec373ffe7eefc087c1cd06fb4e491a7e88/_distutils_hack/__init__.py#L55-L57).\r\n\r\n_Originally posted by @jaraco in https://github.com/pypa/distutils/issues/16#issuecomment-980043534_\n", "before_files": [{"content": "import sys\nimport os\nimport re\nimport importlib\nimport warnings\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\nwarnings.filterwarnings('ignore',\n r'.+ distutils\\b.+ deprecated',\n DeprecationWarning)\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. 
not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n distutils = importlib.import_module('setuptools._distutils')\n distutils.__name__ = 'distutils'\n sys.modules['distutils'] = distutils\n\n # sanity check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib.abc\n import importlib.util\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return importlib.import_module('setuptools._distutils')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n if self.pip_imported_during_build():\n return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n @staticmethod\n def pip_imported_during_build():\n \"\"\"\n Detect if pip is being imported in a build script. Ref #2355.\n \"\"\"\n import traceback\n return any(\n frame.f_globals['__file__'].endswith('setup.py')\n for frame, line in traceback.walk_stack(None)\n )\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef add_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}]} | 2,038 | 229 |
gh_patches_debug_17193 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-2195 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Negative Hydro values (in EIA, US) cause parser to fail
When scraping historical data, I've noticed that negative hydro values are sometimes returned by EIA. For example, for US-TN at 2018-09-23 6am local time, hydro production of -144 was reported.
I am not sure whether this means we should just always move negative hydro production values over to storage, or whether for each BA we should decide if the hydro production should be 'storage' hydro or run-of-river. What do you think? @systemcatch I guess you are closest to the EIA data.
</issue>
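One possible convention, mirrored by the patch at the end of this entry, is to report negative hydro readings under `storage` rather than `production`; whether that is right for every balancing authority (pumped storage versus run-of-river) is exactly the open question here. A minimal sketch:

```python
def split_hydro(value):
    """Route negative hydro readings to storage, keep positive ones as production.

    Sketch only; the sign convention expected by downstream consumers is an
    assumption here, not something verified against the backend.
    """
    if value < 0:
        return {"production": {}, "storage": {"hydro": value}}
    return {"production": {"hydro": value}, "storage": {}}


print(split_hydro(-144))  # the US-TN reading quoted above
print(split_hydro(120))
```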
<code>
[start of parsers/EIA.py]
1 #!/usr/bin/env python3
2 """Parser for U.S. Energy Information Administration, https://www.eia.gov/ .
3
4 Aggregates and standardizes data from most of the US ISOs,
5 and exposes them via a unified API.
6
7 Requires an API key, set in the EIA_KEY environment variable. Get one here:
8 https://www.eia.gov/opendata/register.php
9 """
10 import datetime
11 import os
12
13 import arrow
14 from dateutil import parser, tz
15 os.environ.setdefault('EIA_KEY', 'eia_key')
16 from eiapy import Series
17 import requests
18
19 from .lib.validation import validate
20 from .ENTSOE import merge_production_outputs
21
22 EXCHANGES = {
23 'MX-BC->US-CA': 'EBA.CISO-CFE.ID.H',
24 'US-BPA->US-IPC': 'EBA.BPAT-IPCO.ID.H',
25 'US-SPP->US-TX': 'SWPP.ID.H-EBA.ERCO',
26 'US-MISO->US-PJM': 'EBA.MISO-PJM.ID.H',
27 'US-MISO->US-SPP': 'EBA.MISO-SWPP.ID.H',
28 'US-NEISO->US-NY': 'EBA.ISNE-NYIS.ID.H',
29 'US-NY->US-PJM': 'EBA.NYIS-PJM.ID.H'
30 }
31 # based on https://www.eia.gov/beta/electricity/gridmonitor/dashboard/electric_overview/US48/US48
32 REGIONS = {
33 'US-CA': 'CAL',
34 'US-CAR': 'CAR',
35 'US-SPP': 'CENT',
36 'US-FL': 'FLA',
37 'US-PJM': 'MIDA',
38 'US-MISO': 'MIDW',
39 'US-NEISO': 'NE',
40 'US-NY': 'NY',
41 'US-NW': 'NW',
42 'US-SE': 'SE',
43 'US-SEC': 'SEC',
44 'US-SVERI': 'SW',
45 'US-TN': 'TEN',
46 'US-TX': 'TEX',
47 }
48 TYPES = {
49 # 'biomass': 'BM', # not currently supported
50 'coal': 'COL',
51 'gas': 'NG',
52 'hydro': 'WAT',
53 'nuclear': 'NUC',
54 'oil': 'OIL',
55 'unknown': 'OTH',
56 'solar': 'SUN',
57 'wind': 'WND',
58 }
59 PRODUCTION_SERIES = 'EBA.%s-ALL.NG.H'
60 PRODUCTION_MIX_SERIES = 'EBA.%s-ALL.NG.%s.H'
61 DEMAND_SERIES = 'EBA.%s-ALL.D.H'
62 FORECAST_SERIES = 'EBA.%s-ALL.DF.H'
63
64
65 def fetch_consumption_forecast(zone_key, session=None, target_datetime=None, logger=None):
66 return _fetch_series(zone_key, FORECAST_SERIES % REGIONS[zone_key],
67 session=session, target_datetime=target_datetime,
68 logger=logger)
69
70
71 def fetch_production(zone_key, session=None, target_datetime=None, logger=None):
72 return _fetch_series(zone_key, PRODUCTION_SERIES % REGIONS[zone_key],
73 session=session, target_datetime=target_datetime,
74 logger=logger)
75
76
77 def fetch_consumption(zone_key, session=None, target_datetime=None, logger=None):
78 consumption = _fetch_series(zone_key, DEMAND_SERIES % REGIONS[zone_key],
79 session=session, target_datetime=target_datetime,
80 logger=logger)
81 for point in consumption:
82 point['consumption'] = point.pop('value')
83
84 return consumption
85
86
87 def fetch_production_mix(zone_key, session=None, target_datetime=None, logger=None):
88 mixes = []
89 for type, code in TYPES.items():
90 series = PRODUCTION_MIX_SERIES % (REGIONS[zone_key], code)
91 mix = _fetch_series(zone_key, series, session=session,
92 target_datetime=target_datetime, logger=logger)
93 if not mix:
94 continue
95 for point in mix:
96 point.update({
97 'production': {type: point.pop('value')},
98 'storage': {}, # required by merge_production_outputs()
99 })
100
101 #replace small negative solar values (>-5) with 0s
102 point = validate(point, logger=logger, remove_negative=True)
103 mixes.append(mix)
104
105 return merge_production_outputs(mixes, zone_key, merge_source='eia.gov')
106
107
108 def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):
109 sortedcodes = '->'.join(sorted([zone_key1, zone_key2]))
110 exchange = _fetch_series(sortedcodes, EXCHANGES[sortedcodes], session=session,
111 target_datetime=target_datetime, logger=logger)
112 for point in exchange:
113 point.update({
114 'sortedZoneKeys': point.pop('zoneKey'),
115 'netFlow': point.pop('value'),
116 })
117 if sortedcodes == 'MX-BC->US-CA':
118 point['netFlow'] = -point['netFlow']
119
120 return exchange
121
122
123 def _fetch_series(zone_key, series_id, session=None, target_datetime=None,
124 logger=None):
125 """Fetches and converts a data series."""
126 key = os.environ['EIA_KEY']
127 assert key and key != 'eia_key', key
128
129 s = session or requests.Session()
130 series = Series(series_id=series_id, session=s)
131
132 if target_datetime:
133 utc = tz.gettz('UTC')
134 #eia currently only accepts utc timestamps in the form YYYYMMDDTHHZ
135 dt = target_datetime.astimezone(utc).strftime('%Y%m%dT%HZ')
136 raw_data = series.last_from(24, end=dt)
137 else:
138 # Get the last 24 hours available.
139 raw_data = series.last(24)
140
141 # UTC timestamp with no offset returned.
142 if not raw_data.get('series'):
143 # Series doesn't exist. Probably requesting a fuel from a region that
144 # doesn't have any capacity for that fuel type.
145 return []
146
147 return [{
148 'zoneKey': zone_key,
149 'datetime': parser.parse(datapoint[0]),
150 'value': datapoint[1],
151 'source': 'eia.gov',
152 } for datapoint in raw_data['series'][0]['data']]
153
154
155 def main():
156 "Main method, never used by the Electricity Map backend, but handy for testing."
157 from pprint import pprint
158 pprint(fetch_consumption_forecast('US-NY'))
159 pprint(fetch_production('US-SEC'))
160 pprint(fetch_production_mix('US-TN'))
161 pprint(fetch_consumption('US-CAR'))
162 pprint(fetch_exchange('MX-BC', 'US-CA'))
163
164
165 if __name__ == '__main__':
166 main()
167
[end of parsers/EIA.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsers/EIA.py b/parsers/EIA.py
--- a/parsers/EIA.py
+++ b/parsers/EIA.py
@@ -93,12 +93,18 @@
if not mix:
continue
for point in mix:
- point.update({
- 'production': {type: point.pop('value')},
- 'storage': {}, # required by merge_production_outputs()
- })
-
- #replace small negative solar values (>-5) with 0s
+ if type == 'hydro' and point['value'] < 0:
+ point.update({
+ 'production': {},# required by merge_production_outputs()
+ 'storage': {type: point.pop('value')},
+ })
+ else:
+ point.update({
+ 'production': {type: point.pop('value')},
+ 'storage': {}, # required by merge_production_outputs()
+ })
+
+ #replace small negative values (>-5) with 0s This is necessary for solar
point = validate(point, logger=logger, remove_negative=True)
mixes.append(mix)
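Applied to the US-TN data point quoted in the issue, the new branch does roughly the following (datetime and other fields omitted for brevity):

```python
point = {"zoneKey": "US-TN", "value": -144, "source": "eia.gov"}
type = "hydro"  # shadows the builtin, matching the loop variable in the parser

if type == "hydro" and point["value"] < 0:
    point.update({
        "production": {},
        "storage": {type: point.pop("value")},
    })

assert point["production"] == {} and point["storage"] == {"hydro": -144}
```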
| {"golden_diff": "diff --git a/parsers/EIA.py b/parsers/EIA.py\n--- a/parsers/EIA.py\n+++ b/parsers/EIA.py\n@@ -93,12 +93,18 @@\n if not mix:\n continue\n for point in mix:\n- point.update({\n- 'production': {type: point.pop('value')},\n- 'storage': {}, # required by merge_production_outputs()\n- })\n-\n- #replace small negative solar values (>-5) with 0s\n+ if type == 'hydro' and point['value'] < 0:\n+ point.update({\n+ 'production': {},# required by merge_production_outputs()\n+ 'storage': {type: point.pop('value')},\n+ })\n+ else:\n+ point.update({\n+ 'production': {type: point.pop('value')},\n+ 'storage': {}, # required by merge_production_outputs()\n+ })\n+\n+ #replace small negative values (>-5) with 0s This is necessary for solar\n point = validate(point, logger=logger, remove_negative=True)\n mixes.append(mix)\n", "issue": "Negative Hydro values (in EIA, US) cause parser to fail\nWhen scraping historic data, I've noticed that negative hydro values are sometimes returned by EIA. For example, for US-TN 2018-09-23 6am local time, there was hydro production reported of -144.\r\n\r\nI am not sure if this means we should just always move negative hydro production values over to storage, or if for each BA we should decide if the hydro production should be 'storage' hydro or run-of-river. What you think? @systemcatch I guess you are closest to the EIA data.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Parser for U.S. Energy Information Administration, https://www.eia.gov/ .\n\nAggregates and standardizes data from most of the US ISOs,\nand exposes them via a unified API.\n\nRequires an API key, set in the EIA_KEY environment variable. Get one here:\nhttps://www.eia.gov/opendata/register.php\n\"\"\"\nimport datetime\nimport os\n\nimport arrow\nfrom dateutil import parser, tz\nos.environ.setdefault('EIA_KEY', 'eia_key')\nfrom eiapy import Series\nimport requests\n\nfrom .lib.validation import validate\nfrom .ENTSOE import merge_production_outputs\n\nEXCHANGES = {\n 'MX-BC->US-CA': 'EBA.CISO-CFE.ID.H',\n 'US-BPA->US-IPC': 'EBA.BPAT-IPCO.ID.H',\n 'US-SPP->US-TX': 'SWPP.ID.H-EBA.ERCO',\n 'US-MISO->US-PJM': 'EBA.MISO-PJM.ID.H',\n 'US-MISO->US-SPP': 'EBA.MISO-SWPP.ID.H',\n 'US-NEISO->US-NY': 'EBA.ISNE-NYIS.ID.H',\n 'US-NY->US-PJM': 'EBA.NYIS-PJM.ID.H'\n}\n# based on https://www.eia.gov/beta/electricity/gridmonitor/dashboard/electric_overview/US48/US48\nREGIONS = {\n 'US-CA': 'CAL',\n 'US-CAR': 'CAR',\n 'US-SPP': 'CENT',\n 'US-FL': 'FLA',\n 'US-PJM': 'MIDA',\n 'US-MISO': 'MIDW',\n 'US-NEISO': 'NE',\n 'US-NY': 'NY',\n 'US-NW': 'NW',\n 'US-SE': 'SE',\n 'US-SEC': 'SEC',\n 'US-SVERI': 'SW',\n 'US-TN': 'TEN',\n 'US-TX': 'TEX',\n}\nTYPES = {\n # 'biomass': 'BM', # not currently supported\n 'coal': 'COL',\n 'gas': 'NG',\n 'hydro': 'WAT',\n 'nuclear': 'NUC',\n 'oil': 'OIL',\n 'unknown': 'OTH',\n 'solar': 'SUN',\n 'wind': 'WND',\n}\nPRODUCTION_SERIES = 'EBA.%s-ALL.NG.H'\nPRODUCTION_MIX_SERIES = 'EBA.%s-ALL.NG.%s.H'\nDEMAND_SERIES = 'EBA.%s-ALL.D.H'\nFORECAST_SERIES = 'EBA.%s-ALL.DF.H'\n\n\ndef fetch_consumption_forecast(zone_key, session=None, target_datetime=None, logger=None):\n return _fetch_series(zone_key, FORECAST_SERIES % REGIONS[zone_key],\n session=session, target_datetime=target_datetime,\n logger=logger)\n\n\ndef fetch_production(zone_key, session=None, target_datetime=None, logger=None):\n return _fetch_series(zone_key, PRODUCTION_SERIES % REGIONS[zone_key],\n session=session, target_datetime=target_datetime,\n logger=logger)\n\n\ndef fetch_consumption(zone_key, 
session=None, target_datetime=None, logger=None):\n consumption = _fetch_series(zone_key, DEMAND_SERIES % REGIONS[zone_key],\n session=session, target_datetime=target_datetime,\n logger=logger)\n for point in consumption:\n point['consumption'] = point.pop('value')\n\n return consumption\n\n\ndef fetch_production_mix(zone_key, session=None, target_datetime=None, logger=None):\n mixes = []\n for type, code in TYPES.items():\n series = PRODUCTION_MIX_SERIES % (REGIONS[zone_key], code)\n mix = _fetch_series(zone_key, series, session=session,\n target_datetime=target_datetime, logger=logger)\n if not mix:\n continue\n for point in mix:\n point.update({\n 'production': {type: point.pop('value')},\n 'storage': {}, # required by merge_production_outputs()\n })\n\n #replace small negative solar values (>-5) with 0s\n point = validate(point, logger=logger, remove_negative=True)\n mixes.append(mix)\n\n return merge_production_outputs(mixes, zone_key, merge_source='eia.gov')\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n sortedcodes = '->'.join(sorted([zone_key1, zone_key2]))\n exchange = _fetch_series(sortedcodes, EXCHANGES[sortedcodes], session=session,\n target_datetime=target_datetime, logger=logger)\n for point in exchange:\n point.update({\n 'sortedZoneKeys': point.pop('zoneKey'),\n 'netFlow': point.pop('value'),\n })\n if sortedcodes == 'MX-BC->US-CA':\n point['netFlow'] = -point['netFlow']\n\n return exchange\n\n\ndef _fetch_series(zone_key, series_id, session=None, target_datetime=None,\n logger=None):\n \"\"\"Fetches and converts a data series.\"\"\"\n key = os.environ['EIA_KEY']\n assert key and key != 'eia_key', key\n\n s = session or requests.Session()\n series = Series(series_id=series_id, session=s)\n\n if target_datetime:\n utc = tz.gettz('UTC')\n #eia currently only accepts utc timestamps in the form YYYYMMDDTHHZ\n dt = target_datetime.astimezone(utc).strftime('%Y%m%dT%HZ')\n raw_data = series.last_from(24, end=dt)\n else:\n # Get the last 24 hours available.\n raw_data = series.last(24)\n\n # UTC timestamp with no offset returned.\n if not raw_data.get('series'):\n # Series doesn't exist. Probably requesting a fuel from a region that\n # doesn't have any capacity for that fuel type.\n return []\n\n return [{\n 'zoneKey': zone_key,\n 'datetime': parser.parse(datapoint[0]),\n 'value': datapoint[1],\n 'source': 'eia.gov',\n } for datapoint in raw_data['series'][0]['data']]\n\n\ndef main():\n \"Main method, never used by the Electricity Map backend, but handy for testing.\"\n from pprint import pprint\n pprint(fetch_consumption_forecast('US-NY'))\n pprint(fetch_production('US-SEC'))\n pprint(fetch_production_mix('US-TN'))\n pprint(fetch_consumption('US-CAR'))\n pprint(fetch_exchange('MX-BC', 'US-CA'))\n\n\nif __name__ == '__main__':\n main()\n", "path": "parsers/EIA.py"}]} | 2,534 | 248 |
gh_patches_debug_64121 | rasdani/github-patches | git_diff | plotly__dash-333 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The README is in markdown and doesn't render properly on pypi.io
See: https://pypi.org/project/dash/
</issue>
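The usual remedy is to declare the long description as Markdown so PyPI can render it; below is a minimal sketch with placeholder metadata (it assumes a reasonably recent setuptools and twine, and is not the real dash `setup()` call):

```python
import io
from setuptools import setup

setup(
    name="example",          # placeholder metadata for illustration only
    version="0.0.1",
    long_description=io.open("README.md", encoding="utf-8").read(),
    long_description_content_type="text/markdown",  # tells PyPI to render Markdown
)
```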
<code>
[start of setup.py]
1 import io
2 from setuptools import setup, find_packages
3
4 main_ns = {}
5 exec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used
6
7 setup(
8 name='dash',
9 version=main_ns['__version__'],
10 author='chris p',
11 author_email='[email protected]',
12 packages=find_packages(exclude=['tests*']),
13 license='MIT',
14 description=('A Python framework for building reactive web-apps. '
15 'Developed by Plotly.'),
16 long_description=io.open('README.md', encoding='utf-8').read(),
17 install_requires=[
18 'Flask>=0.12',
19 'flask-compress',
20 'plotly',
21 'dash_renderer',
22 ],
23 url='https://plot.ly/dash',
24 classifiers=[
25 'Development Status :: 5 - Production/Stable',
26 'Environment :: Web Environment',
27 'Framework :: Flask',
28 'Intended Audience :: Developers',
29 'Intended Audience :: Education',
30 'Intended Audience :: Financial and Insurance Industry',
31 'Intended Audience :: Healthcare Industry',
32 'Intended Audience :: Manufacturing',
33 'Intended Audience :: Science/Research',
34 'License :: OSI Approved :: MIT License',
35 'Programming Language :: Python :: 2.7',
36 'Programming Language :: Python :: 3.3',
37 'Programming Language :: Python :: 3.4',
38 'Programming Language :: Python :: 3.5',
39 'Programming Language :: Python :: 3.6',
40 'Topic :: Database :: Front-Ends',
41 'Topic :: Office/Business :: Financial :: Spreadsheet',
42 'Topic :: Scientific/Engineering :: Visualization',
43 'Topic :: Software Development :: Libraries :: Application Frameworks',
44 'Topic :: Software Development :: Widget Sets'
45 ]
46 )
47
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -14,6 +14,7 @@
description=('A Python framework for building reactive web-apps. '
'Developed by Plotly.'),
long_description=io.open('README.md', encoding='utf-8').read(),
+ long_description_content_type='text/markdown',
install_requires=[
'Flask>=0.12',
'flask-compress',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,6 +14,7 @@\n description=('A Python framework for building reactive web-apps. '\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n+ long_description_content_type='text/markdown',\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n", "issue": "The README is in markdown and doesn't render properly on pypi.io\nSee: https://pypi.org/project/dash/\r\n\n", "before_files": [{"content": "import io\nfrom setuptools import setup, find_packages\n\nmain_ns = {}\nexec(open('dash/version.py').read(), main_ns) # pylint: disable=exec-used\n\nsetup(\n name='dash',\n version=main_ns['__version__'],\n author='chris p',\n author_email='[email protected]',\n packages=find_packages(exclude=['tests*']),\n license='MIT',\n description=('A Python framework for building reactive web-apps. '\n 'Developed by Plotly.'),\n long_description=io.open('README.md', encoding='utf-8').read(),\n install_requires=[\n 'Flask>=0.12',\n 'flask-compress',\n 'plotly',\n 'dash_renderer',\n ],\n url='https://plot.ly/dash',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Flask',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Financial and Insurance Industry',\n 'Intended Audience :: Healthcare Industry',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Database :: Front-Ends',\n 'Topic :: Office/Business :: Financial :: Spreadsheet',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Widget Sets'\n ]\n)\n", "path": "setup.py"}]} | 1,022 | 104 |
gh_patches_debug_40366 | rasdani/github-patches | git_diff | chainer__chainer-321 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hierarchical softmax doesn't have `to_cpu`
Same problem as #276
</issue>
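For context, `to_gpu` in the file below copies the `paths`/`codes` lookup tables to the device, so the missing `to_cpu` has to bring those buffers back. A rough sketch of such a counterpart, assuming the flattened `paths`/`codes`/`begins` arrays already exist (the `function` and `cuda` names are the module-level imports in the file):

```python
def to_cpu(self):
    # Sketch only: mirror to_gpu() by copying the auxiliary buffers back to the host.
    function.Function.to_cpu(self)
    self.paths = cuda.to_cpu(self.paths)
    self.codes = cuda.to_cpu(self.codes)
    self.begins = cuda.to_cpu(self.begins)
```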
<code>
[start of chainer/functions/hierarchical_softmax.py]
1 import numpy
2 import six
3
4 from chainer import cuda
5 from chainer import function
6 from chainer.utils import type_check
7
8
9 class TreeParser(object):
10
11 def __init__(self):
12 self.next_id = 0
13
14 def size(self):
15 return self.next_id
16
17 def get_paths(self):
18 return self.paths
19
20 def get_codes(self):
21 return self.codes
22
23 def parse(self, tree):
24 self.next_id = 0
25 self.path = []
26 self.code = []
27 self.paths = {}
28 self.codes = {}
29 self._parse(tree)
30
31 assert(len(self.path) == 0)
32 assert(len(self.code) == 0)
33 assert(len(self.paths) == len(self.codes))
34
35 def _parse(self, node):
36 if isinstance(node, tuple):
37 # internal node
38 if len(node) != 2:
39 raise ValueError(
40 'All internal nodes must have two child nodes')
41 left, right = node
42 self.path.append(self.next_id)
43 self.next_id += 1
44 self.code.append(1.0)
45 self._parse(left)
46
47 self.code[-1] = -1.0
48 self._parse(right)
49
50 self.path.pop()
51 self.code.pop()
52
53 else:
54 # leaf node
55 self.paths[node] = numpy.array(self.path).astype(numpy.int32)
56 self.codes[node] = numpy.array(self.code).astype(numpy.float32)
57
58
59 class BinaryHierarchicalSoftmax(function.Function):
60
61 """Implementation of hierarchical softmax (HSM).
62
63 In natural language applications, vocabulary size is too large to use
64 softmax loss.
65 Instead, the hierarchical softmax uses product of sigmoid functions.
66 It costs only :math:`O(\log(n))` time where :math:`n` is the vocabulary
67     size on average.
68
69     At first a user needs to prepare a binary tree in which each leaf
70     corresponds to a word in the vocabulary.
71 When a word :math:`x` is given, exactly one path from the root of the tree
72 to the leaf of the word exists.
73 Let :math:`\mbox{path}(x) = ((e_1, b_1), \dots, (e_m, b_m))` be the path of
74 :math:`x`, where :math:`e_i` is an index of :math:`i`-th internal node, and
75 :math:`b_i \in \{-1, 1\}` indicates direction to move at :math:`i`-th
76 internal node (-1 is left, and 1 is right).
77 Then, the probability of :math:`x` is given as below:
78
79 .. math::
80
81 P(x) &= \prod_{(e_i, b_i) \in \mbox{path}(x)}P(b_i | e_i) \\\\
82 &= \prod_{(e_i, b_i) \in \mbox{path}(x)}\sigma(b_i x^\\top
83 w_{e_i}),
84
85 where :math:`\sigma(\\cdot)` is a sigmoid function, and :math:`w` is a
86 weight matrix.
87
88     This function costs :math:`O(\log(n))` time as the average length of paths
89 is :math:`O(\log(n))`, and :math:`O(n)` memory as the number of internal
90 nodes equals :math:`n - 1`.
91
92 Args:
93 in_size (int): Dimension of input vectors.
94 tree: A binary tree made with tuples like `((1, 2), 3)`.
95
96 See: Hierarchical Probabilistic Neural Network Language Model [Morin+,
97 AISTAT2005].
98
99 """
100
101 parameter_names = ('W',)
102 gradient_names = ('gW',)
103
104 def __init__(self, in_size, tree):
105 parser = TreeParser()
106 parser.parse(tree)
107 self.paths = parser.get_paths()
108 self.codes = parser.get_codes()
109
110 self.W = numpy.random.uniform(
111 -1, 1, (parser.size(), in_size)).astype(numpy.float32)
112 self.gW = numpy.zeros(self.W.shape, numpy.float32)
113
114 def check_type_forward(self, in_types):
115 type_check.expect(in_types.size() == 2)
116 x_type, t_type = in_types
117
118 type_check.expect(
119 x_type.dtype == numpy.float32,
120 x_type.ndim == 2,
121 t_type.dtype == numpy.int32,
122 t_type.ndim == 1,
123 x_type.shape[0] == t_type.shape[0]
124 )
125
126 def check_type_backward(self, in_types, out_types):
127 type_check.expect(
128 out_types.size() == 1,
129 out_types[0].dtype == numpy.float32,
130 out_types[0].ndim == 0
131 )
132
133 def forward_cpu(self, args):
134 x, t = args
135
136 loss = numpy.float32(0.0)
137 for ix, it in six.moves.zip(x, t):
138 loss += self._forward_cpu_one(ix, it)
139 return numpy.array(loss),
140
141 def _forward_cpu_one(self, x, t):
142 assert t in self.paths
143
144 w = self.W[self.paths[t]]
145 wxy = w.dot(x) * self.codes[t]
146 loss = numpy.logaddexp(0.0, -wxy) # == log(1 + exp(-wxy))
147 return numpy.sum(loss)
148
149 def backward_cpu(self, args, loss):
150 x, t = args
151 gloss, = loss
152 gx = numpy.empty_like(x)
153 for i, (ix, it) in enumerate(six.moves.zip(x, t)):
154 gx[i] = self._backward_cpu_one(ix, it, gloss)
155 return gx, None
156
157 def _backward_cpu_one(self, x, t, gloss):
158 path = self.paths[t]
159 w = self.W[path]
160 wxy = w.dot(x) * self.codes[t]
161 g = -gloss * self.codes[t] / (1.0 + numpy.exp(wxy))
162 gx = g.dot(w)
163 gw = g.reshape((g.shape[0], 1)).dot(x.reshape(1, x.shape[0]))
164 self.gW[path] += gw
165 return gx
166
167 def to_gpu(self, device=None):
168 function.Function.to_gpu(self, device)
169
170 n_vocab = max(self.paths.keys()) + 1
171 paths = cuda.to_gpu(numpy.concatenate(
172 [self.paths[i] for i in range(n_vocab) if i in self.paths]))
173 codes = cuda.to_gpu(numpy.concatenate(
174 [self.codes[i] for i in range(n_vocab) if i in self.codes]))
175
176 begins = numpy.empty((n_vocab + 1,), dtype=numpy.int32)
177 begins[0] = 0
178 for i in range(0, n_vocab):
179 length = len(self.paths[i]) if i in self.paths else 0
180 begins[i + 1] = begins[i] + length
181
182 self.paths = paths
183 self.codes = codes
184 self.begins = cuda.to_gpu(begins)
185
186 def forward_gpu(self, inputs):
187 x, t = inputs
188
189 max_length = cuda.reduce(
190 'int* t, int* begins', 'begins[t[i] + 1] - begins[t[i]]',
191 'max(a,b)', '0', 'binary_hierarchical_softmax_max_length',
192 numpy.int32
193 )(t, self.begins)
194 max_length = cuda.to_cpu(max_length)[()]
195
196 length = max_length * x.shape[0]
197 ls = cuda.empty((length,), dtype=numpy.float32)
198 n_in = x.shape[1]
199 wxy = cuda.empty((length,), dtype=numpy.float32)
200 cuda.elementwise(
201 '''float* ls, float* wxy, const float* x, const float* w,
202 const int* ts, const int* paths, const float* codes,
203 const int* begins, int c, int max_length''',
204 '''
205 int ind = i / max_length;
206 int offset = i - ind * max_length;
207 int t = ts[ind];
208
209 int begin = begins[t];
210 int length = begins[t + 1] - begins[t];
211
212 if (offset < length) {
213 int p = begin + offset;
214 int node = paths[p];
215
216 x = &x[ind * c];
217
218 float wx = 0;
219 for (int j = 0; j < c; ++j) {
220 wx += w[node * c + j] * x[j];
221 }
222 wxy[i] = wx * codes[p];
223 ls[i] = log(1 + exp(-wxy[i]));
224 } else {
225 ls[i] = 0;
226 }
227 ''',
228 'binary_hierarchical_softmax_forward'
229 )(ls, wxy, x, self.W, t, self.paths, self.codes, self.begins,
230 n_in, max_length)
231 self.max_length = max_length
232 self.wxy = wxy
233 return cuda.gpuarray.sum(ls),
234
235 def backward_gpu(self, inputs, loss):
236 x, t = inputs
237 gloss, = loss
238
239 n_in = x.shape[1]
240 gx = cuda.zeros_like(x)
241 cuda.elementwise(
242 '''const float* wxy, float* gx, float* gw, const float* x,
243 const float* w, const int* ts, const int* paths,
244 const float* codes, const int* begins,
245 const float* gloss, int c, int max_length''',
246 '''
247 int ind = i / max_length;
248 int offset = i - ind * max_length;
249 int t = ts[ind];
250
251 int begin = begins[t];
252 int length = begins[t + 1] - begins[t];
253
254 if (offset < length) {
255 int p = begin + offset;
256 int node = paths[p];
257 float code = codes[p];
258 gx = &gx[ind * c];
259 x = &x[ind * c];
260
261 float g = -*gloss * code / (1.0 + exp(wxy[i]));
262 for (int j = 0; j < c; ++j) {
263 atomicAdd(gx + j, g * w[node * c + j]);
264 atomicAdd(gw + node * c + j, g * x[j]);
265 }
266 }
267 ''',
268 'binary_hierarchical_softmax_bwd'
269 )(self.wxy, gx, self.gW, x, self.W, t, self.paths, self.codes,
270 self.begins, gloss, n_in, self.max_length)
271 return gx, None
272
273
274 def create_huffman_tree(word_counts):
275 """Make a huffman tree from a dictionary containing word counts.
276
277     This method creates a binary huffman tree, which is required for
278 :class:`BinaryHierarchicalSoftmax`.
279 For example, ``{0: 8, 1: 5, 2: 6, 3: 4}`` is converted to
280 ``((3, 1), (2, 0))``.
281
282 Args:
283 word_counts (``dict`` of ``int`` key and ``int`` or ``float`` values.):
284 Dictionary representing counts of words.
285
286 Returns:
287         Binary huffman tree with tuples and keys of ``word_counts``.
288
289 """
290 if len(word_counts) == 0:
291 raise ValueError('Empty vocabulary')
292
293 q = six.moves.queue.PriorityQueue()
294     # Add a unique id to each entry so that we can compare two entries with the same
295     # counts.
296     # Note that iteritems randomly orders the entries.
297 for uid, (w, c) in enumerate(six.iteritems(word_counts)):
298 q.put((c, uid, w))
299
300 while q.qsize() >= 2:
301 (count1, id1, word1) = q.get()
302 (count2, id2, word2) = q.get()
303 count = count1 + count2
304 tree = (word1, word2)
305 q.put((count, min(id1, id2), tree))
306
307 return q.get()[2]
308
[end of chainer/functions/hierarchical_softmax.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/functions/hierarchical_softmax.py b/chainer/functions/hierarchical_softmax.py
--- a/chainer/functions/hierarchical_softmax.py
+++ b/chainer/functions/hierarchical_softmax.py
@@ -104,8 +104,20 @@
def __init__(self, in_size, tree):
parser = TreeParser()
parser.parse(tree)
- self.paths = parser.get_paths()
- self.codes = parser.get_codes()
+ paths = parser.get_paths()
+ codes = parser.get_codes()
+ n_vocab = max(paths.keys()) + 1
+
+ self.paths = numpy.concatenate(
+ [paths[i] for i in range(n_vocab) if i in paths])
+ self.codes = numpy.concatenate(
+ [codes[i] for i in range(n_vocab) if i in codes])
+ begins = numpy.empty((n_vocab + 1,), dtype=numpy.int32)
+ begins[0] = 0
+ for i in range(0, n_vocab):
+ length = len(paths[i]) if i in paths else 0
+ begins[i + 1] = begins[i] + length
+ self.begins = begins
self.W = numpy.random.uniform(
-1, 1, (parser.size(), in_size)).astype(numpy.float32)
@@ -139,10 +151,11 @@
return numpy.array(loss),
def _forward_cpu_one(self, x, t):
- assert t in self.paths
+ begin = self.begins[t]
+ end = self.begins[t + 1]
- w = self.W[self.paths[t]]
- wxy = w.dot(x) * self.codes[t]
+ w = self.W[self.paths[begin:end]]
+ wxy = w.dot(x) * self.codes[begin:end]
loss = numpy.logaddexp(0.0, -wxy) # == log(1 + exp(-wxy))
return numpy.sum(loss)
@@ -155,10 +168,13 @@
return gx, None
def _backward_cpu_one(self, x, t, gloss):
- path = self.paths[t]
+ begin = self.begins[t]
+ end = self.begins[t + 1]
+
+ path = self.paths[begin:end]
w = self.W[path]
- wxy = w.dot(x) * self.codes[t]
- g = -gloss * self.codes[t] / (1.0 + numpy.exp(wxy))
+ wxy = w.dot(x) * self.codes[begin:end]
+ g = -gloss * self.codes[begin:end] / (1.0 + numpy.exp(wxy))
gx = g.dot(w)
gw = g.reshape((g.shape[0], 1)).dot(x.reshape(1, x.shape[0]))
self.gW[path] += gw
@@ -167,21 +183,16 @@
def to_gpu(self, device=None):
function.Function.to_gpu(self, device)
- n_vocab = max(self.paths.keys()) + 1
- paths = cuda.to_gpu(numpy.concatenate(
- [self.paths[i] for i in range(n_vocab) if i in self.paths]))
- codes = cuda.to_gpu(numpy.concatenate(
- [self.codes[i] for i in range(n_vocab) if i in self.codes]))
+ self.paths = cuda.to_gpu(self.paths, device)
+ self.codes = cuda.to_gpu(self.codes, device)
+ self.begins = cuda.to_gpu(self.begins, device)
- begins = numpy.empty((n_vocab + 1,), dtype=numpy.int32)
- begins[0] = 0
- for i in range(0, n_vocab):
- length = len(self.paths[i]) if i in self.paths else 0
- begins[i + 1] = begins[i] + length
+ def to_cpu(self):
+ function.Function.to_cpu(self)
- self.paths = paths
- self.codes = codes
- self.begins = cuda.to_gpu(begins)
+ self.paths = cuda.to_cpu(self.paths)
+ self.codes = cuda.to_cpu(self.codes)
+ self.begins = cuda.to_cpu(self.begins)
def forward_gpu(self, inputs):
x, t = inputs
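With the lookup tables flattened at construction time, moving the function between devices becomes symmetric; a usage sketch (requires a CUDA-enabled Chainer build for the `to_gpu()` step):

```python
from chainer.functions.hierarchical_softmax import (
    BinaryHierarchicalSoftmax, create_huffman_tree)

tree = create_huffman_tree({0: 8, 1: 5, 2: 6, 3: 4})  # example from the docstring
hsm = BinaryHierarchicalSoftmax(in_size=5, tree=tree)

hsm.to_gpu()  # paths/codes/begins are copied to the device
hsm.to_cpu()  # ...and back again, which is what this patch adds
```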
| {"golden_diff": "diff --git a/chainer/functions/hierarchical_softmax.py b/chainer/functions/hierarchical_softmax.py\n--- a/chainer/functions/hierarchical_softmax.py\n+++ b/chainer/functions/hierarchical_softmax.py\n@@ -104,8 +104,20 @@\n def __init__(self, in_size, tree):\n parser = TreeParser()\n parser.parse(tree)\n- self.paths = parser.get_paths()\n- self.codes = parser.get_codes()\n+ paths = parser.get_paths()\n+ codes = parser.get_codes()\n+ n_vocab = max(paths.keys()) + 1\n+\n+ self.paths = numpy.concatenate(\n+ [paths[i] for i in range(n_vocab) if i in paths])\n+ self.codes = numpy.concatenate(\n+ [codes[i] for i in range(n_vocab) if i in codes])\n+ begins = numpy.empty((n_vocab + 1,), dtype=numpy.int32)\n+ begins[0] = 0\n+ for i in range(0, n_vocab):\n+ length = len(paths[i]) if i in paths else 0\n+ begins[i + 1] = begins[i] + length\n+ self.begins = begins\n \n self.W = numpy.random.uniform(\n -1, 1, (parser.size(), in_size)).astype(numpy.float32)\n@@ -139,10 +151,11 @@\n return numpy.array(loss),\n \n def _forward_cpu_one(self, x, t):\n- assert t in self.paths\n+ begin = self.begins[t]\n+ end = self.begins[t + 1]\n \n- w = self.W[self.paths[t]]\n- wxy = w.dot(x) * self.codes[t]\n+ w = self.W[self.paths[begin:end]]\n+ wxy = w.dot(x) * self.codes[begin:end]\n loss = numpy.logaddexp(0.0, -wxy) # == log(1 + exp(-wxy))\n return numpy.sum(loss)\n \n@@ -155,10 +168,13 @@\n return gx, None\n \n def _backward_cpu_one(self, x, t, gloss):\n- path = self.paths[t]\n+ begin = self.begins[t]\n+ end = self.begins[t + 1]\n+\n+ path = self.paths[begin:end]\n w = self.W[path]\n- wxy = w.dot(x) * self.codes[t]\n- g = -gloss * self.codes[t] / (1.0 + numpy.exp(wxy))\n+ wxy = w.dot(x) * self.codes[begin:end]\n+ g = -gloss * self.codes[begin:end] / (1.0 + numpy.exp(wxy))\n gx = g.dot(w)\n gw = g.reshape((g.shape[0], 1)).dot(x.reshape(1, x.shape[0]))\n self.gW[path] += gw\n@@ -167,21 +183,16 @@\n def to_gpu(self, device=None):\n function.Function.to_gpu(self, device)\n \n- n_vocab = max(self.paths.keys()) + 1\n- paths = cuda.to_gpu(numpy.concatenate(\n- [self.paths[i] for i in range(n_vocab) if i in self.paths]))\n- codes = cuda.to_gpu(numpy.concatenate(\n- [self.codes[i] for i in range(n_vocab) if i in self.codes]))\n+ self.paths = cuda.to_gpu(self.paths, device)\n+ self.codes = cuda.to_gpu(self.codes, device)\n+ self.begins = cuda.to_gpu(self.begins, device)\n \n- begins = numpy.empty((n_vocab + 1,), dtype=numpy.int32)\n- begins[0] = 0\n- for i in range(0, n_vocab):\n- length = len(self.paths[i]) if i in self.paths else 0\n- begins[i + 1] = begins[i] + length\n+ def to_cpu(self):\n+ function.Function.to_cpu(self)\n \n- self.paths = paths\n- self.codes = codes\n- self.begins = cuda.to_gpu(begins)\n+ self.paths = cuda.to_cpu(self.paths)\n+ self.codes = cuda.to_cpu(self.codes)\n+ self.begins = cuda.to_cpu(self.begins)\n \n def forward_gpu(self, inputs):\n x, t = inputs\n", "issue": "Hierarchical softmax doesn't have `to_cpu`\nSame problem as #276 \n\n", "before_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass TreeParser(object):\n\n def __init__(self):\n self.next_id = 0\n\n def size(self):\n return self.next_id\n\n def get_paths(self):\n return self.paths\n\n def get_codes(self):\n return self.codes\n\n def parse(self, tree):\n self.next_id = 0\n self.path = []\n self.code = []\n self.paths = {}\n self.codes = {}\n self._parse(tree)\n\n assert(len(self.path) == 0)\n assert(len(self.code) == 0)\n 
assert(len(self.paths) == len(self.codes))\n\n def _parse(self, node):\n if isinstance(node, tuple):\n # internal node\n if len(node) != 2:\n raise ValueError(\n 'All internal nodes must have two child nodes')\n left, right = node\n self.path.append(self.next_id)\n self.next_id += 1\n self.code.append(1.0)\n self._parse(left)\n\n self.code[-1] = -1.0\n self._parse(right)\n\n self.path.pop()\n self.code.pop()\n\n else:\n # leaf node\n self.paths[node] = numpy.array(self.path).astype(numpy.int32)\n self.codes[node] = numpy.array(self.code).astype(numpy.float32)\n\n\nclass BinaryHierarchicalSoftmax(function.Function):\n\n \"\"\"Implementation of hierarchical softmax (HSM).\n\n In natural language applications, vocabulary size is too large to use\n softmax loss.\n Instead, the hierarchical softmax uses product of sigmoid functions.\n It costs only :math:`O(\\log(n))` time where :math:`n` is the vocabulary\n size in average.\n\n At first a user need to prepare a binary tree whose each leaf is\n corresponding to a word in a vocabulary.\n When a word :math:`x` is given, exactly one path from the root of the tree\n to the leaf of the word exists.\n Let :math:`\\mbox{path}(x) = ((e_1, b_1), \\dots, (e_m, b_m))` be the path of\n :math:`x`, where :math:`e_i` is an index of :math:`i`-th internal node, and\n :math:`b_i \\in \\{-1, 1\\}` indicates direction to move at :math:`i`-th\n internal node (-1 is left, and 1 is right).\n Then, the probability of :math:`x` is given as below:\n\n .. math::\n\n P(x) &= \\prod_{(e_i, b_i) \\in \\mbox{path}(x)}P(b_i | e_i) \\\\\\\\\n &= \\prod_{(e_i, b_i) \\in \\mbox{path}(x)}\\sigma(b_i x^\\\\top\n w_{e_i}),\n\n where :math:`\\sigma(\\\\cdot)` is a sigmoid function, and :math:`w` is a\n weight matrix.\n\n This function costs :math:`O(\\log(n))` time as an average length of paths\n is :math:`O(\\log(n))`, and :math:`O(n)` memory as the number of internal\n nodes equals :math:`n - 1`.\n\n Args:\n in_size (int): Dimension of input vectors.\n tree: A binary tree made with tuples like `((1, 2), 3)`.\n\n See: Hierarchical Probabilistic Neural Network Language Model [Morin+,\n AISTAT2005].\n\n \"\"\"\n\n parameter_names = ('W',)\n gradient_names = ('gW',)\n\n def __init__(self, in_size, tree):\n parser = TreeParser()\n parser.parse(tree)\n self.paths = parser.get_paths()\n self.codes = parser.get_codes()\n\n self.W = numpy.random.uniform(\n -1, 1, (parser.size(), in_size)).astype(numpy.float32)\n self.gW = numpy.zeros(self.W.shape, numpy.float32)\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n x_type, t_type = in_types\n\n type_check.expect(\n x_type.dtype == numpy.float32,\n x_type.ndim == 2,\n t_type.dtype == numpy.int32,\n t_type.ndim == 1,\n x_type.shape[0] == t_type.shape[0]\n )\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(\n out_types.size() == 1,\n out_types[0].dtype == numpy.float32,\n out_types[0].ndim == 0\n )\n\n def forward_cpu(self, args):\n x, t = args\n\n loss = numpy.float32(0.0)\n for ix, it in six.moves.zip(x, t):\n loss += self._forward_cpu_one(ix, it)\n return numpy.array(loss),\n\n def _forward_cpu_one(self, x, t):\n assert t in self.paths\n\n w = self.W[self.paths[t]]\n wxy = w.dot(x) * self.codes[t]\n loss = numpy.logaddexp(0.0, -wxy) # == log(1 + exp(-wxy))\n return numpy.sum(loss)\n\n def backward_cpu(self, args, loss):\n x, t = args\n gloss, = loss\n gx = numpy.empty_like(x)\n for i, (ix, it) in enumerate(six.moves.zip(x, t)):\n gx[i] = self._backward_cpu_one(ix, it, gloss)\n 
return gx, None\n\n def _backward_cpu_one(self, x, t, gloss):\n path = self.paths[t]\n w = self.W[path]\n wxy = w.dot(x) * self.codes[t]\n g = -gloss * self.codes[t] / (1.0 + numpy.exp(wxy))\n gx = g.dot(w)\n gw = g.reshape((g.shape[0], 1)).dot(x.reshape(1, x.shape[0]))\n self.gW[path] += gw\n return gx\n\n def to_gpu(self, device=None):\n function.Function.to_gpu(self, device)\n\n n_vocab = max(self.paths.keys()) + 1\n paths = cuda.to_gpu(numpy.concatenate(\n [self.paths[i] for i in range(n_vocab) if i in self.paths]))\n codes = cuda.to_gpu(numpy.concatenate(\n [self.codes[i] for i in range(n_vocab) if i in self.codes]))\n\n begins = numpy.empty((n_vocab + 1,), dtype=numpy.int32)\n begins[0] = 0\n for i in range(0, n_vocab):\n length = len(self.paths[i]) if i in self.paths else 0\n begins[i + 1] = begins[i] + length\n\n self.paths = paths\n self.codes = codes\n self.begins = cuda.to_gpu(begins)\n\n def forward_gpu(self, inputs):\n x, t = inputs\n\n max_length = cuda.reduce(\n 'int* t, int* begins', 'begins[t[i] + 1] - begins[t[i]]',\n 'max(a,b)', '0', 'binary_hierarchical_softmax_max_length',\n numpy.int32\n )(t, self.begins)\n max_length = cuda.to_cpu(max_length)[()]\n\n length = max_length * x.shape[0]\n ls = cuda.empty((length,), dtype=numpy.float32)\n n_in = x.shape[1]\n wxy = cuda.empty((length,), dtype=numpy.float32)\n cuda.elementwise(\n '''float* ls, float* wxy, const float* x, const float* w,\n const int* ts, const int* paths, const float* codes,\n const int* begins, int c, int max_length''',\n '''\n int ind = i / max_length;\n int offset = i - ind * max_length;\n int t = ts[ind];\n\n int begin = begins[t];\n int length = begins[t + 1] - begins[t];\n\n if (offset < length) {\n int p = begin + offset;\n int node = paths[p];\n\n x = &x[ind * c];\n\n float wx = 0;\n for (int j = 0; j < c; ++j) {\n wx += w[node * c + j] * x[j];\n }\n wxy[i] = wx * codes[p];\n ls[i] = log(1 + exp(-wxy[i]));\n } else {\n ls[i] = 0;\n }\n ''',\n 'binary_hierarchical_softmax_forward'\n )(ls, wxy, x, self.W, t, self.paths, self.codes, self.begins,\n n_in, max_length)\n self.max_length = max_length\n self.wxy = wxy\n return cuda.gpuarray.sum(ls),\n\n def backward_gpu(self, inputs, loss):\n x, t = inputs\n gloss, = loss\n\n n_in = x.shape[1]\n gx = cuda.zeros_like(x)\n cuda.elementwise(\n '''const float* wxy, float* gx, float* gw, const float* x,\n const float* w, const int* ts, const int* paths,\n const float* codes, const int* begins,\n const float* gloss, int c, int max_length''',\n '''\n int ind = i / max_length;\n int offset = i - ind * max_length;\n int t = ts[ind];\n\n int begin = begins[t];\n int length = begins[t + 1] - begins[t];\n\n if (offset < length) {\n int p = begin + offset;\n int node = paths[p];\n float code = codes[p];\n gx = &gx[ind * c];\n x = &x[ind * c];\n\n float g = -*gloss * code / (1.0 + exp(wxy[i]));\n for (int j = 0; j < c; ++j) {\n atomicAdd(gx + j, g * w[node * c + j]);\n atomicAdd(gw + node * c + j, g * x[j]);\n }\n }\n ''',\n 'binary_hierarchical_softmax_bwd'\n )(self.wxy, gx, self.gW, x, self.W, t, self.paths, self.codes,\n self.begins, gloss, n_in, self.max_length)\n return gx, None\n\n\ndef create_huffman_tree(word_counts):\n \"\"\"Make a huffman tree from a dictionary containing word counts.\n\n This method creates a binary huffman tree, that is required for\n :class:`BinaryHierarchicalSoftmax`.\n For example, ``{0: 8, 1: 5, 2: 6, 3: 4}`` is converted to\n ``((3, 1), (2, 0))``.\n\n Args:\n word_counts (``dict`` of ``int`` key and ``int`` or ``float`` values.):\n 
Dictionary representing counts of words.\n\n Returns:\n Binary huffman tree with tuples and keys of ``word_coutns``.\n\n \"\"\"\n if len(word_counts) == 0:\n raise ValueError('Empty vocabulary')\n\n q = six.moves.queue.PriorityQueue()\n # Add unique id to each entry so that we can compare two entries with same\n # counts.\n # Note that itreitems randomly order the entries.\n for uid, (w, c) in enumerate(six.iteritems(word_counts)):\n q.put((c, uid, w))\n\n while q.qsize() >= 2:\n (count1, id1, word1) = q.get()\n (count2, id2, word2) = q.get()\n count = count1 + count2\n tree = (word1, word2)\n q.put((count, min(id1, id2), tree))\n\n return q.get()[2]\n", "path": "chainer/functions/hierarchical_softmax.py"}]} | 4,046 | 971 |
gh_patches_debug_1331 | rasdani/github-patches | git_diff | litestar-org__litestar-1773 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
</issue>
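For illustration, the validation failure can be reproduced with pydantic alone. The sketch below uses made-up model and field names (it is not Starlite's actual `StaticFilesConfig`); it only contrasts `DirectoryPath` with a type that skips the exists-on-disk check:

```python
from typing import List

from pydantic import BaseModel, DirectoryPath, ValidationError


class StrictConfig(BaseModel):
    directories: List[DirectoryPath]  # pydantic requires these to exist on the local filesystem


class RelaxedConfig(BaseModel):
    directories: List[str]  # plain strings (or PurePath) are passed through untouched


try:
    StrictConfig(directories=["package_data/static"])  # virtual location, not on disk
except ValidationError as exc:
    print(exc)  # rejected because the directory does not exist locally

RelaxedConfig(directories=["package_data/static"])  # accepted; resolution is left to the FileSystemProtocol
```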
<code>
[start of litestar/dto/exceptions.py]
1 from __future__ import annotations
2
3 from litestar.exceptions import ImproperlyConfiguredException
4
5 __all__ = ("DTOException", "UnsupportedType")
6
7
8 class DTOException(ImproperlyConfiguredException):
9 """Base exception for DTO errors."""
10
11
12 class UnsupportedType(DTOException):
13 """Raised when a type is not supported by Litestar."""
14
[end of litestar/dto/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/litestar/dto/exceptions.py b/litestar/dto/exceptions.py
deleted file mode 100644
--- a/litestar/dto/exceptions.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from __future__ import annotations
-
-from litestar.exceptions import ImproperlyConfiguredException
-
-__all__ = ("DTOException", "UnsupportedType")
-
-
-class DTOException(ImproperlyConfiguredException):
- """Base exception for DTO errors."""
-
-
-class UnsupportedType(DTOException):
- """Raised when a type is not supported by Litestar."""
| {"golden_diff": "diff --git a/litestar/dto/exceptions.py b/litestar/dto/exceptions.py\ndeleted file mode 100644\n--- a/litestar/dto/exceptions.py\n+++ /dev/null\n@@ -1,13 +0,0 @@\n-from __future__ import annotations\n-\n-from litestar.exceptions import ImproperlyConfiguredException\n-\n-__all__ = (\"DTOException\", \"UnsupportedType\")\n-\n-\n-class DTOException(ImproperlyConfiguredException):\n- \"\"\"Base exception for DTO errors.\"\"\"\n-\n-\n-class UnsupportedType(DTOException):\n- \"\"\"Raised when a type is not supported by Litestar.\"\"\"\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom litestar.exceptions import ImproperlyConfiguredException\n\n__all__ = (\"DTOException\", \"UnsupportedType\")\n\n\nclass DTOException(ImproperlyConfiguredException):\n \"\"\"Base exception for DTO errors.\"\"\"\n\n\nclass UnsupportedType(DTOException):\n \"\"\"Raised when a type is not supported by Litestar.\"\"\"\n", "path": "litestar/dto/exceptions.py"}]} | 801 | 139 |
gh_patches_debug_34145 | rasdani/github-patches | git_diff | scikit-hep__awkward-1795 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`ak.run_lengths` does not handle leading/trailing empty sublists
### Version of Awkward Array
main
### Description and code to reproduce
This also exists in v1.
The following code raises an `Exception`:
```python
import awkward as ak
import numpy as np
layout = ak.contents.ListOffsetArray(
ak.index.Index(np.array([0, 2, 4, 4], dtype=np.int64)),
ak.contents.NumpyArray(
np.arange(4)
)
)
ak.run_lengths(layout)
```
Also, we should use `regularize_numpyarray` to handle `NumpyArray`, which currently just raises an Exception.
</issue>
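A stripped-down NumPy sketch of the boundary-marking step that trips over the trailing empty sublist (simplified names, not the library's internals):

```python
import numpy as np

data = np.arange(4)               # flattened content: [0 1 2 3]
offsets = np.array([0, 2, 4, 4])  # the last sublist is empty, so the final offset repeats

diffs = data[1:] != data[:-1]     # len(diffs) == 3

# naive boundary marking: offsets[1:-1] - 1 == [1, 3], and index 3 is out of bounds
# guarded version: only mark offsets that fall strictly inside the flattened data
interior = offsets[(offsets > 0) & (offsets < len(data))]
diffs[interior - 1] = True        # also safe for leading empty sublists (no -1 wrap-around)
```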
<code>
[start of src/awkward/operations/ak_run_lengths.py]
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2
3 import awkward as ak
4
5 np = ak.nplikes.NumpyMetadata.instance()
6
7
8 def run_lengths(array, highlevel=True, behavior=None):
9
10 """
11 Args:
12 array: Data containing runs of numbers to count.
13 highlevel (bool): If True, return an #ak.Array; otherwise, return
14 a low-level #ak.contents.Content subclass.
15 behavior (None or dict): Custom #ak.behavior for the output array, if
16 high-level.
17
18 Computes the lengths of sequences of identical values at the deepest level
19 of nesting, returning an array with the same structure but with `int64` type.
20
21 For example,
22
23 >>> array = ak.Array([1.1, 1.1, 1.1, 2.2, 3.3, 3.3, 4.4, 4.4, 5.5])
24 >>> ak.run_lengths(array)
25 <Array [3, 1, 2, 2, 1] type='5 * int64'>
26
27 There are 3 instances of 1.1, followed by 1 instance of 2.2, 2 instances of 3.3,
28 2 instances of 4.4, and 1 instance of 5.5.
29
30 The order and uniqueness of the input data doesn't matter,
31
32 >>> array = ak.Array([1.1, 1.1, 1.1, 5.5, 4.4, 4.4, 1.1, 1.1, 5.5])
33 >>> ak.run_lengths(array)
34 <Array [3, 1, 2, 2, 1] type='5 * int64'>
35
36 just the difference between each value and its neighbors.
37
38 The data can be nested, but runs don't cross list boundaries.
39
40 >>> array = ak.Array([[1.1, 1.1, 1.1, 2.2, 3.3], [3.3, 4.4], [4.4, 5.5]])
41 >>> ak.run_lengths(array)
42 <Array [[3, 1, 1], [1, 1], [1, 1]] type='3 * var * int64'>
43
44 This function recognizes strings as distinguishable values.
45
46 >>> array = ak.Array([["one", "one"], ["one", "two", "two"], ["three", "two", "two"]])
47 >>> ak.run_lengths(array)
48 <Array [[2], [1, 2], [1, 2]] type='3 * var * int64'>
49
50 Note that this can be combined with #ak.argsort and #ak.unflatten to compute
51 a "group by" operation:
52
53 >>> array = ak.Array([{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 1, "y": 1.1},
54 ... {"x": 3, "y": 3.3}, {"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}])
55 >>> sorted = array[ak.argsort(array.x)]
56 >>> sorted.x
57 <Array [1, 1, 1, 2, 2, 3] type='6 * int64'>
58 >>> ak.run_lengths(sorted.x)
59 <Array [3, 2, 1] type='3 * int64'>
60 >>> ak.unflatten(sorted, ak.run_lengths(sorted.x)).tolist()
61 [[{'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}],
62 [{'x': 2, 'y': 2.2}, {'x': 2, 'y': 2.2}],
63 [{'x': 3, 'y': 3.3}]]
64
65 Unlike a database "group by," this operation can be applied in bulk to many sublists
66 (though the run lengths need to be fully flattened to be used as `counts` for
67 #ak.unflatten, and you need to specify `axis=-1` as the depth).
68
69 >>> array = ak.Array([[{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 1, "y": 1.1}],
70 ... [{"x": 3, "y": 3.3}, {"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}]])
71 >>> sorted = array[ak.argsort(array.x)]
72 >>> sorted.x
73 <Array [[1, 1, 2], [1, 2, 3]] type='2 * var * int64'>
74 >>> ak.run_lengths(sorted.x)
75 <Array [[2, 1], [1, 1, 1]] type='2 * var * int64'>
76 >>> counts = ak.flatten(ak.run_lengths(sorted.x), axis=None)
77 >>> ak.unflatten(sorted, counts, axis=-1).tolist()
78 [[[{'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}],
79 [{'x': 2, 'y': 2.2}]],
80 [[{'x': 1, 'y': 1.1}],
81 [{'x': 2, 'y': 2.2}],
82 [{'x': 3, 'y': 3.3}]]]
83
84 See also #ak.num, #ak.argsort, #ak.unflatten.
85 """
86 with ak._errors.OperationErrorContext(
87 "ak.run_lengths",
88 dict(
89 array=array,
90 highlevel=highlevel,
91 behavior=behavior,
92 ),
93 ):
94 return _impl(array, highlevel, behavior)
95
96
97 def _impl(array, highlevel, behavior):
98 nplike = ak.nplikes.nplike_of(array)
99
100 def lengths_of(data, offsets):
101 if len(data) == 0:
102 return nplike.index_nplike.empty(0, np.int64), offsets
103 else:
104 diffs = data[1:] != data[:-1]
105
106 if isinstance(diffs, ak.highlevel.Array):
107 diffs = nplike.asarray(diffs)
108 if offsets is not None:
109 diffs[offsets[1:-1] - 1] = True
110 positions = nplike.index_nplike.nonzero(diffs)[0]
111 full_positions = nplike.index_nplike.empty(len(positions) + 2, np.int64)
112 full_positions[0] = 0
113 full_positions[-1] = len(data)
114 full_positions[1:-1] = positions + 1
115
116 nextcontent = full_positions[1:] - full_positions[:-1]
117 if offsets is None:
118 nextoffsets = None
119 else:
120 nextoffsets = nplike.index_nplike.searchsorted(
121 full_positions, offsets, side="left"
122 )
123 return nextcontent, nextoffsets
124
125 def action(layout, **kwargs):
126 if layout.branch_depth == (False, 1):
127 if layout.is_IndexedType:
128 layout = layout.project()
129
130 if (
131 layout.parameter("__array__") == "string"
132 or layout.parameter("__array__") == "bytestring"
133 ):
134 nextcontent, _ = lengths_of(ak.highlevel.Array(layout), None)
135 return ak.contents.NumpyArray(nextcontent)
136
137 if not isinstance(layout, (ak.contents.NumpyArray, ak.contents.EmptyArray)):
138 raise ak._errors.wrap_error(
139 NotImplementedError("run_lengths on " + type(layout).__name__)
140 )
141
142 nextcontent, _ = lengths_of(nplike.asarray(layout), None)
143 return ak.contents.NumpyArray(nextcontent)
144
145 elif layout.branch_depth == (False, 2):
146 if layout.is_IndexedType:
147 layout = layout.project()
148
149 if not layout.is_ListType:
150 raise ak._errors.wrap_error(
151 NotImplementedError("run_lengths on " + type(layout).__name__)
152 )
153
154 if (
155 layout.content.parameter("__array__") == "string"
156 or layout.content.parameter("__array__") == "bytestring"
157 ):
158 listoffsetarray = layout.toListOffsetArray64(False)
159 offsets = nplike.index_nplike.asarray(listoffsetarray.offsets)
160 content = listoffsetarray.content[offsets[0] : offsets[-1]]
161
162 if content.is_IndexedType:
163 content = content.project()
164
165 nextcontent, nextoffsets = lengths_of(
166 ak.highlevel.Array(content), offsets - offsets[0]
167 )
168 return ak.contents.ListOffsetArray(
169 ak.index.Index64(nextoffsets),
170 ak.contents.NumpyArray(nextcontent),
171 )
172
173 listoffsetarray = layout.toListOffsetArray64(False)
174 offsets = nplike.index_nplike.asarray(listoffsetarray.offsets)
175 content = listoffsetarray.content[offsets[0] : offsets[-1]]
176
177 if content.is_IndexedType:
178 content = content.project()
179
180 if not isinstance(
181 content, (ak.contents.NumpyArray, ak.contents.EmptyArray)
182 ):
183 raise ak._errors.wrap_error(
184 NotImplementedError(
185 "run_lengths on "
186 + type(layout).__name__
187 + " with content "
188 + type(content).__name__
189 )
190 )
191
192 nextcontent, nextoffsets = lengths_of(
193 nplike.asarray(content), offsets - offsets[0]
194 )
195 return ak.contents.ListOffsetArray(
196 ak.index.Index64(nextoffsets),
197 ak.contents.NumpyArray(nextcontent),
198 )
199 else:
200 return None
201
202 layout = ak.operations.to_layout(array, allow_record=False, allow_other=False)
203
204 out = layout.recursively_apply(action, behavior)
205
206 return ak._util.wrap(out, behavior, highlevel)
207
[end of src/awkward/operations/ak_run_lengths.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/awkward/operations/ak_run_lengths.py b/src/awkward/operations/ak_run_lengths.py
--- a/src/awkward/operations/ak_run_lengths.py
+++ b/src/awkward/operations/ak_run_lengths.py
@@ -105,8 +105,21 @@
if isinstance(diffs, ak.highlevel.Array):
diffs = nplike.asarray(diffs)
+ # Do we have list boundaries to consider?
if offsets is not None:
- diffs[offsets[1:-1] - 1] = True
+ # When checking to see whether one element equals its following neighbour
+ # we also want to break runs at list boundaries. The comparison for offset `i`
+ # occurs at `i-1` in `diffs`. Consider this example with an empty sublist:
+ # data = [1 1 2 2 2 3 4 4 5 ]
+ # offsets = [0 6 9 9]
+ # (data) diffs = [ 0 1 0 0 1 1 0 1 ]
+ # diffs index = [ 0 1 2 3 4 5 6 7 ]
+ # boundary diff ^
+ # To consider only the interior boundaries, we ignore the start and end
+ # offset values. These can be repeated with empty sublists, so we mask them out.
+ is_interior = nplike.logical_and(0 < offsets, offsets < len(data) - 1)
+ interior_offsets = offsets[is_interior]
+ diffs[interior_offsets - 1] = True
positions = nplike.index_nplike.nonzero(diffs)[0]
full_positions = nplike.index_nplike.empty(len(positions) + 2, np.int64)
full_positions[0] = 0
@@ -155,6 +168,8 @@
layout.content.parameter("__array__") == "string"
or layout.content.parameter("__array__") == "bytestring"
):
+ # We also want to trim the _upper_ bound of content,
+ # so we manually convert the list type to zero-based
listoffsetarray = layout.toListOffsetArray64(False)
offsets = nplike.index_nplike.asarray(listoffsetarray.offsets)
content = listoffsetarray.content[offsets[0] : offsets[-1]]
| {"golden_diff": "diff --git a/src/awkward/operations/ak_run_lengths.py b/src/awkward/operations/ak_run_lengths.py\n--- a/src/awkward/operations/ak_run_lengths.py\n+++ b/src/awkward/operations/ak_run_lengths.py\n@@ -105,8 +105,21 @@\n \n if isinstance(diffs, ak.highlevel.Array):\n diffs = nplike.asarray(diffs)\n+ # Do we have list boundaries to consider?\n if offsets is not None:\n- diffs[offsets[1:-1] - 1] = True\n+ # When checking to see whether one element equals its following neighbour\n+ # we also want to break runs at list boundaries. The comparison for offset `i`\n+ # occurs at `i-1` in `diffs`. Consider this example with an empty sublist:\n+ # data = [1 1 2 2 2 3 4 4 5 ]\n+ # offsets = [0 6 9 9]\n+ # (data) diffs = [ 0 1 0 0 1 1 0 1 ]\n+ # diffs index = [ 0 1 2 3 4 5 6 7 ]\n+ # boundary diff ^\n+ # To consider only the interior boundaries, we ignore the start and end\n+ # offset values. These can be repeated with empty sublists, so we mask them out.\n+ is_interior = nplike.logical_and(0 < offsets, offsets < len(data) - 1)\n+ interior_offsets = offsets[is_interior]\n+ diffs[interior_offsets - 1] = True\n positions = nplike.index_nplike.nonzero(diffs)[0]\n full_positions = nplike.index_nplike.empty(len(positions) + 2, np.int64)\n full_positions[0] = 0\n@@ -155,6 +168,8 @@\n layout.content.parameter(\"__array__\") == \"string\"\n or layout.content.parameter(\"__array__\") == \"bytestring\"\n ):\n+ # We also want to trim the _upper_ bound of content,\n+ # so we manually convert the list type to zero-based\n listoffsetarray = layout.toListOffsetArray64(False)\n offsets = nplike.index_nplike.asarray(listoffsetarray.offsets)\n content = listoffsetarray.content[offsets[0] : offsets[-1]]\n", "issue": "`ak.run_lengths` does not handle leading/trailing empty sublists\n### Version of Awkward Array\r\n\r\nmain\r\n\r\n### Description and code to reproduce\r\n\r\nThis also exists in v1.\r\n\r\nThe following code raises an `Exception`:\r\n```python\r\nimport awkward as ak\r\nimport numpy as np\r\n\r\n\r\nlayout = ak.contents.ListOffsetArray(\r\n ak.index.Index(np.array([0, 2, 4, 4], dtype=np.int64)),\r\n ak.contents.NumpyArray(\r\n np.arange(4)\r\n )\r\n)\r\nak.run_lengths(layout)\r\n```\r\n\r\nAlso, we should use `regularize_numpyarray` to handle `NumpyArray`, which currently just raises an Exception.\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport awkward as ak\n\nnp = ak.nplikes.NumpyMetadata.instance()\n\n\ndef run_lengths(array, highlevel=True, behavior=None):\n\n \"\"\"\n Args:\n array: Data containing runs of numbers to count.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Computes the lengths of sequences of identical values at the deepest level\n of nesting, returning an array with the same structure but with `int64` type.\n\n For example,\n\n >>> array = ak.Array([1.1, 1.1, 1.1, 2.2, 3.3, 3.3, 4.4, 4.4, 5.5])\n >>> ak.run_lengths(array)\n <Array [3, 1, 2, 2, 1] type='5 * int64'>\n\n There are 3 instances of 1.1, followed by 1 instance of 2.2, 2 instances of 3.3,\n 2 instances of 4.4, and 1 instance of 5.5.\n\n The order and uniqueness of the input data doesn't matter,\n\n >>> array = ak.Array([1.1, 1.1, 1.1, 5.5, 4.4, 4.4, 1.1, 1.1, 5.5])\n >>> ak.run_lengths(array)\n <Array [3, 1, 2, 2, 1] type='5 * int64'>\n\n just the difference between each value 
and its neighbors.\n\n The data can be nested, but runs don't cross list boundaries.\n\n >>> array = ak.Array([[1.1, 1.1, 1.1, 2.2, 3.3], [3.3, 4.4], [4.4, 5.5]])\n >>> ak.run_lengths(array)\n <Array [[3, 1, 1], [1, 1], [1, 1]] type='3 * var * int64'>\n\n This function recognizes strings as distinguishable values.\n\n >>> array = ak.Array([[\"one\", \"one\"], [\"one\", \"two\", \"two\"], [\"three\", \"two\", \"two\"]])\n >>> ak.run_lengths(array)\n <Array [[2], [1, 2], [1, 2]] type='3 * var * int64'>\n\n Note that this can be combined with #ak.argsort and #ak.unflatten to compute\n a \"group by\" operation:\n\n >>> array = ak.Array([{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 1, \"y\": 1.1},\n ... {\"x\": 3, \"y\": 3.3}, {\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}])\n >>> sorted = array[ak.argsort(array.x)]\n >>> sorted.x\n <Array [1, 1, 1, 2, 2, 3] type='6 * int64'>\n >>> ak.run_lengths(sorted.x)\n <Array [3, 2, 1] type='3 * int64'>\n >>> ak.unflatten(sorted, ak.run_lengths(sorted.x)).tolist()\n [[{'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}],\n [{'x': 2, 'y': 2.2}, {'x': 2, 'y': 2.2}],\n [{'x': 3, 'y': 3.3}]]\n\n Unlike a database \"group by,\" this operation can be applied in bulk to many sublists\n (though the run lengths need to be fully flattened to be used as `counts` for\n #ak.unflatten, and you need to specify `axis=-1` as the depth).\n\n >>> array = ak.Array([[{\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}, {\"x\": 1, \"y\": 1.1}],\n ... [{\"x\": 3, \"y\": 3.3}, {\"x\": 1, \"y\": 1.1}, {\"x\": 2, \"y\": 2.2}]])\n >>> sorted = array[ak.argsort(array.x)]\n >>> sorted.x\n <Array [[1, 1, 2], [1, 2, 3]] type='2 * var * int64'>\n >>> ak.run_lengths(sorted.x)\n <Array [[2, 1], [1, 1, 1]] type='2 * var * int64'>\n >>> counts = ak.flatten(ak.run_lengths(sorted.x), axis=None)\n >>> ak.unflatten(sorted, counts, axis=-1).tolist()\n [[[{'x': 1, 'y': 1.1}, {'x': 1, 'y': 1.1}],\n [{'x': 2, 'y': 2.2}]],\n [[{'x': 1, 'y': 1.1}],\n [{'x': 2, 'y': 2.2}],\n [{'x': 3, 'y': 3.3}]]]\n\n See also #ak.num, #ak.argsort, #ak.unflatten.\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.run_lengths\",\n dict(\n array=array,\n highlevel=highlevel,\n behavior=behavior,\n ),\n ):\n return _impl(array, highlevel, behavior)\n\n\ndef _impl(array, highlevel, behavior):\n nplike = ak.nplikes.nplike_of(array)\n\n def lengths_of(data, offsets):\n if len(data) == 0:\n return nplike.index_nplike.empty(0, np.int64), offsets\n else:\n diffs = data[1:] != data[:-1]\n\n if isinstance(diffs, ak.highlevel.Array):\n diffs = nplike.asarray(diffs)\n if offsets is not None:\n diffs[offsets[1:-1] - 1] = True\n positions = nplike.index_nplike.nonzero(diffs)[0]\n full_positions = nplike.index_nplike.empty(len(positions) + 2, np.int64)\n full_positions[0] = 0\n full_positions[-1] = len(data)\n full_positions[1:-1] = positions + 1\n\n nextcontent = full_positions[1:] - full_positions[:-1]\n if offsets is None:\n nextoffsets = None\n else:\n nextoffsets = nplike.index_nplike.searchsorted(\n full_positions, offsets, side=\"left\"\n )\n return nextcontent, nextoffsets\n\n def action(layout, **kwargs):\n if layout.branch_depth == (False, 1):\n if layout.is_IndexedType:\n layout = layout.project()\n\n if (\n layout.parameter(\"__array__\") == \"string\"\n or layout.parameter(\"__array__\") == \"bytestring\"\n ):\n nextcontent, _ = lengths_of(ak.highlevel.Array(layout), None)\n return ak.contents.NumpyArray(nextcontent)\n\n if not isinstance(layout, (ak.contents.NumpyArray, ak.contents.EmptyArray)):\n 
raise ak._errors.wrap_error(\n NotImplementedError(\"run_lengths on \" + type(layout).__name__)\n )\n\n nextcontent, _ = lengths_of(nplike.asarray(layout), None)\n return ak.contents.NumpyArray(nextcontent)\n\n elif layout.branch_depth == (False, 2):\n if layout.is_IndexedType:\n layout = layout.project()\n\n if not layout.is_ListType:\n raise ak._errors.wrap_error(\n NotImplementedError(\"run_lengths on \" + type(layout).__name__)\n )\n\n if (\n layout.content.parameter(\"__array__\") == \"string\"\n or layout.content.parameter(\"__array__\") == \"bytestring\"\n ):\n listoffsetarray = layout.toListOffsetArray64(False)\n offsets = nplike.index_nplike.asarray(listoffsetarray.offsets)\n content = listoffsetarray.content[offsets[0] : offsets[-1]]\n\n if content.is_IndexedType:\n content = content.project()\n\n nextcontent, nextoffsets = lengths_of(\n ak.highlevel.Array(content), offsets - offsets[0]\n )\n return ak.contents.ListOffsetArray(\n ak.index.Index64(nextoffsets),\n ak.contents.NumpyArray(nextcontent),\n )\n\n listoffsetarray = layout.toListOffsetArray64(False)\n offsets = nplike.index_nplike.asarray(listoffsetarray.offsets)\n content = listoffsetarray.content[offsets[0] : offsets[-1]]\n\n if content.is_IndexedType:\n content = content.project()\n\n if not isinstance(\n content, (ak.contents.NumpyArray, ak.contents.EmptyArray)\n ):\n raise ak._errors.wrap_error(\n NotImplementedError(\n \"run_lengths on \"\n + type(layout).__name__\n + \" with content \"\n + type(content).__name__\n )\n )\n\n nextcontent, nextoffsets = lengths_of(\n nplike.asarray(content), offsets - offsets[0]\n )\n return ak.contents.ListOffsetArray(\n ak.index.Index64(nextoffsets),\n ak.contents.NumpyArray(nextcontent),\n )\n else:\n return None\n\n layout = ak.operations.to_layout(array, allow_record=False, allow_other=False)\n\n out = layout.recursively_apply(action, behavior)\n\n return ak._util.wrap(out, behavior, highlevel)\n", "path": "src/awkward/operations/ak_run_lengths.py"}]} | 3,415 | 553 |
gh_patches_debug_13767 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1504 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pending follow requests still show up in suggested users
An additional signal is needed to remove users from suggestions when a follow request is created
</issue>
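The missing handler would mirror the existing `UserFollows` receiver in `bookwyrm/suggested_users.py` (shown below). Roughly, reusing that module's imports and task names:

```python
@receiver(signals.post_save, sender=models.UserFollowRequest)
# pylint: disable=unused-argument
def update_suggestions_on_follow_request(sender, instance, created, *args, **kwargs):
    """a pending request should already hide the target from suggestions"""
    if not created or not instance.user_object.discoverable:
        return
    if instance.user_subject.local:
        remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)
```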
<code>
[start of bookwyrm/suggested_users.py]
1 """ store recommended follows in redis """
2 import math
3 import logging
4 from django.dispatch import receiver
5 from django.db.models import signals, Count, Q
6
7 from bookwyrm import models
8 from bookwyrm.redis_store import RedisStore, r
9 from bookwyrm.tasks import app
10
11
12 logger = logging.getLogger(__name__)
13
14
15 class SuggestedUsers(RedisStore):
16 """suggested users for a user"""
17
18 max_length = 30
19
20 def get_rank(self, obj):
21 """get computed rank"""
22 return obj.mutuals # + (1.0 - (1.0 / (obj.shared_books + 1)))
23
24 def store_id(self, user): # pylint: disable=no-self-use
25 """the key used to store this user's recs"""
26 if isinstance(user, int):
27 return f"{user}-suggestions"
28 return f"{user.id}-suggestions"
29
30 def get_counts_from_rank(self, rank): # pylint: disable=no-self-use
31 """calculate mutuals count and shared books count from rank"""
32 return {
33 "mutuals": math.floor(rank),
34 # "shared_books": int(1 / (-1 * (rank % 1 - 1))) - 1,
35 }
36
37 def get_objects_for_store(self, store):
38 """a list of potential follows for a user"""
39 user = models.User.objects.get(id=store.split("-")[0])
40
41 return get_annotated_users(
42 user,
43 ~Q(id=user.id),
44 ~Q(followers=user),
45 ~Q(follower_requests=user),
46 bookwyrm_user=True,
47 )
48
49 def get_stores_for_object(self, obj):
50 return [self.store_id(u) for u in self.get_users_for_object(obj)]
51
52 def get_users_for_object(self, obj): # pylint: disable=no-self-use
53 """given a user, who might want to follow them"""
54 return models.User.objects.filter(local=True,).exclude(
55 Q(id=obj.id) | Q(followers=obj) | Q(id__in=obj.blocks.all()) | Q(blocks=obj)
56 )
57
58 def rerank_obj(self, obj, update_only=True):
59 """update all the instances of this user with new ranks"""
60 pipeline = r.pipeline()
61 for store_user in self.get_users_for_object(obj):
62 annotated_user = get_annotated_users(
63 store_user,
64 id=obj.id,
65 ).first()
66 if not annotated_user:
67 continue
68
69 pipeline.zadd(
70 self.store_id(store_user),
71 self.get_value(annotated_user),
72 xx=update_only,
73 )
74 pipeline.execute()
75
76 def rerank_user_suggestions(self, user):
77 """update the ranks of the follows suggested to a user"""
78 self.populate_store(self.store_id(user))
79
80 def remove_suggestion(self, user, suggested_user):
81 """take a user out of someone's suggestions"""
82 self.bulk_remove_objects_from_store([suggested_user], self.store_id(user))
83
84 def get_suggestions(self, user):
85 """get suggestions"""
86 values = self.get_store(self.store_id(user), withscores=True)
87 results = []
88 # annotate users with mutuals and shared book counts
89 for user_id, rank in values:
90 counts = self.get_counts_from_rank(rank)
91 try:
92 user = models.User.objects.get(
93 id=user_id, is_active=True, bookwyrm_user=True
94 )
95 except models.User.DoesNotExist as err:
96 # if this happens, the suggestions are janked way up
97 logger.exception(err)
98 continue
99 user.mutuals = counts["mutuals"]
100 # user.shared_books = counts["shared_books"]
101 results.append(user)
102 if len(results) >= 5:
103 break
104 return results
105
106
107 def get_annotated_users(viewer, *args, **kwargs):
108 """Users, annotated with things they have in common"""
109 return (
110 models.User.objects.filter(discoverable=True, is_active=True, *args, **kwargs)
111 .exclude(Q(id__in=viewer.blocks.all()) | Q(blocks=viewer))
112 .annotate(
113 mutuals=Count(
114 "followers",
115 filter=Q(
116 ~Q(id=viewer.id),
117 ~Q(id__in=viewer.following.all()),
118 followers__in=viewer.following.all(),
119 ),
120 distinct=True,
121 ),
122 # shared_books=Count(
123 # "shelfbook",
124 # filter=Q(
125 # ~Q(id=viewer.id),
126 # shelfbook__book__parent_work__in=[
127 # s.book.parent_work for s in viewer.shelfbook_set.all()
128 # ],
129 # ),
130 # distinct=True,
131 # ),
132 )
133 )
134
135
136 suggested_users = SuggestedUsers()
137
138
139 @receiver(signals.post_save, sender=models.UserFollows)
140 # pylint: disable=unused-argument
141 def update_suggestions_on_follow(sender, instance, created, *args, **kwargs):
142 """remove a follow from the recs and update the ranks"""
143 if not created or not instance.user_object.discoverable:
144 return
145
146 if instance.user_subject.local:
147 remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)
148 rerank_user_task.delay(instance.user_object.id, update_only=False)
149
150
151 @receiver(signals.post_save, sender=models.UserBlocks)
152 # pylint: disable=unused-argument
153 def update_suggestions_on_block(sender, instance, *args, **kwargs):
154 """remove blocked users from recs"""
155 if instance.user_subject.local and instance.user_object.discoverable:
156 remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)
157 if instance.user_object.local and instance.user_subject.discoverable:
158 remove_suggestion_task.delay(instance.user_object.id, instance.user_subject.id)
159
160
161 @receiver(signals.post_delete, sender=models.UserFollows)
162 # pylint: disable=unused-argument
163 def update_suggestions_on_unfollow(sender, instance, **kwargs):
164 """update rankings, but don't re-suggest because it was probably intentional"""
165 if instance.user_object.discoverable:
166 rerank_user_task.delay(instance.user_object.id, update_only=False)
167
168
169 # @receiver(signals.post_save, sender=models.ShelfBook)
170 # @receiver(signals.post_delete, sender=models.ShelfBook)
171 # # pylint: disable=unused-argument
172 # def update_rank_on_shelving(sender, instance, *args, **kwargs):
173 # """when a user shelves or unshelves a book, re-compute their rank"""
174 # # if it's a local user, re-calculate who is rec'ed to them
175 # if instance.user.local:
176 # rerank_suggestions_task.delay(instance.user.id)
177 #
178 # # if the user is discoverable, update their rankings
179 # if instance.user.discoverable:
180 # rerank_user_task.delay(instance.user.id)
181
182
183 @receiver(signals.post_save, sender=models.User)
184 # pylint: disable=unused-argument, too-many-arguments
185 def update_user(sender, instance, created, update_fields=None, **kwargs):
186 """an updated user, neat"""
187 # a new user is found, create suggestions for them
188 if created and instance.local:
189 rerank_suggestions_task.delay(instance.id)
190
191 # we know what fields were updated and discoverability didn't change
192 if not instance.bookwyrm_user or (
193 update_fields and not "discoverable" in update_fields
194 ):
195 return
196
197 # deleted the user
198 if not created and not instance.is_active:
199 remove_user_task.delay(instance.id)
200 return
201
202 # this happens on every save, not just when discoverability changes, annoyingly
203 if instance.discoverable:
204 rerank_user_task.delay(instance.id, update_only=False)
205 elif not created:
206 remove_user_task.delay(instance.id)
207
208
209 @receiver(signals.post_save, sender=models.FederatedServer)
210 def domain_level_update(sender, instance, created, update_fields=None, **kwargs):
211 """remove users on a domain block"""
212 if (
213 not update_fields
214 or "status" not in update_fields
215 or instance.application_type != "bookwyrm"
216 ):
217 return
218
219 if instance.status == "blocked":
220 bulk_remove_instance_task.delay(instance.id)
221 return
222 bulk_add_instance_task.delay(instance.id)
223
224
225 # ------------------- TASKS
226
227
228 @app.task(queue="low_priority")
229 def rerank_suggestions_task(user_id):
230 """do the hard work in celery"""
231 suggested_users.rerank_user_suggestions(user_id)
232
233
234 @app.task(queue="low_priority")
235 def rerank_user_task(user_id, update_only=False):
236 """do the hard work in celery"""
237 user = models.User.objects.get(id=user_id)
238 suggested_users.rerank_obj(user, update_only=update_only)
239
240
241 @app.task(queue="low_priority")
242 def remove_user_task(user_id):
243 """do the hard work in celery"""
244 user = models.User.objects.get(id=user_id)
245 suggested_users.remove_object_from_related_stores(user)
246
247
248 @app.task(queue="medium_priority")
249 def remove_suggestion_task(user_id, suggested_user_id):
250 """remove a specific user from a specific user's suggestions"""
251 suggested_user = models.User.objects.get(id=suggested_user_id)
252 suggested_users.remove_suggestion(user_id, suggested_user)
253
254
255 @app.task(queue="low_priority")
256 def bulk_remove_instance_task(instance_id):
257 """remove a bunch of users from recs"""
258 for user in models.User.objects.filter(federated_server__id=instance_id):
259 suggested_users.remove_object_from_related_stores(user)
260
261
262 @app.task(queue="low_priority")
263 def bulk_add_instance_task(instance_id):
264 """remove a bunch of users from recs"""
265 for user in models.User.objects.filter(federated_server__id=instance_id):
266 suggested_users.rerank_obj(user, update_only=False)
267
[end of bookwyrm/suggested_users.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/suggested_users.py b/bookwyrm/suggested_users.py
--- a/bookwyrm/suggested_users.py
+++ b/bookwyrm/suggested_users.py
@@ -148,6 +148,17 @@
rerank_user_task.delay(instance.user_object.id, update_only=False)
+@receiver(signals.post_save, sender=models.UserFollowRequest)
+# pylint: disable=unused-argument
+def update_suggestions_on_follow_request(sender, instance, created, *args, **kwargs):
+ """remove a follow from the recs and update the ranks"""
+ if not created or not instance.user_object.discoverable:
+ return
+
+ if instance.user_subject.local:
+ remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)
+
+
@receiver(signals.post_save, sender=models.UserBlocks)
# pylint: disable=unused-argument
def update_suggestions_on_block(sender, instance, *args, **kwargs):
| {"golden_diff": "diff --git a/bookwyrm/suggested_users.py b/bookwyrm/suggested_users.py\n--- a/bookwyrm/suggested_users.py\n+++ b/bookwyrm/suggested_users.py\n@@ -148,6 +148,17 @@\n rerank_user_task.delay(instance.user_object.id, update_only=False)\n \n \n+@receiver(signals.post_save, sender=models.UserFollowRequest)\n+# pylint: disable=unused-argument\n+def update_suggestions_on_follow_request(sender, instance, created, *args, **kwargs):\n+ \"\"\"remove a follow from the recs and update the ranks\"\"\"\n+ if not created or not instance.user_object.discoverable:\n+ return\n+\n+ if instance.user_subject.local:\n+ remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)\n+\n+\n @receiver(signals.post_save, sender=models.UserBlocks)\n # pylint: disable=unused-argument\n def update_suggestions_on_block(sender, instance, *args, **kwargs):\n", "issue": "Pending follow requests still show up in suggested users\nAn additional signal is needed to remove users from suggestions when a follow request is created\n", "before_files": [{"content": "\"\"\" store recommended follows in redis \"\"\"\nimport math\nimport logging\nfrom django.dispatch import receiver\nfrom django.db.models import signals, Count, Q\n\nfrom bookwyrm import models\nfrom bookwyrm.redis_store import RedisStore, r\nfrom bookwyrm.tasks import app\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass SuggestedUsers(RedisStore):\n \"\"\"suggested users for a user\"\"\"\n\n max_length = 30\n\n def get_rank(self, obj):\n \"\"\"get computed rank\"\"\"\n return obj.mutuals # + (1.0 - (1.0 / (obj.shared_books + 1)))\n\n def store_id(self, user): # pylint: disable=no-self-use\n \"\"\"the key used to store this user's recs\"\"\"\n if isinstance(user, int):\n return f\"{user}-suggestions\"\n return f\"{user.id}-suggestions\"\n\n def get_counts_from_rank(self, rank): # pylint: disable=no-self-use\n \"\"\"calculate mutuals count and shared books count from rank\"\"\"\n return {\n \"mutuals\": math.floor(rank),\n # \"shared_books\": int(1 / (-1 * (rank % 1 - 1))) - 1,\n }\n\n def get_objects_for_store(self, store):\n \"\"\"a list of potential follows for a user\"\"\"\n user = models.User.objects.get(id=store.split(\"-\")[0])\n\n return get_annotated_users(\n user,\n ~Q(id=user.id),\n ~Q(followers=user),\n ~Q(follower_requests=user),\n bookwyrm_user=True,\n )\n\n def get_stores_for_object(self, obj):\n return [self.store_id(u) for u in self.get_users_for_object(obj)]\n\n def get_users_for_object(self, obj): # pylint: disable=no-self-use\n \"\"\"given a user, who might want to follow them\"\"\"\n return models.User.objects.filter(local=True,).exclude(\n Q(id=obj.id) | Q(followers=obj) | Q(id__in=obj.blocks.all()) | Q(blocks=obj)\n )\n\n def rerank_obj(self, obj, update_only=True):\n \"\"\"update all the instances of this user with new ranks\"\"\"\n pipeline = r.pipeline()\n for store_user in self.get_users_for_object(obj):\n annotated_user = get_annotated_users(\n store_user,\n id=obj.id,\n ).first()\n if not annotated_user:\n continue\n\n pipeline.zadd(\n self.store_id(store_user),\n self.get_value(annotated_user),\n xx=update_only,\n )\n pipeline.execute()\n\n def rerank_user_suggestions(self, user):\n \"\"\"update the ranks of the follows suggested to a user\"\"\"\n self.populate_store(self.store_id(user))\n\n def remove_suggestion(self, user, suggested_user):\n \"\"\"take a user out of someone's suggestions\"\"\"\n self.bulk_remove_objects_from_store([suggested_user], self.store_id(user))\n\n def get_suggestions(self, 
user):\n \"\"\"get suggestions\"\"\"\n values = self.get_store(self.store_id(user), withscores=True)\n results = []\n # annotate users with mutuals and shared book counts\n for user_id, rank in values:\n counts = self.get_counts_from_rank(rank)\n try:\n user = models.User.objects.get(\n id=user_id, is_active=True, bookwyrm_user=True\n )\n except models.User.DoesNotExist as err:\n # if this happens, the suggestions are janked way up\n logger.exception(err)\n continue\n user.mutuals = counts[\"mutuals\"]\n # user.shared_books = counts[\"shared_books\"]\n results.append(user)\n if len(results) >= 5:\n break\n return results\n\n\ndef get_annotated_users(viewer, *args, **kwargs):\n \"\"\"Users, annotated with things they have in common\"\"\"\n return (\n models.User.objects.filter(discoverable=True, is_active=True, *args, **kwargs)\n .exclude(Q(id__in=viewer.blocks.all()) | Q(blocks=viewer))\n .annotate(\n mutuals=Count(\n \"followers\",\n filter=Q(\n ~Q(id=viewer.id),\n ~Q(id__in=viewer.following.all()),\n followers__in=viewer.following.all(),\n ),\n distinct=True,\n ),\n # shared_books=Count(\n # \"shelfbook\",\n # filter=Q(\n # ~Q(id=viewer.id),\n # shelfbook__book__parent_work__in=[\n # s.book.parent_work for s in viewer.shelfbook_set.all()\n # ],\n # ),\n # distinct=True,\n # ),\n )\n )\n\n\nsuggested_users = SuggestedUsers()\n\n\n@receiver(signals.post_save, sender=models.UserFollows)\n# pylint: disable=unused-argument\ndef update_suggestions_on_follow(sender, instance, created, *args, **kwargs):\n \"\"\"remove a follow from the recs and update the ranks\"\"\"\n if not created or not instance.user_object.discoverable:\n return\n\n if instance.user_subject.local:\n remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)\n rerank_user_task.delay(instance.user_object.id, update_only=False)\n\n\n@receiver(signals.post_save, sender=models.UserBlocks)\n# pylint: disable=unused-argument\ndef update_suggestions_on_block(sender, instance, *args, **kwargs):\n \"\"\"remove blocked users from recs\"\"\"\n if instance.user_subject.local and instance.user_object.discoverable:\n remove_suggestion_task.delay(instance.user_subject.id, instance.user_object.id)\n if instance.user_object.local and instance.user_subject.discoverable:\n remove_suggestion_task.delay(instance.user_object.id, instance.user_subject.id)\n\n\n@receiver(signals.post_delete, sender=models.UserFollows)\n# pylint: disable=unused-argument\ndef update_suggestions_on_unfollow(sender, instance, **kwargs):\n \"\"\"update rankings, but don't re-suggest because it was probably intentional\"\"\"\n if instance.user_object.discoverable:\n rerank_user_task.delay(instance.user_object.id, update_only=False)\n\n\n# @receiver(signals.post_save, sender=models.ShelfBook)\n# @receiver(signals.post_delete, sender=models.ShelfBook)\n# # pylint: disable=unused-argument\n# def update_rank_on_shelving(sender, instance, *args, **kwargs):\n# \"\"\"when a user shelves or unshelves a book, re-compute their rank\"\"\"\n# # if it's a local user, re-calculate who is rec'ed to them\n# if instance.user.local:\n# rerank_suggestions_task.delay(instance.user.id)\n#\n# # if the user is discoverable, update their rankings\n# if instance.user.discoverable:\n# rerank_user_task.delay(instance.user.id)\n\n\n@receiver(signals.post_save, sender=models.User)\n# pylint: disable=unused-argument, too-many-arguments\ndef update_user(sender, instance, created, update_fields=None, **kwargs):\n \"\"\"an updated user, neat\"\"\"\n # a new user is found, create 
suggestions for them\n if created and instance.local:\n rerank_suggestions_task.delay(instance.id)\n\n # we know what fields were updated and discoverability didn't change\n if not instance.bookwyrm_user or (\n update_fields and not \"discoverable\" in update_fields\n ):\n return\n\n # deleted the user\n if not created and not instance.is_active:\n remove_user_task.delay(instance.id)\n return\n\n # this happens on every save, not just when discoverability changes, annoyingly\n if instance.discoverable:\n rerank_user_task.delay(instance.id, update_only=False)\n elif not created:\n remove_user_task.delay(instance.id)\n\n\n@receiver(signals.post_save, sender=models.FederatedServer)\ndef domain_level_update(sender, instance, created, update_fields=None, **kwargs):\n \"\"\"remove users on a domain block\"\"\"\n if (\n not update_fields\n or \"status\" not in update_fields\n or instance.application_type != \"bookwyrm\"\n ):\n return\n\n if instance.status == \"blocked\":\n bulk_remove_instance_task.delay(instance.id)\n return\n bulk_add_instance_task.delay(instance.id)\n\n\n# ------------------- TASKS\n\n\[email protected](queue=\"low_priority\")\ndef rerank_suggestions_task(user_id):\n \"\"\"do the hard work in celery\"\"\"\n suggested_users.rerank_user_suggestions(user_id)\n\n\[email protected](queue=\"low_priority\")\ndef rerank_user_task(user_id, update_only=False):\n \"\"\"do the hard work in celery\"\"\"\n user = models.User.objects.get(id=user_id)\n suggested_users.rerank_obj(user, update_only=update_only)\n\n\[email protected](queue=\"low_priority\")\ndef remove_user_task(user_id):\n \"\"\"do the hard work in celery\"\"\"\n user = models.User.objects.get(id=user_id)\n suggested_users.remove_object_from_related_stores(user)\n\n\[email protected](queue=\"medium_priority\")\ndef remove_suggestion_task(user_id, suggested_user_id):\n \"\"\"remove a specific user from a specific user's suggestions\"\"\"\n suggested_user = models.User.objects.get(id=suggested_user_id)\n suggested_users.remove_suggestion(user_id, suggested_user)\n\n\[email protected](queue=\"low_priority\")\ndef bulk_remove_instance_task(instance_id):\n \"\"\"remove a bunch of users from recs\"\"\"\n for user in models.User.objects.filter(federated_server__id=instance_id):\n suggested_users.remove_object_from_related_stores(user)\n\n\[email protected](queue=\"low_priority\")\ndef bulk_add_instance_task(instance_id):\n \"\"\"remove a bunch of users from recs\"\"\"\n for user in models.User.objects.filter(federated_server__id=instance_id):\n suggested_users.rerank_obj(user, update_only=False)\n", "path": "bookwyrm/suggested_users.py"}]} | 3,381 | 212 |
gh_patches_debug_47653 | rasdani/github-patches | git_diff | DataBiosphere__toil-4528 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
WES ignores host in production
When trying to run `toil server --host 0.0.0.0`, I noticed that it would always listen only on `127.0.0.1` no matter what `--host` was set to, but running with `--debug` didn't have this problem.
```
❯ toil server --host 0.0.0.0
...
[2022-11-11 16:50:46 +0000] [7173] [INFO] Starting gunicorn 20.1.0
[2022-11-11 16:50:46 +0000] [7173] [INFO] Listening at: http://127.0.0.1:8000
...
```
vs
```
❯ toil server --host 0.0.0.0 --debug
...
INFO:werkzeug:WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.
* Running on all addresses (0.0.0.0)
* Running on http://127.0.0.1:8080
...
```
I tracked the problem down to [this line](https://github.com/DataBiosphere/toil/blob/master/src/toil/server/wsgi_app.py#L44). It appears to be overwriting the settings taken from the command line with Gunicorn's defaults before checking to see if anything has been set, which `bind` won't be, as it's been set to `None` in the merge.
Swapping the dictionaries around seems to have fixed it.
```python
for key, value in {**vars(env_args), **self.options}.items():
```
┆Issue is synchronized with this [Jira Story](https://ucsc-cgl.atlassian.net/browse/TOIL-1242)
┆Issue Number: TOIL-1242
</issue>
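The underlying Python behaviour: in `{**a, **b}` the right-hand mapping wins. With the original order, Gunicorn's parsed environment defaults (including `bind=None`) clobber the CLI-derived options, and the later `is not None` guard then skips `bind` entirely, so Gunicorn falls back to `127.0.0.1:8000`. A simplified illustration with made-up values:

```python
cli_options = {"bind": "0.0.0.0:8000"}            # built from --host/--port
gunicorn_defaults = {"bind": None, "workers": 1}  # roughly what cfg.parser() hands back

print({**cli_options, **gunicorn_defaults})  # {'bind': None, 'workers': 1}            -> bug
print({**gunicorn_defaults, **cli_options})  # {'bind': '0.0.0.0:8000', 'workers': 1}  -> fix
```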
<code>
[start of src/toil/server/wsgi_app.py]
1 # Copyright (C) 2015-2021 Regents of the University of California
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, Dict, Optional
15
16 from gunicorn.app.base import BaseApplication # type: ignore
17
18
19 class GunicornApplication(BaseApplication): # type: ignore
20 """
21 An entry point to integrate a Gunicorn WSGI server in Python. To start a
22 WSGI application with callable `app`, run the following code:
23
24 WSGIApplication(app, options={
25 ...
26 }).run()
27
28 For more details, see: https://docs.gunicorn.org/en/latest/custom.html
29 """
30 def __init__(self, app: object, options: Optional[Dict[str, Any]] = None):
31 self.options = options or {}
32 self.application = app
33 super().__init__()
34
35 def init(self, *args: Any) -> None:
36 pass
37
38 def load_config(self) -> None:
39 parser = self.cfg.parser()
40 env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())
41
42 # TODO: also read from the Gunicorn config file?
43
44 for key, value in {**self.options, **vars(env_args)}.items():
45 if key in self.cfg.settings and value is not None:
46 self.cfg.set(key.lower(), value)
47
48 def load(self) -> object:
49 return self.application
50
51
52 def run_app(app: object, options: Optional[Dict[str, Any]] = None) -> None:
53 """
54 Run a Gunicorn WSGI server.
55 """
56 GunicornApplication(app, options=options).run()
57
[end of src/toil/server/wsgi_app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/toil/server/wsgi_app.py b/src/toil/server/wsgi_app.py
--- a/src/toil/server/wsgi_app.py
+++ b/src/toil/server/wsgi_app.py
@@ -41,7 +41,7 @@
# TODO: also read from the Gunicorn config file?
- for key, value in {**self.options, **vars(env_args)}.items():
+ for key, value in {**vars(env_args), **self.options}.items():
if key in self.cfg.settings and value is not None:
self.cfg.set(key.lower(), value)
| {"golden_diff": "diff --git a/src/toil/server/wsgi_app.py b/src/toil/server/wsgi_app.py\n--- a/src/toil/server/wsgi_app.py\n+++ b/src/toil/server/wsgi_app.py\n@@ -41,7 +41,7 @@\n \n # TODO: also read from the Gunicorn config file?\n \n- for key, value in {**self.options, **vars(env_args)}.items():\n+ for key, value in {**vars(env_args), **self.options}.items():\n if key in self.cfg.settings and value is not None:\n self.cfg.set(key.lower(), value)\n", "issue": "WES ignores host in production\nWhen trying to run `toil server --host 0.0.0.0`, I noticed that it would always only listen on `127.0.0.1` no matter what `--host` is set to but running with `--debug` didn't have this problem.\n\n```\n\u276f toil server --host 0.0.0.0\n...\n[2022-11-11 16:50:46 +0000] [7173] [INFO] Starting gunicorn 20.1.0\n[2022-11-11 16:50:46 +0000] [7173] [INFO] Listening at: http://127.0.0.1:8000\n...\n```\nvs\n```\n\u276f toil server --host 0.0.0.0 --debug\n...\nINFO:werkzeug:WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.\n * Running on all addresses (0.0.0.0)\n * Running on http://127.0.0.1:8080\n...\n```\n\nI tracked the problem down to [this line](https://github.com/DataBiosphere/toil/blob/master/src/toil/server/wsgi_app.py#L44). It appears to be overwriting the settings taken from the command line with Gunicorn's defaults before checking to see if anything has been set which `bind` won't be as it's been set to `None` in the merge.\n\nSwapping the dictionaries around seems to have fixed it.\n```python\n for key, value in {**vars(env_args), **self.options}.items():\n```\n\n\u2506Issue is synchronized with this [Jira Story](https://ucsc-cgl.atlassian.net/browse/TOIL-1242)\n\u2506Issue Number: TOIL-1242\n\n", "before_files": [{"content": "# Copyright (C) 2015-2021 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Dict, Optional\n\nfrom gunicorn.app.base import BaseApplication # type: ignore\n\n\nclass GunicornApplication(BaseApplication): # type: ignore\n \"\"\"\n An entry point to integrate a Gunicorn WSGI server in Python. 
To start a\n WSGI application with callable `app`, run the following code:\n\n WSGIApplication(app, options={\n ...\n }).run()\n\n For more details, see: https://docs.gunicorn.org/en/latest/custom.html\n \"\"\"\n def __init__(self, app: object, options: Optional[Dict[str, Any]] = None):\n self.options = options or {}\n self.application = app\n super().__init__()\n\n def init(self, *args: Any) -> None:\n pass\n\n def load_config(self) -> None:\n parser = self.cfg.parser()\n env_args = parser.parse_args(self.cfg.get_cmd_args_from_env())\n\n # TODO: also read from the Gunicorn config file?\n\n for key, value in {**self.options, **vars(env_args)}.items():\n if key in self.cfg.settings and value is not None:\n self.cfg.set(key.lower(), value)\n\n def load(self) -> object:\n return self.application\n\n\ndef run_app(app: object, options: Optional[Dict[str, Any]] = None) -> None:\n \"\"\"\n Run a Gunicorn WSGI server.\n \"\"\"\n GunicornApplication(app, options=options).run()\n", "path": "src/toil/server/wsgi_app.py"}]} | 1,550 | 131 |
gh_patches_debug_40009 | rasdani/github-patches | git_diff | Textualize__textual-1051 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Button.watch_variant does not remove old_variant class
When changing the variant of a Button widget, the new variant gets added to the classes, but the old one is not removed.
Minimal example:
```python
from textual.app import App, ComposeResult
from textual.widgets import Button
class MyApp(App):
def __init__(self):
super().__init__()
self.button = Button()
self.button.variant = "warning"
self.button.variant = "default"
def compose(self) -> ComposeResult:
yield self.button
MyApp().run()
```
This still displays the button in "warning" style as it has both classes.
I believe the problem is the underscore instead of a dash in the watch_variant method.
https://github.com/Textualize/textual/blob/main/src/textual/widgets/_button.py#L218
textual version 0.2.1
terminal: xterm-256color on linux ubuntu 22.04 (Regolith)
</issue>
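One possible shape of the fix, using Textual's old/new watcher signature and the dash-prefixed class names from the widget's `DEFAULT_CSS` shown below (a sketch, not necessarily the exact upstream patch):

```python
def watch_variant(self, old_variant: str, variant: str) -> None:
    # the CSS classes use a leading dash ("-warning"), not an underscore
    self.remove_class(f"-{old_variant}")
    self.add_class(f"-{variant}")
```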
<code>
[start of src/textual/widgets/_button.py]
1 from __future__ import annotations
2
3 import sys
4 from functools import partial
5 from typing import cast
6
7 if sys.version_info >= (3, 8):
8 from typing import Literal
9 else:
10 from typing_extensions import Literal # pragma: no cover
11
12 import rich.repr
13 from rich.console import RenderableType
14 from rich.text import Text, TextType
15
16 from .. import events
17 from ..css._error_tools import friendly_list
18 from ..message import Message
19 from ..reactive import Reactive
20 from ..widgets import Static
21
22 ButtonVariant = Literal["default", "primary", "success", "warning", "error"]
23 _VALID_BUTTON_VARIANTS = {"default", "primary", "success", "warning", "error"}
24
25
26 class InvalidButtonVariant(Exception):
27 pass
28
29
30 class Button(Static, can_focus=True):
31 """A simple clickable button."""
32
33 DEFAULT_CSS = """
34 Button {
35 width: auto;
36 min-width: 16;
37 height: 3;
38 background: $panel;
39 color: $text;
40 border: none;
41 border-top: tall $panel-lighten-2;
42 border-bottom: tall $panel-darken-3;
43 content-align: center middle;
44 text-style: bold;
45 }
46
47 Button.-disabled {
48 opacity: 0.4;
49 text-opacity: 0.7;
50 }
51
52 Button:focus {
53 text-style: bold reverse;
54 }
55
56 Button:hover {
57 border-top: tall $panel-lighten-1;
58 background: $panel-darken-2;
59 color: $text;
60 }
61
62 Button.-active {
63 background: $panel;
64 border-bottom: tall $panel-lighten-2;
65 border-top: tall $panel-darken-2;
66 tint: $background 30%;
67 }
68
69 /* Primary variant */
70 Button.-primary {
71 background: $primary;
72 color: $text;
73 border-top: tall $primary-lighten-3;
74 border-bottom: tall $primary-darken-3;
75
76 }
77
78 Button.-primary:hover {
79 background: $primary-darken-2;
80 color: $text;
81 border-top: tall $primary-lighten-2;
82 }
83
84 Button.-primary.-active {
85 background: $primary;
86 border-bottom: tall $primary-lighten-3;
87 border-top: tall $primary-darken-3;
88 }
89
90
91 /* Success variant */
92 Button.-success {
93 background: $success;
94 color: $text;
95 border-top: tall $success-lighten-2;
96 border-bottom: tall $success-darken-3;
97 }
98
99 Button.-success:hover {
100 background: $success-darken-2;
101 color: $text;
102 }
103
104 Button.-success.-active {
105 background: $success;
106 border-bottom: tall $success-lighten-2;
107 border-top: tall $success-darken-2;
108 }
109
110
111 /* Warning variant */
112 Button.-warning {
113 background: $warning;
114 color: $text;
115 border-top: tall $warning-lighten-2;
116 border-bottom: tall $warning-darken-3;
117 }
118
119 Button.-warning:hover {
120 background: $warning-darken-2;
121 color: $text;
122
123 }
124
125 Button.-warning.-active {
126 background: $warning;
127 border-bottom: tall $warning-lighten-2;
128 border-top: tall $warning-darken-2;
129 }
130
131
132 /* Error variant */
133 Button.-error {
134 background: $error;
135 color: $text;
136 border-top: tall $error-lighten-2;
137 border-bottom: tall $error-darken-3;
138
139 }
140
141 Button.-error:hover {
142 background: $error-darken-1;
143 color: $text;
144
145 }
146
147 Button.-error.-active {
148 background: $error;
149 border-bottom: tall $error-lighten-2;
150 border-top: tall $error-darken-2;
151 }
152
153 """
154
155 ACTIVE_EFFECT_DURATION = 0.3
156 """When buttons are clicked they get the `-active` class for this duration (in seconds)"""
157
158 class Pressed(Message, bubble=True):
159 @property
160 def button(self) -> Button:
161 return cast(Button, self.sender)
162
163 def __init__(
164 self,
165 label: TextType | None = None,
166 disabled: bool = False,
167 variant: ButtonVariant = "default",
168 *,
169 name: str | None = None,
170 id: str | None = None,
171 classes: str | None = None,
172 ):
173 """Create a Button widget.
174
175 Args:
176 label (str): The text that appears within the button.
177 disabled (bool): Whether the button is disabled or not.
178 variant (ButtonVariant): The variant of the button.
179 name: The name of the button.
180 id: The ID of the button in the DOM.
181 classes: The CSS classes of the button.
182 """
183 super().__init__(name=name, id=id, classes=classes)
184
185 if label is None:
186 label = self.css_identifier_styled
187
188 self.label = self.validate_label(label)
189
190 self.disabled = disabled
191 if disabled:
192 self.add_class("-disabled")
193
194 self.variant = variant
195
196 label: Reactive[RenderableType] = Reactive("")
197 variant = Reactive.init("default")
198 disabled = Reactive(False)
199
200 def __rich_repr__(self) -> rich.repr.Result:
201 yield from super().__rich_repr__()
202 yield "variant", self.variant, "default"
203 yield "disabled", self.disabled, False
204
205 def watch_mouse_over(self, value: bool) -> None:
206 """Update from CSS if mouse over state changes."""
207 if self._has_hover_style and not self.disabled:
208 self.app.update_styles(self)
209
210 def validate_variant(self, variant: str) -> str:
211 if variant not in _VALID_BUTTON_VARIANTS:
212 raise InvalidButtonVariant(
213 f"Valid button variants are {friendly_list(_VALID_BUTTON_VARIANTS)}"
214 )
215 return variant
216
217 def watch_variant(self, old_variant: str, variant: str):
218 self.remove_class(f"_{old_variant}")
219 self.add_class(f"-{variant}")
220
221 def watch_disabled(self, disabled: bool) -> None:
222 self.set_class(disabled, "-disabled")
223 self.can_focus = not disabled
224
225 def validate_label(self, label: RenderableType) -> RenderableType:
226 """Parse markup for self.label"""
227 if isinstance(label, str):
228 return Text.from_markup(label)
229 return label
230
231 def render(self) -> RenderableType:
232 label = self.label.copy()
233 label = Text.assemble(" ", label, " ")
234 label.stylize(self.text_style)
235 return label
236
237 async def _on_click(self, event: events.Click) -> None:
238 event.stop()
239 self.press()
240
241 def press(self) -> None:
242 """Respond to a button press."""
243 if self.disabled or not self.display:
244 return
245 # Manage the "active" effect:
246 self._start_active_affect()
247 # ...and let other components know that we've just been clicked:
248 self.emit_no_wait(Button.Pressed(self))
249
250 def _start_active_affect(self) -> None:
251 """Start a small animation to show the button was clicked."""
252 self.add_class("-active")
253 self.set_timer(
254 self.ACTIVE_EFFECT_DURATION, partial(self.remove_class, "-active")
255 )
256
257 async def _on_key(self, event: events.Key) -> None:
258 if event.key == "enter" and not self.disabled:
259 self._start_active_affect()
260 await self.emit(Button.Pressed(self))
261
262 @classmethod
263 def success(
264 cls,
265 label: TextType | None = None,
266 disabled: bool = False,
267 *,
268 name: str | None = None,
269 id: str | None = None,
270 classes: str | None = None,
271 ) -> Button:
272 """Utility constructor for creating a success Button variant.
273
274 Args:
275 label (str): The text that appears within the button.
276 disabled (bool): Whether the button is disabled or not.
277 name: The name of the button.
278 id: The ID of the button in the DOM.
279 classes: The CSS classes of the button.
280
281 Returns:
282 Button: A Button widget of the 'success' variant.
283 """
284 return Button(
285 label=label,
286 disabled=disabled,
287 variant="success",
288 name=name,
289 id=id,
290 classes=classes,
291 )
292
293 @classmethod
294 def warning(
295 cls,
296 label: TextType | None = None,
297 disabled: bool = False,
298 *,
299 name: str | None = None,
300 id: str | None = None,
301 classes: str | None = None,
302 ) -> Button:
303 """Utility constructor for creating a warning Button variant.
304
305 Args:
306 label (str): The text that appears within the button.
307 disabled (bool): Whether the button is disabled or not.
308 name: The name of the button.
309 id: The ID of the button in the DOM.
310 classes: The CSS classes of the button.
311
312 Returns:
313 Button: A Button widget of the 'warning' variant.
314 """
315 return Button(
316 label=label,
317 disabled=disabled,
318 variant="warning",
319 name=name,
320 id=id,
321 classes=classes,
322 )
323
324 @classmethod
325 def error(
326 cls,
327 label: TextType | None = None,
328 disabled: bool = False,
329 *,
330 name: str | None = None,
331 id: str | None = None,
332 classes: str | None = None,
333 ) -> Button:
334 """Utility constructor for creating an error Button variant.
335
336 Args:
337 label (str): The text that appears within the button.
338 disabled (bool): Whether the button is disabled or not.
339 name: The name of the button.
340 id: The ID of the button in the DOM.
341 classes: The CSS classes of the button.
342
343 Returns:
344 Button: A Button widget of the 'error' variant.
345 """
346 return Button(
347 label=label,
348 disabled=disabled,
349 variant="error",
350 name=name,
351 id=id,
352 classes=classes,
353 )
354
[end of src/textual/widgets/_button.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/textual/widgets/_button.py b/src/textual/widgets/_button.py
--- a/src/textual/widgets/_button.py
+++ b/src/textual/widgets/_button.py
@@ -36,12 +36,12 @@
min-width: 16;
height: 3;
background: $panel;
- color: $text;
- border: none;
+ color: $text;
+ border: none;
border-top: tall $panel-lighten-2;
border-bottom: tall $panel-darken-3;
- content-align: center middle;
- text-style: bold;
+ content-align: center middle;
+ text-style: bold;
}
Button.-disabled {
@@ -50,7 +50,7 @@
}
Button:focus {
- text-style: bold reverse;
+ text-style: bold reverse;
}
Button:hover {
@@ -62,7 +62,7 @@
Button.-active {
background: $panel;
border-bottom: tall $panel-lighten-2;
- border-top: tall $panel-darken-2;
+ border-top: tall $panel-darken-2;
tint: $background 30%;
}
@@ -72,7 +72,7 @@
color: $text;
border-top: tall $primary-lighten-3;
border-bottom: tall $primary-darken-3;
-
+
}
Button.-primary:hover {
@@ -107,7 +107,7 @@
border-top: tall $success-darken-2;
}
-
+
/* Warning variant */
Button.-warning {
background: $warning;
@@ -119,7 +119,7 @@
Button.-warning:hover {
background: $warning-darken-2;
color: $text;
-
+
}
Button.-warning.-active {
@@ -127,7 +127,7 @@
border-bottom: tall $warning-lighten-2;
border-top: tall $warning-darken-2;
}
-
+
/* Error variant */
Button.-error {
@@ -135,7 +135,7 @@
color: $text;
border-top: tall $error-lighten-2;
border-bottom: tall $error-darken-3;
-
+
}
Button.-error:hover {
@@ -215,7 +215,7 @@
return variant
def watch_variant(self, old_variant: str, variant: str):
- self.remove_class(f"_{old_variant}")
+ self.remove_class(f"-{old_variant}")
self.add_class(f"-{variant}")
def watch_disabled(self, disabled: bool) -> None:
| {"golden_diff": "diff --git a/src/textual/widgets/_button.py b/src/textual/widgets/_button.py\n--- a/src/textual/widgets/_button.py\n+++ b/src/textual/widgets/_button.py\n@@ -36,12 +36,12 @@\n min-width: 16;\n height: 3;\n background: $panel;\n- color: $text; \n- border: none; \n+ color: $text;\n+ border: none;\n border-top: tall $panel-lighten-2;\n border-bottom: tall $panel-darken-3;\n- content-align: center middle; \n- text-style: bold; \n+ content-align: center middle;\n+ text-style: bold;\n }\n \n Button.-disabled {\n@@ -50,7 +50,7 @@\n }\n \n Button:focus {\n- text-style: bold reverse; \n+ text-style: bold reverse;\n }\n \n Button:hover {\n@@ -62,7 +62,7 @@\n Button.-active {\n background: $panel;\n border-bottom: tall $panel-lighten-2;\n- border-top: tall $panel-darken-2; \n+ border-top: tall $panel-darken-2;\n tint: $background 30%;\n }\n \n@@ -72,7 +72,7 @@\n color: $text;\n border-top: tall $primary-lighten-3;\n border-bottom: tall $primary-darken-3;\n- \n+\n }\n \n Button.-primary:hover {\n@@ -107,7 +107,7 @@\n border-top: tall $success-darken-2;\n }\n \n- \n+\n /* Warning variant */\n Button.-warning {\n background: $warning;\n@@ -119,7 +119,7 @@\n Button.-warning:hover {\n background: $warning-darken-2;\n color: $text;\n- \n+\n }\n \n Button.-warning.-active {\n@@ -127,7 +127,7 @@\n border-bottom: tall $warning-lighten-2;\n border-top: tall $warning-darken-2;\n }\n- \n+\n \n /* Error variant */\n Button.-error {\n@@ -135,7 +135,7 @@\n color: $text;\n border-top: tall $error-lighten-2;\n border-bottom: tall $error-darken-3;\n- \n+\n }\n \n Button.-error:hover {\n@@ -215,7 +215,7 @@\n return variant\n \n def watch_variant(self, old_variant: str, variant: str):\n- self.remove_class(f\"_{old_variant}\")\n+ self.remove_class(f\"-{old_variant}\")\n self.add_class(f\"-{variant}\")\n \n def watch_disabled(self, disabled: bool) -> None:\n", "issue": "Button.watch_variant does not remove old_variant class\nWhen changing the variant of a Button widget, the new variant gets added to the classes, but the old one is not removed.\r\nMinimal example:\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.widgets import Button\r\n\r\n\r\nclass MyApp(App):\r\n\tdef __init__(self):\r\n\t\tsuper().__init__()\r\n\t\tself.button = Button()\r\n\t\tself.button.variant = \"warning\"\r\n\t\tself.button.variant = \"default\"\r\n\r\n\tdef compose(self) -> ComposeResult:\r\n\t\tyield self.button\r\n\r\nMyApp().run()\r\n```\r\nThis still displays the button in \"warning\" style as it has both classes.\r\nI believe the problem is the underscore instead of a dash in the watch_variant method.\r\nhttps://github.com/Textualize/textual/blob/main/src/textual/widgets/_button.py#L218\r\n\r\ntextual version 0.2.1\r\nterminal: xterm-256color on linux ubuntu 22.04 (Regolith)\n", "before_files": [{"content": "from __future__ import annotations\n\nimport sys\nfrom functools import partial\nfrom typing import cast\n\nif sys.version_info >= (3, 8):\n from typing import Literal\nelse:\n from typing_extensions import Literal # pragma: no cover\n\nimport rich.repr\nfrom rich.console import RenderableType\nfrom rich.text import Text, TextType\n\nfrom .. 
import events\nfrom ..css._error_tools import friendly_list\nfrom ..message import Message\nfrom ..reactive import Reactive\nfrom ..widgets import Static\n\nButtonVariant = Literal[\"default\", \"primary\", \"success\", \"warning\", \"error\"]\n_VALID_BUTTON_VARIANTS = {\"default\", \"primary\", \"success\", \"warning\", \"error\"}\n\n\nclass InvalidButtonVariant(Exception):\n pass\n\n\nclass Button(Static, can_focus=True):\n \"\"\"A simple clickable button.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Button {\n width: auto;\n min-width: 16;\n height: 3;\n background: $panel;\n color: $text; \n border: none; \n border-top: tall $panel-lighten-2;\n border-bottom: tall $panel-darken-3;\n content-align: center middle; \n text-style: bold; \n }\n\n Button.-disabled {\n opacity: 0.4;\n text-opacity: 0.7;\n }\n\n Button:focus {\n text-style: bold reverse; \n }\n\n Button:hover {\n border-top: tall $panel-lighten-1;\n background: $panel-darken-2;\n color: $text;\n }\n\n Button.-active {\n background: $panel;\n border-bottom: tall $panel-lighten-2;\n border-top: tall $panel-darken-2; \n tint: $background 30%;\n }\n\n /* Primary variant */\n Button.-primary {\n background: $primary;\n color: $text;\n border-top: tall $primary-lighten-3;\n border-bottom: tall $primary-darken-3;\n \n }\n\n Button.-primary:hover {\n background: $primary-darken-2;\n color: $text;\n border-top: tall $primary-lighten-2;\n }\n\n Button.-primary.-active {\n background: $primary;\n border-bottom: tall $primary-lighten-3;\n border-top: tall $primary-darken-3;\n }\n\n\n /* Success variant */\n Button.-success {\n background: $success;\n color: $text;\n border-top: tall $success-lighten-2;\n border-bottom: tall $success-darken-3;\n }\n\n Button.-success:hover {\n background: $success-darken-2;\n color: $text;\n }\n\n Button.-success.-active {\n background: $success;\n border-bottom: tall $success-lighten-2;\n border-top: tall $success-darken-2;\n }\n\n \n /* Warning variant */\n Button.-warning {\n background: $warning;\n color: $text;\n border-top: tall $warning-lighten-2;\n border-bottom: tall $warning-darken-3;\n }\n\n Button.-warning:hover {\n background: $warning-darken-2;\n color: $text;\n \n }\n\n Button.-warning.-active {\n background: $warning;\n border-bottom: tall $warning-lighten-2;\n border-top: tall $warning-darken-2;\n }\n \n\n /* Error variant */\n Button.-error {\n background: $error;\n color: $text;\n border-top: tall $error-lighten-2;\n border-bottom: tall $error-darken-3;\n \n }\n\n Button.-error:hover {\n background: $error-darken-1;\n color: $text;\n\n }\n\n Button.-error.-active {\n background: $error;\n border-bottom: tall $error-lighten-2;\n border-top: tall $error-darken-2;\n }\n\n \"\"\"\n\n ACTIVE_EFFECT_DURATION = 0.3\n \"\"\"When buttons are clicked they get the `-active` class for this duration (in seconds)\"\"\"\n\n class Pressed(Message, bubble=True):\n @property\n def button(self) -> Button:\n return cast(Button, self.sender)\n\n def __init__(\n self,\n label: TextType | None = None,\n disabled: bool = False,\n variant: ButtonVariant = \"default\",\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ):\n \"\"\"Create a Button widget.\n\n Args:\n label (str): The text that appears within the button.\n disabled (bool): Whether the button is disabled or not.\n variant (ButtonVariant): The variant of the button.\n name: The name of the button.\n id: The ID of the button in the DOM.\n classes: The CSS classes of the button.\n \"\"\"\n super().__init__(name=name, id=id, 
classes=classes)\n\n if label is None:\n label = self.css_identifier_styled\n\n self.label = self.validate_label(label)\n\n self.disabled = disabled\n if disabled:\n self.add_class(\"-disabled\")\n\n self.variant = variant\n\n label: Reactive[RenderableType] = Reactive(\"\")\n variant = Reactive.init(\"default\")\n disabled = Reactive(False)\n\n def __rich_repr__(self) -> rich.repr.Result:\n yield from super().__rich_repr__()\n yield \"variant\", self.variant, \"default\"\n yield \"disabled\", self.disabled, False\n\n def watch_mouse_over(self, value: bool) -> None:\n \"\"\"Update from CSS if mouse over state changes.\"\"\"\n if self._has_hover_style and not self.disabled:\n self.app.update_styles(self)\n\n def validate_variant(self, variant: str) -> str:\n if variant not in _VALID_BUTTON_VARIANTS:\n raise InvalidButtonVariant(\n f\"Valid button variants are {friendly_list(_VALID_BUTTON_VARIANTS)}\"\n )\n return variant\n\n def watch_variant(self, old_variant: str, variant: str):\n self.remove_class(f\"_{old_variant}\")\n self.add_class(f\"-{variant}\")\n\n def watch_disabled(self, disabled: bool) -> None:\n self.set_class(disabled, \"-disabled\")\n self.can_focus = not disabled\n\n def validate_label(self, label: RenderableType) -> RenderableType:\n \"\"\"Parse markup for self.label\"\"\"\n if isinstance(label, str):\n return Text.from_markup(label)\n return label\n\n def render(self) -> RenderableType:\n label = self.label.copy()\n label = Text.assemble(\" \", label, \" \")\n label.stylize(self.text_style)\n return label\n\n async def _on_click(self, event: events.Click) -> None:\n event.stop()\n self.press()\n\n def press(self) -> None:\n \"\"\"Respond to a button press.\"\"\"\n if self.disabled or not self.display:\n return\n # Manage the \"active\" effect:\n self._start_active_affect()\n # ...and let other components know that we've just been clicked:\n self.emit_no_wait(Button.Pressed(self))\n\n def _start_active_affect(self) -> None:\n \"\"\"Start a small animation to show the button was clicked.\"\"\"\n self.add_class(\"-active\")\n self.set_timer(\n self.ACTIVE_EFFECT_DURATION, partial(self.remove_class, \"-active\")\n )\n\n async def _on_key(self, event: events.Key) -> None:\n if event.key == \"enter\" and not self.disabled:\n self._start_active_affect()\n await self.emit(Button.Pressed(self))\n\n @classmethod\n def success(\n cls,\n label: TextType | None = None,\n disabled: bool = False,\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ) -> Button:\n \"\"\"Utility constructor for creating a success Button variant.\n\n Args:\n label (str): The text that appears within the button.\n disabled (bool): Whether the button is disabled or not.\n name: The name of the button.\n id: The ID of the button in the DOM.\n classes: The CSS classes of the button.\n\n Returns:\n Button: A Button widget of the 'success' variant.\n \"\"\"\n return Button(\n label=label,\n disabled=disabled,\n variant=\"success\",\n name=name,\n id=id,\n classes=classes,\n )\n\n @classmethod\n def warning(\n cls,\n label: TextType | None = None,\n disabled: bool = False,\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ) -> Button:\n \"\"\"Utility constructor for creating a warning Button variant.\n\n Args:\n label (str): The text that appears within the button.\n disabled (bool): Whether the button is disabled or not.\n name: The name of the button.\n id: The ID of the button in the DOM.\n classes: The CSS classes of the button.\n\n 
Returns:\n Button: A Button widget of the 'warning' variant.\n \"\"\"\n return Button(\n label=label,\n disabled=disabled,\n variant=\"warning\",\n name=name,\n id=id,\n classes=classes,\n )\n\n @classmethod\n def error(\n cls,\n label: TextType | None = None,\n disabled: bool = False,\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ) -> Button:\n \"\"\"Utility constructor for creating an error Button variant.\n\n Args:\n label (str): The text that appears within the button.\n disabled (bool): Whether the button is disabled or not.\n name: The name of the button.\n id: The ID of the button in the DOM.\n classes: The CSS classes of the button.\n\n Returns:\n Button: A Button widget of the 'error' variant.\n \"\"\"\n return Button(\n label=label,\n disabled=disabled,\n variant=\"error\",\n name=name,\n id=id,\n classes=classes,\n )\n", "path": "src/textual/widgets/_button.py"}]} | 3,932 | 622 |
gh_patches_debug_22293 | rasdani/github-patches | git_diff | dmlc__dgl-4218 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Example][Bug] Runtime error when running the example case: example/pytorch/arma
## 🐛 Bug
The example of using DGL to implement a GNN with convolutional ARMA filters fails to run.
## To Reproduce
`python citation.py --gpu 0`
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
```
Traceback (most recent call last):
  File "citation.py", line 142, in <module>
    acc_lists.append(main(args))
  File "citation.py", line 84, in main
    train_loss.backward()
  File "/opt/conda/lib/python3.8/site-packages/torch/_tensor.py", line 396, in backward
    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
  File "/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py", line 173, in backward
    Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.cuda.FloatTensor [2708, 7]], which is output 0 of ReluBackward0, is at version 1; expected version 0 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).
```
`python citation.py --gpu 0 --dataset Citeseer --num-stacks 3`
```
Traceback (most recent call last):
  File "citation.py", line 142, in <module>
    acc_lists.append(main(args))
  File "citation.py", line 84, in main
    train_loss.backward()
  File "/opt/conda/lib/python3.8/site-packages/torch/_tensor.py", line 396, in backward
    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
  File "/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py", line 173, in backward
    Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.cuda.FloatTensor [3327, 6]], which is output 0 of ReluBackward0, is at version 2; expected version 0 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).
```
## Expected behavior
The case should run through
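The tracebacks point at the in-place accumulation in `ARMAConv.forward`: `output += feats` mutates the first stack's activation tensor, which autograd still needs for `ReluBackward0`. A hedged sketch of the usual remedy (collect the per-stack outputs in a list and reduce once, which is also what the patch later in this entry does):
```python
import torch

def mean_of_stacks(stack_outputs):
    # Stacking a list of tensors and averaging avoids mutating any tensor
    # that is still part of the autograd graph.
    return torch.stack(stack_outputs).mean(dim=0)

# usage sketch: two "stacks" of node features, gradients flow cleanly
feats = [torch.randn(4, 3, requires_grad=True) for _ in range(2)]
mean_of_stacks(feats).sum().backward()
```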
## Environment
- DGL Version (e.g., 1.0): 0.9
- Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3): 1.12
- OS (e.g., Linux): ubuntu
- How you installed DGL (`conda`, `pip`, source): source
- Build command you used (if compiling from source):
- Python version: 3.8
- CUDA/cuDNN version (if applicable): 11.7
- GPU models and configuration (e.g. V100): A100
- Any other relevant information:
## Additional context
<!-- Add any other context about the problem here. -->
</issue>
<code>
[start of examples/pytorch/arma/model.py]
1 import torch
2 import torch.nn as nn
3 import torch.nn.functional as F
4 import dgl.function as fn
5 import math
6
7 def glorot(tensor):
8 if tensor is not None:
9 stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))
10 tensor.data.uniform_(-stdv, stdv)
11
12 def zeros(tensor):
13 if tensor is not None:
14 tensor.data.fill_(0)
15
16 class ARMAConv(nn.Module):
17 def __init__(self,
18 in_dim,
19 out_dim,
20 num_stacks,
21 num_layers,
22 activation=None,
23 dropout=0.0,
24 bias=True):
25 super(ARMAConv, self).__init__()
26
27 self.in_dim = in_dim
28 self.out_dim = out_dim
29 self.K = num_stacks
30 self.T = num_layers
31 self.activation = activation
32 self.dropout = nn.Dropout(p=dropout)
33
34 # init weight
35 self.w_0 = nn.ModuleDict({
36 str(k): nn.Linear(in_dim, out_dim, bias=False) for k in range(self.K)
37 })
38 # deeper weight
39 self.w = nn.ModuleDict({
40 str(k): nn.Linear(out_dim, out_dim, bias=False) for k in range(self.K)
41 })
42 # v
43 self.v = nn.ModuleDict({
44 str(k): nn.Linear(in_dim, out_dim, bias=False) for k in range(self.K)
45 })
46 # bias
47 if bias:
48 self.bias = nn.Parameter(torch.Tensor(self.K, self.T, 1, self.out_dim))
49 else:
50 self.register_parameter('bias', None)
51
52 self.reset_parameters()
53
54 def reset_parameters(self):
55 for k in range(self.K):
56 glorot(self.w_0[str(k)].weight)
57 glorot(self.w[str(k)].weight)
58 glorot(self.v[str(k)].weight)
59 zeros(self.bias)
60
61 def forward(self, g, feats):
62 with g.local_scope():
63 init_feats = feats
64 # assume that the graphs are undirected and graph.in_degrees() is the same as graph.out_degrees()
65 degs = g.in_degrees().float().clamp(min=1)
66 norm = torch.pow(degs, -0.5).to(feats.device).unsqueeze(1)
67 output = None
68
69 for k in range(self.K):
70 feats = init_feats
71 for t in range(self.T):
72 feats = feats * norm
73 g.ndata['h'] = feats
74 g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))
75 feats = g.ndata.pop('h')
76 feats = feats * norm
77
78 if t == 0:
79 feats = self.w_0[str(k)](feats)
80 else:
81 feats = self.w[str(k)](feats)
82
83 feats += self.dropout(self.v[str(k)](init_feats))
84 feats += self.v[str(k)](self.dropout(init_feats))
85
86 if self.bias is not None:
87 feats += self.bias[k][t]
88
89 if self.activation is not None:
90 feats = self.activation(feats)
91
92 if output is None:
93 output = feats
94 else:
95 output += feats
96
97 return output / self.K
98
99 class ARMA4NC(nn.Module):
100 def __init__(self,
101 in_dim,
102 hid_dim,
103 out_dim,
104 num_stacks,
105 num_layers,
106 activation=None,
107 dropout=0.0):
108 super(ARMA4NC, self).__init__()
109
110 self.conv1 = ARMAConv(in_dim=in_dim,
111 out_dim=hid_dim,
112 num_stacks=num_stacks,
113 num_layers=num_layers,
114 activation=activation,
115 dropout=dropout)
116
117 self.conv2 = ARMAConv(in_dim=hid_dim,
118 out_dim=out_dim,
119 num_stacks=num_stacks,
120 num_layers=num_layers,
121 activation=activation,
122 dropout=dropout)
123
124 self.dropout = nn.Dropout(p=dropout)
125
126 def forward(self, g, feats):
127 feats = F.relu(self.conv1(g, feats))
128 feats = self.dropout(feats)
129 feats = self.conv2(g, feats)
130 return feats
131
[end of examples/pytorch/arma/model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/pytorch/arma/model.py b/examples/pytorch/arma/model.py
--- a/examples/pytorch/arma/model.py
+++ b/examples/pytorch/arma/model.py
@@ -64,7 +64,7 @@
# assume that the graphs are undirected and graph.in_degrees() is the same as graph.out_degrees()
degs = g.in_degrees().float().clamp(min=1)
norm = torch.pow(degs, -0.5).to(feats.device).unsqueeze(1)
- output = None
+ output = []
for k in range(self.K):
feats = init_feats
@@ -88,13 +88,9 @@
if self.activation is not None:
feats = self.activation(feats)
-
- if output is None:
- output = feats
- else:
- output += feats
-
- return output / self.K
+ output.append(feats)
+
+ return torch.stack(output).mean(dim=0)
class ARMA4NC(nn.Module):
def __init__(self,
| {"golden_diff": "diff --git a/examples/pytorch/arma/model.py b/examples/pytorch/arma/model.py\n--- a/examples/pytorch/arma/model.py\n+++ b/examples/pytorch/arma/model.py\n@@ -64,7 +64,7 @@\n # assume that the graphs are undirected and graph.in_degrees() is the same as graph.out_degrees()\n degs = g.in_degrees().float().clamp(min=1)\n norm = torch.pow(degs, -0.5).to(feats.device).unsqueeze(1)\n- output = None\n+ output = [] \n \n for k in range(self.K):\n feats = init_feats\n@@ -88,13 +88,9 @@\n \n if self.activation is not None:\n feats = self.activation(feats)\n- \n- if output is None:\n- output = feats\n- else:\n- output += feats\n- \n- return output / self.K \n+ output.append(feats)\n+\n+ return torch.stack(output).mean(dim=0)\n \n class ARMA4NC(nn.Module):\n def __init__(self,\n", "issue": "[Example][Bug] Running error on the example case: example/pytorch/arma\n## \ud83d\udc1b Bug\r\n\r\nExample of using DGL to implement GNN with convolutional ARMA filters failed.\r\n\r\n## To Reproduce\r\n\r\n`python citation.py --gpu 0`\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n```\r\nTraceback (most recent call last):\r\n File \"citation.py\", line 142, in <module>\r\n acc_lists.append(main(args))\r\n File \"citation.py\", line 84, in main\r\n train_loss.backward()\r\n File \"/opt/conda/lib/python3.8/site-packages/torch/_tensor.py\", line 396, in backward\r\n torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)\r\n File \"/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py\", line 173, in backward\r\n Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass\r\nRuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.cuda.FloatTensor [2708, 7]], which is output 0 of ReluBackward0, is at version 1; expected version 0 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).\r\n```\r\n\r\n`python citation.py --gpu 0 --dataset Citeseer --num-stacks 3`\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"citation.py\", line 142, in <module>\r\n acc_lists.append(main(args))\r\n File \"citation.py\", line 84, in main\r\n train_loss.backward()\r\n File \"/opt/conda/lib/python3.8/site-packages/torch/_tensor.py\", line 396, in backward\r\n torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)\r\n File \"/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py\", line 173, in backward\r\n Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass\r\nRuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.cuda.FloatTensor [3327, 6]], which is output 0 of ReluBackward0, is at version 2; expected version 0 instead. 
Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).\r\n\r\n```\r\n\r\n## Expected behavior\r\n\r\nThe case should run through\r\n\r\n## Environment\r\n\r\n - DGL Version (e.g., 1.0): 0.9\r\n - Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3): 1.12\r\n - OS (e.g., Linux): ubuntu\r\n - How you installed DGL (`conda`, `pip`, source): source\r\n - Build command you used (if compiling from source):\r\n - Python version: 3.8\r\n - CUDA/cuDNN version (if applicable): 11.7\r\n - GPU models and configuration (e.g. V100): A100\r\n - Any other relevant information:\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport dgl.function as fn\nimport math\n\ndef glorot(tensor):\n if tensor is not None:\n stdv = math.sqrt(6.0 / (tensor.size(-2) + tensor.size(-1)))\n tensor.data.uniform_(-stdv, stdv)\n\ndef zeros(tensor):\n if tensor is not None:\n tensor.data.fill_(0)\n\nclass ARMAConv(nn.Module):\n def __init__(self,\n in_dim,\n out_dim,\n num_stacks,\n num_layers,\n activation=None,\n dropout=0.0,\n bias=True):\n super(ARMAConv, self).__init__()\n \n self.in_dim = in_dim\n self.out_dim = out_dim\n self.K = num_stacks\n self.T = num_layers\n self.activation = activation\n self.dropout = nn.Dropout(p=dropout)\n\n # init weight\n self.w_0 = nn.ModuleDict({\n str(k): nn.Linear(in_dim, out_dim, bias=False) for k in range(self.K)\n })\n # deeper weight\n self.w = nn.ModuleDict({\n str(k): nn.Linear(out_dim, out_dim, bias=False) for k in range(self.K)\n })\n # v\n self.v = nn.ModuleDict({\n str(k): nn.Linear(in_dim, out_dim, bias=False) for k in range(self.K)\n })\n # bias\n if bias:\n self.bias = nn.Parameter(torch.Tensor(self.K, self.T, 1, self.out_dim))\n else:\n self.register_parameter('bias', None)\n \n self.reset_parameters()\n\n def reset_parameters(self):\n for k in range(self.K):\n glorot(self.w_0[str(k)].weight)\n glorot(self.w[str(k)].weight)\n glorot(self.v[str(k)].weight)\n zeros(self.bias)\n\n def forward(self, g, feats):\n with g.local_scope():\n init_feats = feats\n # assume that the graphs are undirected and graph.in_degrees() is the same as graph.out_degrees()\n degs = g.in_degrees().float().clamp(min=1)\n norm = torch.pow(degs, -0.5).to(feats.device).unsqueeze(1)\n output = None\n\n for k in range(self.K):\n feats = init_feats\n for t in range(self.T):\n feats = feats * norm\n g.ndata['h'] = feats\n g.update_all(fn.copy_u('h', 'm'), fn.sum('m', 'h'))\n feats = g.ndata.pop('h')\n feats = feats * norm\n\n if t == 0:\n feats = self.w_0[str(k)](feats)\n else:\n feats = self.w[str(k)](feats)\n \n feats += self.dropout(self.v[str(k)](init_feats))\n feats += self.v[str(k)](self.dropout(init_feats))\n\n if self.bias is not None:\n feats += self.bias[k][t]\n \n if self.activation is not None:\n feats = self.activation(feats)\n \n if output is None:\n output = feats\n else:\n output += feats\n \n return output / self.K \n\nclass ARMA4NC(nn.Module):\n def __init__(self,\n in_dim,\n hid_dim,\n out_dim,\n num_stacks,\n num_layers,\n activation=None,\n dropout=0.0):\n super(ARMA4NC, self).__init__()\n\n self.conv1 = ARMAConv(in_dim=in_dim,\n out_dim=hid_dim,\n num_stacks=num_stacks,\n num_layers=num_layers,\n activation=activation,\n dropout=dropout)\n\n self.conv2 = ARMAConv(in_dim=hid_dim,\n out_dim=out_dim,\n num_stacks=num_stacks,\n 
num_layers=num_layers,\n activation=activation,\n dropout=dropout)\n \n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, g, feats):\n feats = F.relu(self.conv1(g, feats))\n feats = self.dropout(feats)\n feats = self.conv2(g, feats)\n return feats\n", "path": "examples/pytorch/arma/model.py"}]} | 2,479 | 241 |
gh_patches_debug_25085 | rasdani/github-patches | git_diff | pex-tool__pex-703 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pex file loses the executable permission of a binary file if packaged
I have a use case where the pex file I am generating has to package another statically compiled binary.
When the pex file is exploded, the binary file has lost the executable permission. Is there any way to preserve the permissions?
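For illustration only, and not necessarily how pex itself addresses it: for archives built on Unix, the permission bits of a zip entry live in the high 16 bits of `ZipInfo.external_attr`, and `zipfile` does not restore them on extraction, so they can be re-applied after extracting:
```python
import os
import zipfile

def extract_preserving_mode(zf: zipfile.ZipFile, name: str, target_dir: str) -> str:
    # ZipFile.extract() writes the file but drops the Unix mode bits,
    # so re-apply the mode recorded in the entry's external_attr.
    info = zf.getinfo(name)
    out_path = zf.extract(info, target_dir)
    mode = (info.external_attr >> 16) & 0o777
    if mode:
        os.chmod(out_path, mode)
    return out_path
```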
</issue>
<code>
[start of pex/util.py]
1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import absolute_import
5
6 import contextlib
7 import os
8 import shutil
9 import tempfile
10 import uuid
11 from hashlib import sha1
12 from site import makepath
13 from threading import Lock
14
15 from pex.common import rename_if_empty, safe_mkdir, safe_mkdtemp, safe_open
16 from pex.compatibility import exec_function
17 from pex.finders import register_finders
18 from pex.third_party.pkg_resources import (
19 find_distributions,
20 resource_isdir,
21 resource_listdir,
22 resource_string
23 )
24
25
26 class DistributionHelper(object):
27 @classmethod
28 def walk_data(cls, dist, path='/'):
29 """Yields filename, stream for files identified as data in the distribution"""
30 for rel_fn in filter(None, dist.resource_listdir(path)):
31 full_fn = os.path.join(path, rel_fn)
32 if dist.resource_isdir(full_fn):
33 for fn, stream in cls.walk_data(dist, full_fn):
34 yield fn, stream
35 else:
36 yield full_fn[1:], dist.get_resource_stream(dist._provider, full_fn)
37
38 @staticmethod
39 def zipsafe(dist):
40 """Returns whether or not we determine a distribution is zip-safe."""
41 # zip-safety is only an attribute of eggs. wheels are considered never
42 # zip safe per implications of PEP 427.
43 if hasattr(dist, 'egg_info') and dist.egg_info.endswith('EGG-INFO'):
44 egg_metadata = dist.metadata_listdir('')
45 return 'zip-safe' in egg_metadata and 'native_libs.txt' not in egg_metadata
46 else:
47 return False
48
49 @classmethod
50 def access_zipped_assets(cls, static_module_name, static_path, dir_location=None):
51 """
52 Create a copy of static resource files as we can't serve them from within the pex file.
53
54 :param static_module_name: Module name containing module to cache in a tempdir
55 :type static_module_name: string, for example 'twitter.common.zookeeper' or similar
56 :param static_path: Module name, for example 'serverset'
57 :param dir_location: create a new temporary directory inside, or None to have one created
58 :returns temp_dir: Temporary directory with the zipped assets inside
59 :rtype: str
60 """
61
62 # asset_path is initially a module name that's the same as the static_path, but will be
63 # changed to walk the directory tree
64 def walk_zipped_assets(static_module_name, static_path, asset_path, temp_dir):
65 for asset in resource_listdir(static_module_name, asset_path):
66 asset_target = os.path.normpath(
67 os.path.join(os.path.relpath(asset_path, static_path), asset))
68 if resource_isdir(static_module_name, os.path.join(asset_path, asset)):
69 safe_mkdir(os.path.join(temp_dir, asset_target))
70 walk_zipped_assets(static_module_name, static_path, os.path.join(asset_path, asset),
71 temp_dir)
72 else:
73 with open(os.path.join(temp_dir, asset_target), 'wb') as fp:
74 path = os.path.join(static_path, asset_target)
75 file_data = resource_string(static_module_name, path)
76 fp.write(file_data)
77
78 if dir_location is None:
79 temp_dir = safe_mkdtemp()
80 else:
81 temp_dir = dir_location
82
83 walk_zipped_assets(static_module_name, static_path, static_path, temp_dir)
84
85 return temp_dir
86
87 @classmethod
88 def distribution_from_path(cls, path, name=None):
89 """Return a distribution from a path.
90
91 If name is provided, find the distribution. If none is found matching the name,
92 return None. If name is not provided and there is unambiguously a single
93 distribution, return that distribution otherwise None.
94 """
95 # Monkeypatch pkg_resources finders should it not already be so.
96 register_finders()
97 if name is None:
98 distributions = set(find_distributions(path))
99 if len(distributions) == 1:
100 return distributions.pop()
101 else:
102 for dist in find_distributions(path):
103 if dist.project_name == name:
104 return dist
105
106
107 class CacheHelper(object):
108 @classmethod
109 def update_hash(cls, filelike, digest):
110 """Update the digest of a single file in a memory-efficient manner."""
111 block_size = digest.block_size * 1024
112 for chunk in iter(lambda: filelike.read(block_size), b''):
113 digest.update(chunk)
114
115 @classmethod
116 def hash(cls, path, digest=None, hasher=sha1):
117 """Return the digest of a single file in a memory-efficient manner."""
118 if digest is None:
119 digest = hasher()
120 with open(path, 'rb') as fh:
121 cls.update_hash(fh, digest)
122 return digest.hexdigest()
123
124 @classmethod
125 def _compute_hash(cls, names, stream_factory):
126 digest = sha1()
127 # Always use / as the path separator, since that's what zip uses.
128 hashed_names = [n.replace(os.sep, '/') for n in names]
129 digest.update(''.join(hashed_names).encode('utf-8'))
130 for name in names:
131 with contextlib.closing(stream_factory(name)) as fp:
132 cls.update_hash(fp, digest)
133 return digest.hexdigest()
134
135 @classmethod
136 def zip_hash(cls, zf, prefix=''):
137 """Return the hash of the contents of a zipfile, comparable with a cls.dir_hash."""
138 prefix_length = len(prefix)
139 names = sorted(name[prefix_length:] for name in zf.namelist()
140 if name.startswith(prefix) and not name.endswith('.pyc') and not name.endswith('/'))
141 def stream_factory(name):
142 return zf.open(prefix + name)
143 return cls._compute_hash(names, stream_factory)
144
145 @classmethod
146 def _iter_files(cls, directory):
147 normpath = os.path.realpath(os.path.normpath(directory))
148 for root, _, files in os.walk(normpath):
149 for f in files:
150 yield os.path.relpath(os.path.join(root, f), normpath)
151
152 @classmethod
153 def pex_hash(cls, d):
154 """Return a reproducible hash of the contents of a directory."""
155 names = sorted(f for f in cls._iter_files(d) if not (f.endswith('.pyc') or f.startswith('.')))
156 def stream_factory(name):
157 return open(os.path.join(d, name), 'rb') # noqa: T802
158 return cls._compute_hash(names, stream_factory)
159
160 @classmethod
161 def dir_hash(cls, d):
162 """Return a reproducible hash of the contents of a directory."""
163 names = sorted(f for f in cls._iter_files(d) if not f.endswith('.pyc'))
164 def stream_factory(name):
165 return open(os.path.join(d, name), 'rb') # noqa: T802
166 return cls._compute_hash(names, stream_factory)
167
168 @classmethod
169 def cache_distribution(cls, zf, source, target_dir):
170 """Possibly cache an egg from within a zipfile into target_cache.
171
172 Given a zipfile handle and a filename corresponding to an egg distribution within
173 that zip, maybe write to the target cache and return a Distribution."""
174 dependency_basename = os.path.basename(source)
175 if not os.path.exists(target_dir):
176 target_dir_tmp = target_dir + '.' + uuid.uuid4().hex
177 for name in zf.namelist():
178 if name.startswith(source) and not name.endswith('/'):
179 # strip off prefix + '/'
180 target_name = os.path.join(dependency_basename, name[len(source) + 1:])
181 with contextlib.closing(zf.open(name)) as zi:
182 with safe_open(os.path.join(target_dir_tmp, target_name), 'wb') as fp:
183 shutil.copyfileobj(zi, fp)
184
185 rename_if_empty(target_dir_tmp, target_dir)
186
187 dist = DistributionHelper.distribution_from_path(target_dir)
188 assert dist is not None, 'Failed to cache distribution %s' % source
189 return dist
190
191
192 class Memoizer(object):
193 """A thread safe class for memoizing the results of a computation."""
194
195 def __init__(self):
196 self._data = {}
197 self._lock = Lock()
198
199 def get(self, key, default=None):
200 with self._lock:
201 return self._data.get(key, default)
202
203 def store(self, key, value):
204 with self._lock:
205 self._data[key] = value
206
207
208 @contextlib.contextmanager
209 def named_temporary_file(*args, **kwargs):
210 """
211 Due to a bug in python (https://bugs.python.org/issue14243), we need
212 this to be able to use the temporary file without deleting it.
213 """
214 assert 'delete' not in kwargs
215 kwargs['delete'] = False
216 fp = tempfile.NamedTemporaryFile(*args, **kwargs)
217 try:
218 with fp:
219 yield fp
220 finally:
221 os.remove(fp.name)
222
223
224 def iter_pth_paths(filename):
225 """Given a .pth file, extract and yield all inner paths without honoring imports. This shadows
226 python's site.py behavior, which is invoked at interpreter startup."""
227 try:
228 f = open(filename, 'rU') # noqa
229 except IOError:
230 return
231
232 dirname = os.path.dirname(filename)
233 known_paths = set()
234
235 with f:
236 for line in f:
237 line = line.rstrip()
238 if not line or line.startswith('#'):
239 continue
240 elif line.startswith(('import ', 'import\t')):
241 try:
242 exec_function(line, globals_map={})
243 continue
244 except Exception:
245 # NB: import lines are routinely abused with extra code appended using `;` so the class of
246 # exceptions that might be raised in broader than ImportError. As such we cacth broadly
247 # here.
248
249 # Defer error handling to the higher level site.py logic invoked at startup.
250 return
251 else:
252 extras_dir, extras_dir_case_insensitive = makepath(dirname, line)
253 if extras_dir_case_insensitive not in known_paths and os.path.exists(extras_dir):
254 yield extras_dir
255 known_paths.add(extras_dir_case_insensitive)
256
257
258 def merge_split(*paths):
259 """Merge paths into a single path delimited by colons and split on colons to return
260 a list of paths.
261
262 :param paths: a variable length list of path strings
263 :return: a list of paths from the merged path list split by colons
264 """
265 filtered_paths = filter(None, paths)
266 return [p for p in ':'.join(filtered_paths).split(':') if p]
267
[end of pex/util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/util.py b/pex/util.py
--- a/pex/util.py
+++ b/pex/util.py
@@ -5,14 +5,13 @@
import contextlib
import os
-import shutil
import tempfile
import uuid
from hashlib import sha1
from site import makepath
from threading import Lock
-from pex.common import rename_if_empty, safe_mkdir, safe_mkdtemp, safe_open
+from pex.common import rename_if_empty, safe_mkdir, safe_mkdtemp
from pex.compatibility import exec_function
from pex.finders import register_finders
from pex.third_party.pkg_resources import (
@@ -176,11 +175,9 @@
target_dir_tmp = target_dir + '.' + uuid.uuid4().hex
for name in zf.namelist():
if name.startswith(source) and not name.endswith('/'):
- # strip off prefix + '/'
- target_name = os.path.join(dependency_basename, name[len(source) + 1:])
- with contextlib.closing(zf.open(name)) as zi:
- with safe_open(os.path.join(target_dir_tmp, target_name), 'wb') as fp:
- shutil.copyfileobj(zi, fp)
+ zf.extract(name, target_dir_tmp)
+ os.rename(os.path.join(target_dir_tmp, source),
+ os.path.join(target_dir_tmp, dependency_basename))
rename_if_empty(target_dir_tmp, target_dir)
| {"golden_diff": "diff --git a/pex/util.py b/pex/util.py\n--- a/pex/util.py\n+++ b/pex/util.py\n@@ -5,14 +5,13 @@\n \n import contextlib\n import os\n-import shutil\n import tempfile\n import uuid\n from hashlib import sha1\n from site import makepath\n from threading import Lock\n \n-from pex.common import rename_if_empty, safe_mkdir, safe_mkdtemp, safe_open\n+from pex.common import rename_if_empty, safe_mkdir, safe_mkdtemp\n from pex.compatibility import exec_function\n from pex.finders import register_finders\n from pex.third_party.pkg_resources import (\n@@ -176,11 +175,9 @@\n target_dir_tmp = target_dir + '.' + uuid.uuid4().hex\n for name in zf.namelist():\n if name.startswith(source) and not name.endswith('/'):\n- # strip off prefix + '/'\n- target_name = os.path.join(dependency_basename, name[len(source) + 1:])\n- with contextlib.closing(zf.open(name)) as zi:\n- with safe_open(os.path.join(target_dir_tmp, target_name), 'wb') as fp:\n- shutil.copyfileobj(zi, fp)\n+ zf.extract(name, target_dir_tmp)\n+ os.rename(os.path.join(target_dir_tmp, source),\n+ os.path.join(target_dir_tmp, dependency_basename))\n \n rename_if_empty(target_dir_tmp, target_dir)\n", "issue": "Pex file looses the executable permission of a binary file if packaged \nI have a use case where the pex file I am generating has to package another statically compiled binary. \nWhen the pex file is exploded the binary file has lost the executable permission. Is there anyway to preserve the permissions?\n\n", "before_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import\n\nimport contextlib\nimport os\nimport shutil\nimport tempfile\nimport uuid\nfrom hashlib import sha1\nfrom site import makepath\nfrom threading import Lock\n\nfrom pex.common import rename_if_empty, safe_mkdir, safe_mkdtemp, safe_open\nfrom pex.compatibility import exec_function\nfrom pex.finders import register_finders\nfrom pex.third_party.pkg_resources import (\n find_distributions,\n resource_isdir,\n resource_listdir,\n resource_string\n)\n\n\nclass DistributionHelper(object):\n @classmethod\n def walk_data(cls, dist, path='/'):\n \"\"\"Yields filename, stream for files identified as data in the distribution\"\"\"\n for rel_fn in filter(None, dist.resource_listdir(path)):\n full_fn = os.path.join(path, rel_fn)\n if dist.resource_isdir(full_fn):\n for fn, stream in cls.walk_data(dist, full_fn):\n yield fn, stream\n else:\n yield full_fn[1:], dist.get_resource_stream(dist._provider, full_fn)\n\n @staticmethod\n def zipsafe(dist):\n \"\"\"Returns whether or not we determine a distribution is zip-safe.\"\"\"\n # zip-safety is only an attribute of eggs. 
wheels are considered never\n # zip safe per implications of PEP 427.\n if hasattr(dist, 'egg_info') and dist.egg_info.endswith('EGG-INFO'):\n egg_metadata = dist.metadata_listdir('')\n return 'zip-safe' in egg_metadata and 'native_libs.txt' not in egg_metadata\n else:\n return False\n\n @classmethod\n def access_zipped_assets(cls, static_module_name, static_path, dir_location=None):\n \"\"\"\n Create a copy of static resource files as we can't serve them from within the pex file.\n\n :param static_module_name: Module name containing module to cache in a tempdir\n :type static_module_name: string, for example 'twitter.common.zookeeper' or similar\n :param static_path: Module name, for example 'serverset'\n :param dir_location: create a new temporary directory inside, or None to have one created\n :returns temp_dir: Temporary directory with the zipped assets inside\n :rtype: str\n \"\"\"\n\n # asset_path is initially a module name that's the same as the static_path, but will be\n # changed to walk the directory tree\n def walk_zipped_assets(static_module_name, static_path, asset_path, temp_dir):\n for asset in resource_listdir(static_module_name, asset_path):\n asset_target = os.path.normpath(\n os.path.join(os.path.relpath(asset_path, static_path), asset))\n if resource_isdir(static_module_name, os.path.join(asset_path, asset)):\n safe_mkdir(os.path.join(temp_dir, asset_target))\n walk_zipped_assets(static_module_name, static_path, os.path.join(asset_path, asset),\n temp_dir)\n else:\n with open(os.path.join(temp_dir, asset_target), 'wb') as fp:\n path = os.path.join(static_path, asset_target)\n file_data = resource_string(static_module_name, path)\n fp.write(file_data)\n\n if dir_location is None:\n temp_dir = safe_mkdtemp()\n else:\n temp_dir = dir_location\n\n walk_zipped_assets(static_module_name, static_path, static_path, temp_dir)\n\n return temp_dir\n\n @classmethod\n def distribution_from_path(cls, path, name=None):\n \"\"\"Return a distribution from a path.\n\n If name is provided, find the distribution. If none is found matching the name,\n return None. 
If name is not provided and there is unambiguously a single\n distribution, return that distribution otherwise None.\n \"\"\"\n # Monkeypatch pkg_resources finders should it not already be so.\n register_finders()\n if name is None:\n distributions = set(find_distributions(path))\n if len(distributions) == 1:\n return distributions.pop()\n else:\n for dist in find_distributions(path):\n if dist.project_name == name:\n return dist\n\n\nclass CacheHelper(object):\n @classmethod\n def update_hash(cls, filelike, digest):\n \"\"\"Update the digest of a single file in a memory-efficient manner.\"\"\"\n block_size = digest.block_size * 1024\n for chunk in iter(lambda: filelike.read(block_size), b''):\n digest.update(chunk)\n\n @classmethod\n def hash(cls, path, digest=None, hasher=sha1):\n \"\"\"Return the digest of a single file in a memory-efficient manner.\"\"\"\n if digest is None:\n digest = hasher()\n with open(path, 'rb') as fh:\n cls.update_hash(fh, digest)\n return digest.hexdigest()\n\n @classmethod\n def _compute_hash(cls, names, stream_factory):\n digest = sha1()\n # Always use / as the path separator, since that's what zip uses.\n hashed_names = [n.replace(os.sep, '/') for n in names]\n digest.update(''.join(hashed_names).encode('utf-8'))\n for name in names:\n with contextlib.closing(stream_factory(name)) as fp:\n cls.update_hash(fp, digest)\n return digest.hexdigest()\n\n @classmethod\n def zip_hash(cls, zf, prefix=''):\n \"\"\"Return the hash of the contents of a zipfile, comparable with a cls.dir_hash.\"\"\"\n prefix_length = len(prefix)\n names = sorted(name[prefix_length:] for name in zf.namelist()\n if name.startswith(prefix) and not name.endswith('.pyc') and not name.endswith('/'))\n def stream_factory(name):\n return zf.open(prefix + name)\n return cls._compute_hash(names, stream_factory)\n\n @classmethod\n def _iter_files(cls, directory):\n normpath = os.path.realpath(os.path.normpath(directory))\n for root, _, files in os.walk(normpath):\n for f in files:\n yield os.path.relpath(os.path.join(root, f), normpath)\n\n @classmethod\n def pex_hash(cls, d):\n \"\"\"Return a reproducible hash of the contents of a directory.\"\"\"\n names = sorted(f for f in cls._iter_files(d) if not (f.endswith('.pyc') or f.startswith('.')))\n def stream_factory(name):\n return open(os.path.join(d, name), 'rb') # noqa: T802\n return cls._compute_hash(names, stream_factory)\n\n @classmethod\n def dir_hash(cls, d):\n \"\"\"Return a reproducible hash of the contents of a directory.\"\"\"\n names = sorted(f for f in cls._iter_files(d) if not f.endswith('.pyc'))\n def stream_factory(name):\n return open(os.path.join(d, name), 'rb') # noqa: T802\n return cls._compute_hash(names, stream_factory)\n\n @classmethod\n def cache_distribution(cls, zf, source, target_dir):\n \"\"\"Possibly cache an egg from within a zipfile into target_cache.\n\n Given a zipfile handle and a filename corresponding to an egg distribution within\n that zip, maybe write to the target cache and return a Distribution.\"\"\"\n dependency_basename = os.path.basename(source)\n if not os.path.exists(target_dir):\n target_dir_tmp = target_dir + '.' 
+ uuid.uuid4().hex\n for name in zf.namelist():\n if name.startswith(source) and not name.endswith('/'):\n # strip off prefix + '/'\n target_name = os.path.join(dependency_basename, name[len(source) + 1:])\n with contextlib.closing(zf.open(name)) as zi:\n with safe_open(os.path.join(target_dir_tmp, target_name), 'wb') as fp:\n shutil.copyfileobj(zi, fp)\n\n rename_if_empty(target_dir_tmp, target_dir)\n\n dist = DistributionHelper.distribution_from_path(target_dir)\n assert dist is not None, 'Failed to cache distribution %s' % source\n return dist\n\n\nclass Memoizer(object):\n \"\"\"A thread safe class for memoizing the results of a computation.\"\"\"\n\n def __init__(self):\n self._data = {}\n self._lock = Lock()\n\n def get(self, key, default=None):\n with self._lock:\n return self._data.get(key, default)\n\n def store(self, key, value):\n with self._lock:\n self._data[key] = value\n\n\[email protected]\ndef named_temporary_file(*args, **kwargs):\n \"\"\"\n Due to a bug in python (https://bugs.python.org/issue14243), we need\n this to be able to use the temporary file without deleting it.\n \"\"\"\n assert 'delete' not in kwargs\n kwargs['delete'] = False\n fp = tempfile.NamedTemporaryFile(*args, **kwargs)\n try:\n with fp:\n yield fp\n finally:\n os.remove(fp.name)\n\n\ndef iter_pth_paths(filename):\n \"\"\"Given a .pth file, extract and yield all inner paths without honoring imports. This shadows\n python's site.py behavior, which is invoked at interpreter startup.\"\"\"\n try:\n f = open(filename, 'rU') # noqa\n except IOError:\n return\n\n dirname = os.path.dirname(filename)\n known_paths = set()\n\n with f:\n for line in f:\n line = line.rstrip()\n if not line or line.startswith('#'):\n continue\n elif line.startswith(('import ', 'import\\t')):\n try:\n exec_function(line, globals_map={})\n continue\n except Exception:\n # NB: import lines are routinely abused with extra code appended using `;` so the class of\n # exceptions that might be raised in broader than ImportError. As such we cacth broadly\n # here.\n\n # Defer error handling to the higher level site.py logic invoked at startup.\n return\n else:\n extras_dir, extras_dir_case_insensitive = makepath(dirname, line)\n if extras_dir_case_insensitive not in known_paths and os.path.exists(extras_dir):\n yield extras_dir\n known_paths.add(extras_dir_case_insensitive)\n\n\ndef merge_split(*paths):\n \"\"\"Merge paths into a single path delimited by colons and split on colons to return\n a list of paths.\n\n :param paths: a variable length list of path strings\n :return: a list of paths from the merged path list split by colons\n \"\"\"\n filtered_paths = filter(None, paths)\n return [p for p in ':'.join(filtered_paths).split(':') if p]\n", "path": "pex/util.py"}]} | 3,600 | 317 |
gh_patches_debug_39354 | rasdani/github-patches | git_diff | marshmallow-code__webargs-509 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'Not a valid tuple.' when trying to use marshmallow fields.Tuple for argument validation
I'm trying to use the marshmallow fields.Tuple for querystring argument validation on a GET request using Flask. The issue I'm running into is that no matter what type of object I declare and no matter what I use in the request, I always get the default 'Not a valid tuple.' response. I have tried using a tuple of size 1 and 2; using fields.String and/or fields.Integer, etc with the same result.
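The failure can be reproduced with marshmallow alone, because a query string delivers the field one raw string rather than a sequence of items, and `Tuple` rejects a string before any element validation runs. A minimal sketch (assuming marshmallow 3; not part of the original report):

```python
import marshmallow as ma

schema = ma.Schema.from_dict(
    {"test_tup": ma.fields.Tuple((ma.fields.Integer(), ma.fields.Integer()))}
)()

# The raw query value arrives as a single string and is rejected outright...
print(schema.validate({"test_tup": "0,0"}))  # {'test_tup': ['Not a valid tuple.']}
# ...while a real two-item sequence loads fine.
print(schema.load({"test_tup": [0, 0]}))     # {'test_tup': (0, 0)}
```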
- I'm using Python 3.6.9 with these dependencies:
anyjson==0.3.3
apipkg==1.5
arrow==0.15.5
attrs==19.3.0
backports.functools-lru-cache==1.6.1
cassandra-driver==3.22.0
Cerberus==1.3.2
certifi==2019.11.28
cffi==1.13.2
chardet==3.0.4
click==7.1.1
execnet==1.7.1
Flask==1.1.1
Flask-Cors==3.0.8
funcsigs==1.0.2
futures==3.1.1
geomet==0.1.2
gevent==1.4.0
greenlet==0.4.13
gunicorn==20.0.4
idna==2.9
importlib-metadata==1.6.0
itsdangerous==1.1.0
Jinja2==2.11.1
jsonklog==0.15.0
MarkupSafe==1.1.1
marshmallow==3.5.1
neurolab==0.3.5
numpy==1.18.1
pluggy==0.13.1
py==1.8.1
pyaml==20.3.1
pymongo==3.10.1
pytest==3.3.0
pytest-forked==0.2
pytest-xdist==1.20.1
python-dateutil==2.8.1
PyYAML==5.3.1
readline==6.2.4.1
requests==2.23.0
six==1.14.0
urllib3==1.25.8
webargs==6.0.0
Werkzeug==1.0.0
zipp==3.1.0
- Here is an example of what I'm trying to do:
```
from flask import Flask
from webargs.flaskparser import parser, use_kwargs
from marshmallow import EXCLUDE, fields, Schema
app = Flask(__name__)
@app.errorhandler(422)
def custom_handler(error):
errors = []
if 'query' in error.data['messages']:
for arg in error.data['messages']['query']:
for item in error.data['messages']['query'][arg]:
errors.append(item)
return str(errors), 400
class test_schema(Schema):
class Meta:
unknown = EXCLUDE
strict = True
test_tup = fields.Tuple((fields.Integer(required=True), fields.Integer(required=True)), required=True)
@app.route('/test/', strict_slashes=False)
@parser.use_kwargs(test_schema, location='query')
def test_the_mallow(**kwargs):
return "True"
```
- Finally, here are a couple of example URLs I've tried:
localhost:2300/test/?test_tup=[0,0]
localhost:2300/test/?test_tup=(0,0)
localhost:2300/test/?test_tup=0,0
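Until delimited tuples are supported directly, one workaround sketch is to accept the comma-separated value with the `DelimitedList` field already present in `webargs.fields` (shown below) and approximate tuple arity with a length check; the schema name and validator choice here are illustrative, not from the report:

```python
from marshmallow import EXCLUDE, Schema, validate
from webargs import fields


class TestTupleWorkaroundSchema(Schema):
    class Meta:
        unknown = EXCLUDE

    # "?test_tup=0,0" is split on "," and loaded as [0, 0].
    test_tup = fields.DelimitedList(
        fields.Integer(required=True),
        required=True,
        validate=validate.Length(equal=2),  # stand-in for fixed tuple size
    )
```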
</issue>
<code>
[start of src/webargs/fields.py]
1 """Field classes.
2
3 Includes all fields from `marshmallow.fields` in addition to a custom
4 `Nested` field and `DelimitedList`.
5
6 All fields can optionally take a special `location` keyword argument, which
7 tells webargs where to parse the request argument from.
8
9 .. code-block:: python
10
11 args = {
12 "active": fields.Bool(location="query"),
13 "content_type": fields.Str(data_key="Content-Type", location="headers"),
14 }
15
16 Note: `data_key` replaced `load_from` in marshmallow 3.
17 When using marshmallow 2, use `load_from`.
18 """
19 import marshmallow as ma
20
21 # Expose all fields from marshmallow.fields.
22 from marshmallow.fields import * # noqa: F40
23 from webargs.compat import MARSHMALLOW_VERSION_INFO
24 from webargs.dict2schema import dict2schema
25
26 __all__ = ["DelimitedList"] + ma.fields.__all__
27
28
29 class Nested(ma.fields.Nested):
30 """Same as `marshmallow.fields.Nested`, except can be passed a dictionary as
31 the first argument, which will be converted to a `marshmallow.Schema`.
32
33 .. note::
34
35 The schema class here will always be `marshmallow.Schema`, regardless
36 of whether a custom schema class is set on the parser. Pass an explicit schema
37 class if necessary.
38 """
39
40 def __init__(self, nested, *args, **kwargs):
41 if isinstance(nested, dict):
42 nested = dict2schema(nested)
43 super().__init__(nested, *args, **kwargs)
44
45
46 class DelimitedList(ma.fields.List):
47 """A field which is similar to a List, but takes its input as a delimited
48 string (e.g. "foo,bar,baz").
49
50 Like List, it can be given a nested field type which it will use to
51 de/serialize each element of the list.
52
53 :param Field cls_or_instance: A field class or instance.
54 :param str delimiter: Delimiter between values.
55 """
56
57 default_error_messages = {"invalid": "Not a valid delimited list."}
58 delimiter = ","
59
60 def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):
61 self.delimiter = delimiter or self.delimiter
62 super().__init__(cls_or_instance, **kwargs)
63
64 def _serialize(self, value, attr, obj):
65 # serializing will start with List serialization, so that we correctly
66 # output lists of non-primitive types, e.g. DelimitedList(DateTime)
67 return self.delimiter.join(
68 format(each) for each in super()._serialize(value, attr, obj)
69 )
70
71 def _deserialize(self, value, attr, data, **kwargs):
72 # attempting to deserialize from a non-string source is an error
73 if not isinstance(value, (str, bytes)):
74 if MARSHMALLOW_VERSION_INFO[0] < 3:
75 self.fail("invalid")
76 else:
77 raise self.make_error("invalid")
78 return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)
79
[end of src/webargs/fields.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/webargs/fields.py b/src/webargs/fields.py
--- a/src/webargs/fields.py
+++ b/src/webargs/fields.py
@@ -43,26 +43,24 @@
super().__init__(nested, *args, **kwargs)
-class DelimitedList(ma.fields.List):
- """A field which is similar to a List, but takes its input as a delimited
- string (e.g. "foo,bar,baz").
+class DelimitedFieldMixin:
+ """
+ This is a mixin class for subclasses of ma.fields.List and ma.fields.Tuple
+ which split on a pre-specified delimiter. By default, the delimiter will be ","
- Like List, it can be given a nested field type which it will use to
- de/serialize each element of the list.
+ Because we want the MRO to reach this class before the List or Tuple class,
+ it must be listed first in the superclasses
- :param Field cls_or_instance: A field class or instance.
- :param str delimiter: Delimiter between values.
+ For example, a DelimitedList-like type can be defined like so:
+
+ >>> class MyDelimitedList(DelimitedFieldMixin, ma.fields.List):
+ >>> pass
"""
- default_error_messages = {"invalid": "Not a valid delimited list."}
delimiter = ","
- def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):
- self.delimiter = delimiter or self.delimiter
- super().__init__(cls_or_instance, **kwargs)
-
def _serialize(self, value, attr, obj):
- # serializing will start with List serialization, so that we correctly
+ # serializing will start with parent-class serialization, so that we correctly
# output lists of non-primitive types, e.g. DelimitedList(DateTime)
return self.delimiter.join(
format(each) for each in super()._serialize(value, attr, obj)
@@ -76,3 +74,45 @@
else:
raise self.make_error("invalid")
return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)
+
+
+class DelimitedList(DelimitedFieldMixin, ma.fields.List):
+ """A field which is similar to a List, but takes its input as a delimited
+ string (e.g. "foo,bar,baz").
+
+ Like List, it can be given a nested field type which it will use to
+ de/serialize each element of the list.
+
+ :param Field cls_or_instance: A field class or instance.
+ :param str delimiter: Delimiter between values.
+ """
+
+ default_error_messages = {"invalid": "Not a valid delimited list."}
+ delimiter = ","
+
+ def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):
+ self.delimiter = delimiter or self.delimiter
+ super().__init__(cls_or_instance, **kwargs)
+
+
+# DelimitedTuple can only be defined when using marshmallow3, when Tuple was
+# added
+if MARSHMALLOW_VERSION_INFO[0] >= 3:
+
+ class DelimitedTuple(DelimitedFieldMixin, ma.fields.Tuple):
+ """A field which is similar to a Tuple, but takes its input as a delimited
+ string (e.g. "foo,bar,baz").
+
+ Like Tuple, it can be given a tuple of nested field types which it will use to
+ de/serialize each element of the tuple.
+
+ :param Iterable[Field] tuple_fields: An iterable of field classes or instances.
+ :param str delimiter: Delimiter between values.
+ """
+
+ default_error_messages = {"invalid": "Not a valid delimited tuple."}
+ delimiter = ","
+
+ def __init__(self, tuple_fields, *, delimiter=None, **kwargs):
+ self.delimiter = delimiter or self.delimiter
+ super().__init__(tuple_fields, **kwargs)
| {"golden_diff": "diff --git a/src/webargs/fields.py b/src/webargs/fields.py\n--- a/src/webargs/fields.py\n+++ b/src/webargs/fields.py\n@@ -43,26 +43,24 @@\n super().__init__(nested, *args, **kwargs)\n \n \n-class DelimitedList(ma.fields.List):\n- \"\"\"A field which is similar to a List, but takes its input as a delimited\n- string (e.g. \"foo,bar,baz\").\n+class DelimitedFieldMixin:\n+ \"\"\"\n+ This is a mixin class for subclasses of ma.fields.List and ma.fields.Tuple\n+ which split on a pre-specified delimiter. By default, the delimiter will be \",\"\n \n- Like List, it can be given a nested field type which it will use to\n- de/serialize each element of the list.\n+ Because we want the MRO to reach this class before the List or Tuple class,\n+ it must be listed first in the superclasses\n \n- :param Field cls_or_instance: A field class or instance.\n- :param str delimiter: Delimiter between values.\n+ For example, a DelimitedList-like type can be defined like so:\n+\n+ >>> class MyDelimitedList(DelimitedFieldMixin, ma.fields.List):\n+ >>> pass\n \"\"\"\n \n- default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n delimiter = \",\"\n \n- def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):\n- self.delimiter = delimiter or self.delimiter\n- super().__init__(cls_or_instance, **kwargs)\n-\n def _serialize(self, value, attr, obj):\n- # serializing will start with List serialization, so that we correctly\n+ # serializing will start with parent-class serialization, so that we correctly\n # output lists of non-primitive types, e.g. DelimitedList(DateTime)\n return self.delimiter.join(\n format(each) for each in super()._serialize(value, attr, obj)\n@@ -76,3 +74,45 @@\n else:\n raise self.make_error(\"invalid\")\n return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)\n+\n+\n+class DelimitedList(DelimitedFieldMixin, ma.fields.List):\n+ \"\"\"A field which is similar to a List, but takes its input as a delimited\n+ string (e.g. \"foo,bar,baz\").\n+\n+ Like List, it can be given a nested field type which it will use to\n+ de/serialize each element of the list.\n+\n+ :param Field cls_or_instance: A field class or instance.\n+ :param str delimiter: Delimiter between values.\n+ \"\"\"\n+\n+ default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n+ delimiter = \",\"\n+\n+ def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):\n+ self.delimiter = delimiter or self.delimiter\n+ super().__init__(cls_or_instance, **kwargs)\n+\n+\n+# DelimitedTuple can only be defined when using marshmallow3, when Tuple was\n+# added\n+if MARSHMALLOW_VERSION_INFO[0] >= 3:\n+\n+ class DelimitedTuple(DelimitedFieldMixin, ma.fields.Tuple):\n+ \"\"\"A field which is similar to a Tuple, but takes its input as a delimited\n+ string (e.g. \"foo,bar,baz\").\n+\n+ Like Tuple, it can be given a tuple of nested field types which it will use to\n+ de/serialize each element of the tuple.\n+\n+ :param Iterable[Field] tuple_fields: An iterable of field classes or instances.\n+ :param str delimiter: Delimiter between values.\n+ \"\"\"\n+\n+ default_error_messages = {\"invalid\": \"Not a valid delimited tuple.\"}\n+ delimiter = \",\"\n+\n+ def __init__(self, tuple_fields, *, delimiter=None, **kwargs):\n+ self.delimiter = delimiter or self.delimiter\n+ super().__init__(tuple_fields, **kwargs)\n", "issue": "'Not a valid tuple.' 
when trying to use marshmallow fields.Tuple for argument validation\nI'm trying to use the marshmallow fields.Tuple for querystring argument validation on a GET request using Flask. The issue I'm running into is that no matter what type of object I declare and no matter what I use in the request, I always get the default 'Not a valid tuple.' response. I have tried using a tuple of size 1 and 2; using fields.String and/or fields.Integer, etc with the same result.\r\n\r\n- I'm using Python 3.6.9 with these dependencies:\r\nanyjson==0.3.3\r\napipkg==1.5\r\narrow==0.15.5\r\nattrs==19.3.0\r\nbackports.functools-lru-cache==1.6.1\r\ncassandra-driver==3.22.0\r\nCerberus==1.3.2\r\ncertifi==2019.11.28\r\ncffi==1.13.2\r\nchardet==3.0.4\r\nclick==7.1.1\r\nexecnet==1.7.1\r\nFlask==1.1.1\r\nFlask-Cors==3.0.8\r\nfuncsigs==1.0.2\r\nfutures==3.1.1\r\ngeomet==0.1.2\r\ngevent==1.4.0\r\ngreenlet==0.4.13\r\ngunicorn==20.0.4\r\nidna==2.9\r\nimportlib-metadata==1.6.0\r\nitsdangerous==1.1.0\r\nJinja2==2.11.1\r\njsonklog==0.15.0\r\nMarkupSafe==1.1.1\r\nmarshmallow==3.5.1\r\nneurolab==0.3.5\r\nnumpy==1.18.1\r\npluggy==0.13.1\r\npy==1.8.1\r\npyaml==20.3.1\r\npymongo==3.10.1\r\npytest==3.3.0\r\npytest-forked==0.2\r\npytest-xdist==1.20.1\r\npython-dateutil==2.8.1\r\nPyYAML==5.3.1\r\nreadline==6.2.4.1\r\nrequests==2.23.0\r\nsix==1.14.0\r\nurllib3==1.25.8\r\nwebargs==6.0.0\r\nWerkzeug==1.0.0\r\nzipp==3.1.0\r\n\r\n- Here is an example of what I'm trying to do:\r\n```\r\nfrom flask import Flask\r\nfrom webargs.flaskparser import parser, use_kwargs\r\nfrom marshmallow import EXCLUDE, fields, Schema\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n\r\[email protected](422)\r\ndef custom_handler(error):\r\n errors = []\r\n if 'query' in error.data['messages']:\r\n for arg in error.data['messages']['query']:\r\n for item in error.data['messages']['query'][arg]:\r\n errors.append(item)\r\n return str(errors), 400\r\n\r\n\r\nclass test_schema(Schema):\r\n class Meta:\r\n unknown = EXCLUDE\r\n strict = True\r\n \r\n test_tup = fields.Tuple((fields.Integer(required=True), fields.Integer(required=True)), required=True)\r\n\r\n\r\[email protected]('/test/', strict_slashes=False)\r\[email protected]_kwargs(test_schema, location='query')\r\ndef test_the_mallow(**kwargs):\r\n return \"True\"\r\n```\r\n\r\n- Finally, here are a couple example url's I've tried:\r\n localhost:2300/test/?test_tup=[0,0]\r\n localhost:2300/test/?test_tup=(0,0)\r\n localhost:2300/test/?test_tup=0,0\r\n\n", "before_files": [{"content": "\"\"\"Field classes.\n\nIncludes all fields from `marshmallow.fields` in addition to a custom\n`Nested` field and `DelimitedList`.\n\nAll fields can optionally take a special `location` keyword argument, which\ntells webargs where to parse the request argument from.\n\n.. code-block:: python\n\n args = {\n \"active\": fields.Bool(location=\"query\"),\n \"content_type\": fields.Str(data_key=\"Content-Type\", location=\"headers\"),\n }\n\nNote: `data_key` replaced `load_from` in marshmallow 3.\nWhen using marshmallow 2, use `load_from`.\n\"\"\"\nimport marshmallow as ma\n\n# Expose all fields from marshmallow.fields.\nfrom marshmallow.fields import * # noqa: F40\nfrom webargs.compat import MARSHMALLOW_VERSION_INFO\nfrom webargs.dict2schema import dict2schema\n\n__all__ = [\"DelimitedList\"] + ma.fields.__all__\n\n\nclass Nested(ma.fields.Nested):\n \"\"\"Same as `marshmallow.fields.Nested`, except can be passed a dictionary as\n the first argument, which will be converted to a `marshmallow.Schema`.\n\n .. 
note::\n\n The schema class here will always be `marshmallow.Schema`, regardless\n of whether a custom schema class is set on the parser. Pass an explicit schema\n class if necessary.\n \"\"\"\n\n def __init__(self, nested, *args, **kwargs):\n if isinstance(nested, dict):\n nested = dict2schema(nested)\n super().__init__(nested, *args, **kwargs)\n\n\nclass DelimitedList(ma.fields.List):\n \"\"\"A field which is similar to a List, but takes its input as a delimited\n string (e.g. \"foo,bar,baz\").\n\n Like List, it can be given a nested field type which it will use to\n de/serialize each element of the list.\n\n :param Field cls_or_instance: A field class or instance.\n :param str delimiter: Delimiter between values.\n \"\"\"\n\n default_error_messages = {\"invalid\": \"Not a valid delimited list.\"}\n delimiter = \",\"\n\n def __init__(self, cls_or_instance, *, delimiter=None, **kwargs):\n self.delimiter = delimiter or self.delimiter\n super().__init__(cls_or_instance, **kwargs)\n\n def _serialize(self, value, attr, obj):\n # serializing will start with List serialization, so that we correctly\n # output lists of non-primitive types, e.g. DelimitedList(DateTime)\n return self.delimiter.join(\n format(each) for each in super()._serialize(value, attr, obj)\n )\n\n def _deserialize(self, value, attr, data, **kwargs):\n # attempting to deserialize from a non-string source is an error\n if not isinstance(value, (str, bytes)):\n if MARSHMALLOW_VERSION_INFO[0] < 3:\n self.fail(\"invalid\")\n else:\n raise self.make_error(\"invalid\")\n return super()._deserialize(value.split(self.delimiter), attr, data, **kwargs)\n", "path": "src/webargs/fields.py"}]} | 2,179 | 889 |
gh_patches_debug_7758 | rasdani/github-patches | git_diff | CTFd__CTFd-1934 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error 500 when visiting /admin/users/1 - AttributeError: 'NoneType' object has no attribute 'get_score'
**Environment**:
- CTFd Version/Commit: HEAD
- Operating System: Docker image based off official Dockerfile
- Web Browser and Version: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1 Safari/605.1.15
**What happened?**
500 Internal server error
**What did you expect to happen?**
Show the admin user details when in team mode
**How to reproduce your issue**
* visited `/admin/users/1`
* this seems to happen because, in team mode, the admin user does not belong to any team, so the `account` hybrid property below returns `None` (a defensive sketch follows the snippet)
```python
@hybrid_property
def account(self):
from CTFd.utils import get_config
user_mode = get_config("user_mode")
if user_mode == "teams":
return self.team
elif user_mode == "users":
return self
```
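A defensive sketch of the guard this suggests for `users_detail` (names follow the traceback below; the project may resolve it differently):

```python
# Only compute score/place when the user actually has an account (team or user).
account = user.account
if account:
    score = account.get_score(admin=True)
    place = account.get_place(admin=True)
else:
    score = None
    place = None
```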
**Any associated stack traces or error logs**
```
ERROR [CTFd] Exception on /admin/users/1 [GET]
--
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python3.7/site-packages/flask_restx/api.py", line 639, in error_router
return original_handler(e)
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/opt/CTFd/CTFd/utils/decorators/__init__.py", line 133, in admins_only_wrapper
return f(*args, **kwargs)
File "/opt/CTFd/CTFd/admin/users.py", line 91, in users_detail
score = user.account.get_score(admin=True)
AttributeError: 'NoneType' object has no attribute 'get_score'
```
</issue>
<code>
[start of CTFd/admin/users.py]
1 from flask import render_template, request, url_for
2 from sqlalchemy.sql import not_
3
4 from CTFd.admin import admin
5 from CTFd.models import Challenges, Tracking, Users
6 from CTFd.utils import get_config
7 from CTFd.utils.decorators import admins_only
8 from CTFd.utils.modes import TEAMS_MODE
9
10
11 @admin.route("/admin/users")
12 @admins_only
13 def users_listing():
14 q = request.args.get("q")
15 field = request.args.get("field")
16 page = abs(request.args.get("page", 1, type=int))
17 filters = []
18 users = []
19
20 if q:
21 # The field exists as an exposed column
22 if Users.__mapper__.has_property(field):
23 filters.append(getattr(Users, field).like("%{}%".format(q)))
24
25 if q and field == "ip":
26 users = (
27 Users.query.join(Tracking, Users.id == Tracking.user_id)
28 .filter(Tracking.ip.like("%{}%".format(q)))
29 .order_by(Users.id.asc())
30 .paginate(page=page, per_page=50)
31 )
32 else:
33 users = (
34 Users.query.filter(*filters)
35 .order_by(Users.id.asc())
36 .paginate(page=page, per_page=50)
37 )
38
39 args = dict(request.args)
40 args.pop("page", 1)
41
42 return render_template(
43 "admin/users/users.html",
44 users=users,
45 prev_page=url_for(request.endpoint, page=users.prev_num, **args),
46 next_page=url_for(request.endpoint, page=users.next_num, **args),
47 q=q,
48 field=field,
49 )
50
51
52 @admin.route("/admin/users/new")
53 @admins_only
54 def users_new():
55 return render_template("admin/users/new.html")
56
57
58 @admin.route("/admin/users/<int:user_id>")
59 @admins_only
60 def users_detail(user_id):
61 # Get user object
62 user = Users.query.filter_by(id=user_id).first_or_404()
63
64 # Get the user's solves
65 solves = user.get_solves(admin=True)
66
67 # Get challenges that the user is missing
68 if get_config("user_mode") == TEAMS_MODE:
69 if user.team:
70 all_solves = user.team.get_solves(admin=True)
71 else:
72 all_solves = user.get_solves(admin=True)
73 else:
74 all_solves = user.get_solves(admin=True)
75
76 solve_ids = [s.challenge_id for s in all_solves]
77 missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()
78
79 # Get IP addresses that the User has used
80 addrs = (
81 Tracking.query.filter_by(user_id=user_id).order_by(Tracking.date.desc()).all()
82 )
83
84 # Get Fails
85 fails = user.get_fails(admin=True)
86
87 # Get Awards
88 awards = user.get_awards(admin=True)
89
90 # Get user properties
91 score = user.account.get_score(admin=True)
92 place = user.account.get_place(admin=True)
93
94 return render_template(
95 "admin/users/user.html",
96 solves=solves,
97 user=user,
98 addrs=addrs,
99 score=score,
100 missing=missing,
101 place=place,
102 fails=fails,
103 awards=awards,
104 )
105
[end of CTFd/admin/users.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/CTFd/admin/users.py b/CTFd/admin/users.py
--- a/CTFd/admin/users.py
+++ b/CTFd/admin/users.py
@@ -87,9 +87,14 @@
# Get Awards
awards = user.get_awards(admin=True)
- # Get user properties
- score = user.account.get_score(admin=True)
- place = user.account.get_place(admin=True)
+ # Check if the user has an account (team or user)
+ # so that we don't throw an error if they dont
+ if user.account:
+ score = user.account.get_score(admin=True)
+ place = user.account.get_place(admin=True)
+ else:
+ score = None
+ place = None
return render_template(
"admin/users/user.html",
| {"golden_diff": "diff --git a/CTFd/admin/users.py b/CTFd/admin/users.py\n--- a/CTFd/admin/users.py\n+++ b/CTFd/admin/users.py\n@@ -87,9 +87,14 @@\n # Get Awards\n awards = user.get_awards(admin=True)\n \n- # Get user properties\n- score = user.account.get_score(admin=True)\n- place = user.account.get_place(admin=True)\n+ # Check if the user has an account (team or user)\n+ # so that we don't throw an error if they dont\n+ if user.account:\n+ score = user.account.get_score(admin=True)\n+ place = user.account.get_place(admin=True)\n+ else:\n+ score = None\n+ place = None\n \n return render_template(\n \"admin/users/user.html\",\n", "issue": "Error 500 when visiting /admin/users/1 - AttributeError: 'NoneType' object has no attribute 'get_score'\n**Environment**:\r\n\r\n- CTFd Version/Commit: HEAD\r\n- Operating System: Docker image based off official Dockerfile\r\n- Web Browser and Version: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1 Safari/605.1.15\r\n\r\n**What happened?**\r\n500 Internal server error\r\n\r\n**What did you expect to happen?**\r\nShow the admin user details when in team mode\r\n\r\n**How to reproduce your issue**\r\n* visited the `/admin/users/1`\r\n* this seems due to the fact that, when in team mode, the admin user does not belong to any team and, for some reason, this one returns `None`\r\n\r\n```python\r\n @hybrid_property\r\n def account(self):\r\n from CTFd.utils import get_config\r\n\r\n user_mode = get_config(\"user_mode\")\r\n if user_mode == \"teams\":\r\n return self.team\r\n elif user_mode == \"users\":\r\n return self\r\n```\r\n\r\n**Any associated stack traces or error logs**\r\n```\r\nERROR [CTFd] Exception on /admin/users/1 [GET]\r\n--\r\nTraceback (most recent call last):\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/app.py\", line 2447, in wsgi_app\r\nresponse = self.full_dispatch_request()\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/app.py\", line 1952, in full_dispatch_request\r\nrv = self.handle_user_exception(e)\r\nFile \"/usr/local/lib/python3.7/site-packages/flask_restx/api.py\", line 639, in error_router\r\nreturn original_handler(e)\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/app.py\", line 1821, in handle_user_exception\r\nreraise(exc_type, exc_value, tb)\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/_compat.py\", line 39, in reraise\r\nraise value\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/app.py\", line 1950, in full_dispatch_request\r\nrv = self.dispatch_request()\r\nFile \"/usr/local/lib/python3.7/site-packages/flask/app.py\", line 1936, in dispatch_request\r\nreturn self.view_functions[rule.endpoint](**req.view_args)\r\nFile \"/opt/CTFd/CTFd/utils/decorators/__init__.py\", line 133, in admins_only_wrapper\r\nreturn f(*args, **kwargs)\r\nFile \"/opt/CTFd/CTFd/admin/users.py\", line 91, in users_detail\r\nscore = user.account.get_score(admin=True)\r\nAttributeError: 'NoneType' object has no attribute 'get_score'\r\n```\r\n\n", "before_files": [{"content": "from flask import render_template, request, url_for\nfrom sqlalchemy.sql import not_\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Challenges, Tracking, Users\nfrom CTFd.utils import get_config\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.modes import TEAMS_MODE\n\n\[email protected](\"/admin/users\")\n@admins_only\ndef users_listing():\n q = request.args.get(\"q\")\n field = request.args.get(\"field\")\n page = abs(request.args.get(\"page\", 1, 
type=int))\n filters = []\n users = []\n\n if q:\n # The field exists as an exposed column\n if Users.__mapper__.has_property(field):\n filters.append(getattr(Users, field).like(\"%{}%\".format(q)))\n\n if q and field == \"ip\":\n users = (\n Users.query.join(Tracking, Users.id == Tracking.user_id)\n .filter(Tracking.ip.like(\"%{}%\".format(q)))\n .order_by(Users.id.asc())\n .paginate(page=page, per_page=50)\n )\n else:\n users = (\n Users.query.filter(*filters)\n .order_by(Users.id.asc())\n .paginate(page=page, per_page=50)\n )\n\n args = dict(request.args)\n args.pop(\"page\", 1)\n\n return render_template(\n \"admin/users/users.html\",\n users=users,\n prev_page=url_for(request.endpoint, page=users.prev_num, **args),\n next_page=url_for(request.endpoint, page=users.next_num, **args),\n q=q,\n field=field,\n )\n\n\[email protected](\"/admin/users/new\")\n@admins_only\ndef users_new():\n return render_template(\"admin/users/new.html\")\n\n\[email protected](\"/admin/users/<int:user_id>\")\n@admins_only\ndef users_detail(user_id):\n # Get user object\n user = Users.query.filter_by(id=user_id).first_or_404()\n\n # Get the user's solves\n solves = user.get_solves(admin=True)\n\n # Get challenges that the user is missing\n if get_config(\"user_mode\") == TEAMS_MODE:\n if user.team:\n all_solves = user.team.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n else:\n all_solves = user.get_solves(admin=True)\n\n solve_ids = [s.challenge_id for s in all_solves]\n missing = Challenges.query.filter(not_(Challenges.id.in_(solve_ids))).all()\n\n # Get IP addresses that the User has used\n addrs = (\n Tracking.query.filter_by(user_id=user_id).order_by(Tracking.date.desc()).all()\n )\n\n # Get Fails\n fails = user.get_fails(admin=True)\n\n # Get Awards\n awards = user.get_awards(admin=True)\n\n # Get user properties\n score = user.account.get_score(admin=True)\n place = user.account.get_place(admin=True)\n\n return render_template(\n \"admin/users/user.html\",\n solves=solves,\n user=user,\n addrs=addrs,\n score=score,\n missing=missing,\n place=place,\n fails=fails,\n awards=awards,\n )\n", "path": "CTFd/admin/users.py"}]} | 2,080 | 180 |
gh_patches_debug_29719 | rasdani/github-patches | git_diff | vispy__vispy-823 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Text is misplaced in scene/console example

</issue>
<code>
[start of examples/basics/scene/console.py]
1 # -*- coding: utf-8 -*-
2 # vispy: gallery 30
3 # -----------------------------------------------------------------------------
4 # Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
5 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
6 # -----------------------------------------------------------------------------
7 """
8 Demonstrate the use of the vispy console. Note how the console size is
9 independent of the canvas scaling.
10 """
11 import sys
12
13 from vispy import scene, app
14 from vispy.scene.widgets import Console
15 from vispy.scene.visuals import Text
16
17 canvas = scene.SceneCanvas(keys='interactive', size=(400, 400))
18 grid = canvas.central_widget.add_grid()
19
20 vb = scene.widgets.ViewBox(border_color='b')
21 vb.camera.rect = -1, -1, 2, 2
22 grid.add_widget(vb, row=0, col=0)
23 text = Text('Starting timer...', color='w', font_size=24, parent=vb.scene)
24
25 console = Console(text_color='g', font_size=12., border_color='g')
26 grid.add_widget(console, row=1, col=0)
27
28
29 def on_timer(event):
30 text.text = 'Tick #%s' % event.iteration
31 if event.iteration > 1 and event.iteration % 10 == 0:
32 console.clear()
33 console.write('Elapsed:\n %s' % event.elapsed)
34 canvas.update()
35
36 timer = app.Timer(2.0, connect=on_timer, start=True)
37
38 console.write('This is a line that will be wrapped automatically by the '
39 'console.\n')
40 console.write('This line will be truncated ....................,\n'
41 'but this next line will survive.\n', wrap=False)
42
43 if __name__ == '__main__':
44 canvas.show()
45 if sys.flags.interactive != 1:
46 canvas.app.run()
47
[end of examples/basics/scene/console.py]
[start of examples/basics/scene/grid_large.py]
1 # -*- coding: utf-8 -*-
2 # vispy: testskip # disabled due to segfaults on travis
3 # -----------------------------------------------------------------------------
4 # Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
5 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
6 # -----------------------------------------------------------------------------
7 """
8 Test automatic layout of multiple viewboxes using Grid.
9 """
10
11 import sys
12 from vispy import scene
13 from vispy import app
14 import numpy as np
15
16 canvas = scene.SceneCanvas(keys='interactive')
17 canvas.size = 600, 600
18 canvas.show()
19
20 grid = canvas.central_widget.add_grid()
21
22
23 N = 10000
24 lines = []
25 for i in range(10):
26 lines.append([])
27 for j in range(10):
28 vb = grid.add_view(row=i, col=j)
29 vb.camera.rect = (0, -5), (100, 10)
30 vb.border = (1, 1, 1, 0.4)
31
32 pos = np.empty((N, 2), dtype=np.float32)
33 pos[:, 0] = np.linspace(0, 100, N)
34 pos[:, 1] = np.random.normal(size=N)
35 line = scene.visuals.Line(pos=pos, color=(1, 1, 1, 0.5), mode='gl')
36 vb.add(line)
37
38
39 if __name__ == '__main__' and sys.flags.interactive == 0:
40 app.run()
41
[end of examples/basics/scene/grid_large.py]
[start of examples/basics/scene/grid.py]
1 # -*- coding: utf-8 -*-
2 # vispy: gallery 30
3 # -----------------------------------------------------------------------------
4 # Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
5 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
6 # -----------------------------------------------------------------------------
7 """
8 Test automatic layout of multiple viewboxes using Grid.
9 """
10 import sys
11 import numpy as np
12
13 from vispy import scene, app
14
15 canvas = scene.SceneCanvas(keys='interactive')
16 canvas.size = 600, 600
17 canvas.show()
18
19 # This is the top-level widget that will hold three ViewBoxes, which will
20 # be automatically resized whenever the grid is resized.
21 grid = canvas.central_widget.add_grid()
22
23
24 # Add 3 ViewBoxes to the grid
25 b1 = grid.add_view(row=0, col=0, col_span=2)
26 b1.border_color = (0.5, 0.5, 0.5, 1)
27 b1.camera = scene.PanZoomCamera(rect=(-0.5, -5, 11, 10))
28 b1.border = (1, 0, 0, 1)
29
30 b2 = grid.add_view(row=1, col=0)
31 b2.border_color = (0.5, 0.5, 0.5, 1)
32 b2.camera = scene.PanZoomCamera(rect=(-10, -5, 15, 10))
33 b2.border = (1, 0, 0, 1)
34
35 b3 = grid.add_view(row=1, col=1)
36 b3.border_color = (0.5, 0.5, 0.5, 1)
37 b3.camera = scene.PanZoomCamera(rect=(-5, -5, 10, 10))
38 b3.border = (1, 0, 0, 1)
39
40
41 # Generate some random vertex data and a color gradient
42 N = 10000
43 pos = np.empty((N, 2), dtype=np.float32)
44 pos[:, 0] = np.linspace(0, 10, N)
45 pos[:, 1] = np.random.normal(size=N)
46 pos[5000, 1] += 50
47
48 color = np.ones((N, 4), dtype=np.float32)
49 color[:, 0] = np.linspace(0, 1, N)
50 color[:, 1] = color[::-1, 0]
51
52 # Top grid cell shows plot data in a rectangular coordinate system.
53 l1 = scene.visuals.Line(pos=pos, color=color, antialias=False, mode='gl')
54 b1.add(l1)
55 grid1 = scene.visuals.GridLines(parent=b1.scene)
56
57 # Bottom-left grid cell shows the same data with log-transformed X
58 e2 = scene.Node(parent=b2.scene)
59 e2.transform = scene.transforms.LogTransform(base=(2, 0, 0))
60 l2 = scene.visuals.Line(pos=pos, color=color, antialias=False, parent=e2,
61 mode='gl')
62 grid2 = scene.visuals.GridLines(parent=e2)
63
64 # Bottom-right grid cell shows the same data again, but with a much more
65 # interesting transformation.
66 e3 = scene.Node(parent=b3.scene)
67 affine = scene.transforms.AffineTransform()
68 affine.scale((1, 0.1))
69 affine.rotate(10, (0, 0, 1))
70 affine.translate((0, 1))
71 e3.transform = scene.transforms.ChainTransform([
72 scene.transforms.PolarTransform(),
73 affine])
74 l3 = scene.visuals.Line(pos=pos, color=color, antialias=False, parent=e3,
75 mode='gl')
76 grid3 = scene.visuals.GridLines(scale=(np.pi/6., 1.0), parent=e3)
77
78 if __name__ == '__main__' and sys.flags.interactive == 0:
79 app.run()
80
[end of examples/basics/scene/grid.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/basics/scene/console.py b/examples/basics/scene/console.py
--- a/examples/basics/scene/console.py
+++ b/examples/basics/scene/console.py
@@ -18,6 +18,7 @@
grid = canvas.central_widget.add_grid()
vb = scene.widgets.ViewBox(border_color='b')
+vb.camera = 'panzoom'
vb.camera.rect = -1, -1, 2, 2
grid.add_widget(vb, row=0, col=0)
text = Text('Starting timer...', color='w', font_size=24, parent=vb.scene)
diff --git a/examples/basics/scene/grid.py b/examples/basics/scene/grid.py
--- a/examples/basics/scene/grid.py
+++ b/examples/basics/scene/grid.py
@@ -23,16 +23,19 @@
# Add 3 ViewBoxes to the grid
b1 = grid.add_view(row=0, col=0, col_span=2)
+b1.camera = 'panzoom'
b1.border_color = (0.5, 0.5, 0.5, 1)
b1.camera = scene.PanZoomCamera(rect=(-0.5, -5, 11, 10))
b1.border = (1, 0, 0, 1)
b2 = grid.add_view(row=1, col=0)
+b2.camera = 'panzoom'
b2.border_color = (0.5, 0.5, 0.5, 1)
b2.camera = scene.PanZoomCamera(rect=(-10, -5, 15, 10))
b2.border = (1, 0, 0, 1)
b3 = grid.add_view(row=1, col=1)
+b3.camera = 'panzoom'
b3.border_color = (0.5, 0.5, 0.5, 1)
b3.camera = scene.PanZoomCamera(rect=(-5, -5, 10, 10))
b3.border = (1, 0, 0, 1)
diff --git a/examples/basics/scene/grid_large.py b/examples/basics/scene/grid_large.py
--- a/examples/basics/scene/grid_large.py
+++ b/examples/basics/scene/grid_large.py
@@ -26,6 +26,7 @@
lines.append([])
for j in range(10):
vb = grid.add_view(row=i, col=j)
+ vb.camera = 'panzoom'
vb.camera.rect = (0, -5), (100, 10)
vb.border = (1, 1, 1, 0.4)
| {"golden_diff": "diff --git a/examples/basics/scene/console.py b/examples/basics/scene/console.py\n--- a/examples/basics/scene/console.py\n+++ b/examples/basics/scene/console.py\n@@ -18,6 +18,7 @@\n grid = canvas.central_widget.add_grid()\n \n vb = scene.widgets.ViewBox(border_color='b')\n+vb.camera = 'panzoom'\n vb.camera.rect = -1, -1, 2, 2\n grid.add_widget(vb, row=0, col=0)\n text = Text('Starting timer...', color='w', font_size=24, parent=vb.scene)\ndiff --git a/examples/basics/scene/grid.py b/examples/basics/scene/grid.py\n--- a/examples/basics/scene/grid.py\n+++ b/examples/basics/scene/grid.py\n@@ -23,16 +23,19 @@\n \n # Add 3 ViewBoxes to the grid\n b1 = grid.add_view(row=0, col=0, col_span=2)\n+b1.camera = 'panzoom'\n b1.border_color = (0.5, 0.5, 0.5, 1)\n b1.camera = scene.PanZoomCamera(rect=(-0.5, -5, 11, 10))\n b1.border = (1, 0, 0, 1)\n \n b2 = grid.add_view(row=1, col=0)\n+b2.camera = 'panzoom'\n b2.border_color = (0.5, 0.5, 0.5, 1)\n b2.camera = scene.PanZoomCamera(rect=(-10, -5, 15, 10))\n b2.border = (1, 0, 0, 1)\n \n b3 = grid.add_view(row=1, col=1)\n+b3.camera = 'panzoom'\n b3.border_color = (0.5, 0.5, 0.5, 1)\n b3.camera = scene.PanZoomCamera(rect=(-5, -5, 10, 10))\n b3.border = (1, 0, 0, 1)\ndiff --git a/examples/basics/scene/grid_large.py b/examples/basics/scene/grid_large.py\n--- a/examples/basics/scene/grid_large.py\n+++ b/examples/basics/scene/grid_large.py\n@@ -26,6 +26,7 @@\n lines.append([])\n for j in range(10):\n vb = grid.add_view(row=i, col=j)\n+ vb.camera = 'panzoom'\n vb.camera.rect = (0, -5), (100, 10)\n vb.border = (1, 1, 1, 0.4)\n", "issue": "Text is misplaced in scene/console example\n\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# vispy: gallery 30\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\"\"\"\nDemonstrate the use of the vispy console. Note how the console size is\nindependent of the canvas scaling.\n\"\"\"\nimport sys\n\nfrom vispy import scene, app\nfrom vispy.scene.widgets import Console\nfrom vispy.scene.visuals import Text\n\ncanvas = scene.SceneCanvas(keys='interactive', size=(400, 400))\ngrid = canvas.central_widget.add_grid()\n\nvb = scene.widgets.ViewBox(border_color='b')\nvb.camera.rect = -1, -1, 2, 2\ngrid.add_widget(vb, row=0, col=0)\ntext = Text('Starting timer...', color='w', font_size=24, parent=vb.scene)\n\nconsole = Console(text_color='g', font_size=12., border_color='g')\ngrid.add_widget(console, row=1, col=0)\n\n\ndef on_timer(event):\n text.text = 'Tick #%s' % event.iteration\n if event.iteration > 1 and event.iteration % 10 == 0:\n console.clear()\n console.write('Elapsed:\\n %s' % event.elapsed)\n canvas.update()\n\ntimer = app.Timer(2.0, connect=on_timer, start=True)\n\nconsole.write('This is a line that will be wrapped automatically by the '\n 'console.\\n')\nconsole.write('This line will be truncated ....................,\\n'\n 'but this next line will survive.\\n', wrap=False)\n\nif __name__ == '__main__':\n canvas.show()\n if sys.flags.interactive != 1:\n canvas.app.run()\n", "path": "examples/basics/scene/console.py"}, {"content": "# -*- coding: utf-8 -*-\n# vispy: testskip # disabled due to segfaults on travis\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Vispy Development Team. 
All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\"\"\"\nTest automatic layout of multiple viewboxes using Grid.\n\"\"\"\n\nimport sys\nfrom vispy import scene\nfrom vispy import app\nimport numpy as np\n\ncanvas = scene.SceneCanvas(keys='interactive')\ncanvas.size = 600, 600\ncanvas.show()\n\ngrid = canvas.central_widget.add_grid()\n\n\nN = 10000\nlines = []\nfor i in range(10):\n lines.append([])\n for j in range(10):\n vb = grid.add_view(row=i, col=j)\n vb.camera.rect = (0, -5), (100, 10)\n vb.border = (1, 1, 1, 0.4)\n\n pos = np.empty((N, 2), dtype=np.float32)\n pos[:, 0] = np.linspace(0, 100, N)\n pos[:, 1] = np.random.normal(size=N)\n line = scene.visuals.Line(pos=pos, color=(1, 1, 1, 0.5), mode='gl')\n vb.add(line)\n\n\nif __name__ == '__main__' and sys.flags.interactive == 0:\n app.run()\n", "path": "examples/basics/scene/grid_large.py"}, {"content": "# -*- coding: utf-8 -*-\n# vispy: gallery 30\n# -----------------------------------------------------------------------------\n# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n# -----------------------------------------------------------------------------\n\"\"\"\nTest automatic layout of multiple viewboxes using Grid.\n\"\"\"\nimport sys\nimport numpy as np\n\nfrom vispy import scene, app\n\ncanvas = scene.SceneCanvas(keys='interactive')\ncanvas.size = 600, 600\ncanvas.show()\n\n# This is the top-level widget that will hold three ViewBoxes, which will\n# be automatically resized whenever the grid is resized.\ngrid = canvas.central_widget.add_grid()\n\n\n# Add 3 ViewBoxes to the grid\nb1 = grid.add_view(row=0, col=0, col_span=2)\nb1.border_color = (0.5, 0.5, 0.5, 1)\nb1.camera = scene.PanZoomCamera(rect=(-0.5, -5, 11, 10))\nb1.border = (1, 0, 0, 1)\n\nb2 = grid.add_view(row=1, col=0)\nb2.border_color = (0.5, 0.5, 0.5, 1)\nb2.camera = scene.PanZoomCamera(rect=(-10, -5, 15, 10))\nb2.border = (1, 0, 0, 1)\n\nb3 = grid.add_view(row=1, col=1)\nb3.border_color = (0.5, 0.5, 0.5, 1)\nb3.camera = scene.PanZoomCamera(rect=(-5, -5, 10, 10))\nb3.border = (1, 0, 0, 1)\n\n\n# Generate some random vertex data and a color gradient\nN = 10000\npos = np.empty((N, 2), dtype=np.float32)\npos[:, 0] = np.linspace(0, 10, N)\npos[:, 1] = np.random.normal(size=N)\npos[5000, 1] += 50\n\ncolor = np.ones((N, 4), dtype=np.float32)\ncolor[:, 0] = np.linspace(0, 1, N)\ncolor[:, 1] = color[::-1, 0]\n\n# Top grid cell shows plot data in a rectangular coordinate system.\nl1 = scene.visuals.Line(pos=pos, color=color, antialias=False, mode='gl')\nb1.add(l1)\ngrid1 = scene.visuals.GridLines(parent=b1.scene)\n\n# Bottom-left grid cell shows the same data with log-transformed X\ne2 = scene.Node(parent=b2.scene)\ne2.transform = scene.transforms.LogTransform(base=(2, 0, 0))\nl2 = scene.visuals.Line(pos=pos, color=color, antialias=False, parent=e2,\n mode='gl')\ngrid2 = scene.visuals.GridLines(parent=e2)\n\n# Bottom-right grid cell shows the same data again, but with a much more\n# interesting transformation.\ne3 = scene.Node(parent=b3.scene)\naffine = scene.transforms.AffineTransform()\naffine.scale((1, 0.1))\naffine.rotate(10, (0, 0, 1))\naffine.translate((0, 1))\ne3.transform = scene.transforms.ChainTransform([\n scene.transforms.PolarTransform(),\n affine])\nl3 = scene.visuals.Line(pos=pos, color=color, antialias=False, parent=e3,\n mode='gl')\ngrid3 = 
scene.visuals.GridLines(scale=(np.pi/6., 1.0), parent=e3)\n\nif __name__ == '__main__' and sys.flags.interactive == 0:\n app.run()\n", "path": "examples/basics/scene/grid.py"}]} | 2,516 | 590 |
gh_patches_debug_21854 | rasdani/github-patches | git_diff | pydantic__pydantic-1994 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
data.json file appears after `make docs`
# Step to reproduce
Follow the steps [here](https://pydantic-docs.helpmanual.io/contributing/).
When running `make docs`, a new file `data.json` appears at the repository root (`/`). It should either not be generated there at all, or already be committed, or be excluded by `.gitignore`.
For context, see [this comment](https://github.com/samuelcolvin/pydantic/pull/1987/files#r502764774).
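One way to stop the example from leaving the stray file behind (a sketch in line with the linked discussion, not the only possible resolution; `User` is the model from `docs/examples/models_parse.py` shown further down) is to write the temporary file with `pathlib` and remove it after parsing:

```python
from pathlib import Path

path = Path('data.json')
path.write_text('{"id": 123, "name": "James"}')
m = User.parse_file(path)
print(m)
# Clean up so `make docs` leaves no data.json at the repository root.
if path.exists():
    path.unlink()
```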
</issue>
<code>
[start of docs/build/exec_examples.py]
1 #!/usr/bin/env python3
2 import importlib
3 import inspect
4 import json
5 import os
6 import re
7 import shutil
8 import subprocess
9 import sys
10 import textwrap
11 import traceback
12 from pathlib import Path
13 from typing import Any, List, Tuple
14 from unittest.mock import patch
15
16 from ansi2html import Ansi2HTMLConverter
17 from devtools import PrettyFormat
18
19 THIS_DIR = Path(__file__).parent
20 DOCS_DIR = (THIS_DIR / '..').resolve()
21 EXAMPLES_DIR = DOCS_DIR / 'examples'
22 TMP_EXAMPLES_DIR = DOCS_DIR / '.tmp_examples'
23 MAX_LINE_LENGTH = int(re.search(r'max_line_length = (\d+)', (EXAMPLES_DIR / '.editorconfig').read_text()).group(1))
24 LONG_LINE = 50
25 pformat = PrettyFormat(simple_cutoff=LONG_LINE)
26
27
28 def to_string(value: Any) -> str:
29 # attempt to build a pretty equivalent of the print output
30 if isinstance(value, (dict, list, tuple, set)):
31 return pformat(value)
32 elif isinstance(value, str) and any(re.fullmatch(r, value, flags=re.DOTALL) for r in ['{".+}', r'\[.+\]']):
33 try:
34 obj = json.loads(value)
35 except ValueError:
36 # not JSON, not a problem
37 pass
38 else:
39 s = json.dumps(obj)
40 if len(s) > LONG_LINE:
41 json.dumps(obj, indent=2)
42 else:
43 return s
44
45 return str(value)
46
47
48 class MockPrint:
49 def __init__(self, file: Path):
50 self.file = file
51 self.statements = []
52
53 def __call__(self, *args, file=None, flush=None):
54 frame = inspect.currentframe().f_back.f_back.f_back
55 if sys.version_info >= (3, 8):
56 frame = frame.f_back
57 if not self.file.samefile(frame.f_code.co_filename):
58 # happens when index_error.py imports index_main.py
59 return
60 s = ' '.join(map(to_string, args))
61
62 self.statements.append((frame.f_lineno, s))
63
64
65 def build_print_lines(s: str, max_len_reduction: int = 0):
66 print_lines = []
67 max_len = MAX_LINE_LENGTH - 3 - max_len_reduction
68 for line in s.split('\n'):
69 if len(line) > max_len:
70 print_lines += textwrap.wrap(line, width=max_len)
71 else:
72 print_lines.append(line)
73 return print_lines
74
75
76 def build_print_statement(line_no: int, s: str, lines: List[str]) -> None:
77 indent = ''
78 for back in range(1, 100):
79 m = re.search(r'^( *)print\(', lines[line_no - back])
80 if m:
81 indent = m.group(1)
82 break
83 print_lines = build_print_lines(s, len(indent))
84
85 if len(print_lines) > 2:
86 text = textwrap.indent('"""\n{}\n"""'.format('\n'.join(print_lines)), indent)
87 else:
88 text = '\n'.join(f'{indent}#> {line}' for line in print_lines)
89 lines.insert(line_no, text)
90
91
92 def all_md_contents() -> str:
93 file_contents = []
94 for f in DOCS_DIR.glob('**/*.md'):
95 file_contents.append(f.read_text())
96 return '\n\n\n'.join(file_contents)
97
98
99 def gen_ansi_output():
100
101 conv = Ansi2HTMLConverter()
102
103 input_file = EXAMPLES_DIR / 'devtools_main.py'
104 os.environ['PY_DEVTOOLS_HIGHLIGHT'] = 'true'
105 p = subprocess.run((sys.executable, str(input_file)), stdout=subprocess.PIPE, check=True, encoding='utf8')
106 html = conv.convert(p.stdout, full=False).strip('\r\n')
107 full_html = f'<div class="terminal">\n<pre class="terminal-content">\n{html}\n</pre>\n</div>'
108 path = TMP_EXAMPLES_DIR / f'{input_file.stem}.html'
109 path.write_text(full_html)
110 print(f'generated ansi output to {path}')
111
112
113 dont_execute_re = re.compile(r'^# dont-execute\n', flags=re.M | re.I)
114 required_py_re = re.compile(r'^# *requires *python *(\d+).(\d+)', flags=re.M)
115
116
117 def should_execute(file_name: str, file_text: str) -> Tuple[str, bool]:
118 if dont_execute_re.search(file_text):
119 return dont_execute_re.sub('', file_text), False
120 m = required_py_re.search(file_text)
121 if m:
122 if sys.version_info >= tuple(int(v) for v in m.groups()):
123 return required_py_re.sub('', file_text), True
124 else:
125 v = '.'.join(m.groups())
126 print(f'WARNING: {file_name} requires python {v}, not running')
127 return required_py_re.sub(f'# requires python {v}, NOT EXECUTED!', file_text), False
128 else:
129 return file_text, True
130
131
132 def exec_examples():
133 errors = []
134 all_md = all_md_contents()
135 new_files = {}
136 os.environ.update({'my_auth_key': 'xxx', 'my_api_key': 'xxx'})
137
138 sys.path.append(str(EXAMPLES_DIR))
139 for file in sorted(EXAMPLES_DIR.iterdir()):
140
141 def error(desc: str):
142 errors.append((file, desc))
143 sys.stderr.write(f'error in {file.name}: {desc}\n')
144
145 if not file.is_file():
146 # __pycache__, maybe others
147 continue
148
149 if file.suffix != '.py':
150 # just copy
151 new_files[file.name] = file.read_text()
152 continue
153
154 if f'{{!.tmp_examples/{file.name}!}}' not in all_md:
155 error('file not used anywhere')
156
157 file_text = file.read_text('utf-8')
158 if '\n\n\n\n' in file_text:
159 error('too many new lines')
160 if not file_text.endswith('\n'):
161 error('no trailing new line')
162 if re.search('^ *# *>', file_text, flags=re.M):
163 error('contains comments with print output, please remove')
164
165 file_text, execute = should_execute(file.name, file_text)
166 if execute:
167 no_print_intercept_re = re.compile(r'^# no-print-intercept\n', flags=re.M)
168 print_intercept = not bool(no_print_intercept_re.search(file_text))
169 if not print_intercept:
170 file_text = no_print_intercept_re.sub('', file_text)
171
172 if file.stem in sys.modules:
173 del sys.modules[file.stem]
174 mp = MockPrint(file)
175 mod = None
176 with patch('builtins.print') as mock_print:
177 if print_intercept:
178 mock_print.side_effect = mp
179 try:
180 mod = importlib.import_module(file.stem)
181 except Exception:
182 tb = traceback.format_exception(*sys.exc_info())
183 error(''.join(e for e in tb if '/pydantic/docs/examples/' in e or not e.startswith(' File ')))
184
185 if mod and not mod.__file__.startswith(str(EXAMPLES_DIR)):
186 error(f'module path "{mod.__file__}" not inside "{EXAMPLES_DIR}", name may shadow another module?')
187
188 lines = file_text.split('\n')
189
190 to_json_line = '# output-json'
191 if to_json_line in lines:
192 lines = [line for line in lines if line != to_json_line]
193 if len(mp.statements) != 1:
194 error('should have exactly one print statement')
195 print_lines = build_print_lines(mp.statements[0][1])
196 new_files[file.stem + '.json'] = '\n'.join(print_lines) + '\n'
197 else:
198 for line_no, print_string in reversed(mp.statements):
199 build_print_statement(line_no, print_string, lines)
200 else:
201 lines = file_text.split('\n')
202
203 try:
204 ignore_above = lines.index('# ignore-above')
205 except ValueError:
206 pass
207 else:
208 lines = lines[ignore_above + 1 :]
209
210 lines = '\n'.join(lines).split('\n')
211 if any(len(l) > MAX_LINE_LENGTH for l in lines):
212 error(f'lines longer than {MAX_LINE_LENGTH} characters')
213
214 new_files[file.name] = '\n'.join(lines)
215
216 if errors:
217 print(f'\n{len(errors)} errors, not writing files\n')
218 return 1
219
220 if TMP_EXAMPLES_DIR.exists():
221 shutil.rmtree(TMP_EXAMPLES_DIR)
222
223 print(f'writing {len(new_files)} example files to {TMP_EXAMPLES_DIR}')
224 TMP_EXAMPLES_DIR.mkdir()
225 for file_name, content in new_files.items():
226 (TMP_EXAMPLES_DIR / file_name).write_text(content, 'utf-8')
227 gen_ansi_output()
228 return 0
229
230
231 if __name__ == '__main__':
232 sys.exit(exec_examples())
233
[end of docs/build/exec_examples.py]
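Mirroring the `# ignore-above` handling near the end of `exec_examples()` above, trailing cleanup code in an example could be trimmed with a complementary marker; this sketch is essentially the shape of the change the patch below makes:

```python
# Drop everything from a '# ignore-below' marker onward before writing the example.
try:
    ignore_below = lines.index('# ignore-below')
except ValueError:
    pass
else:
    lines = lines[:ignore_below]
```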
[start of docs/examples/models_parse.py]
1 import pickle
2 from datetime import datetime
3 from pydantic import BaseModel, ValidationError
4
5
6 class User(BaseModel):
7 id: int
8 name = 'John Doe'
9 signup_ts: datetime = None
10
11
12 m = User.parse_obj({'id': 123, 'name': 'James'})
13 print(m)
14
15 try:
16 User.parse_obj(['not', 'a', 'dict'])
17 except ValidationError as e:
18 print(e)
19
20 # assumes json as no content type passed
21 m = User.parse_raw('{"id": 123, "name": "James"}')
22 print(m)
23
24 pickle_data = pickle.dumps({
25 'id': 123,
26 'name': 'James',
27 'signup_ts': datetime(2017, 7, 14)
28 })
29 m = User.parse_raw(
30 pickle_data, content_type='application/pickle', allow_pickle=True
31 )
32 print(m)
33
34 path = 'data.json'
35 with open(path, 'w') as f:
36 f.write('{"id": 123, "name": "James"}')
37 m = User.parse_file(path)
38 print(m)
39
[end of docs/examples/models_parse.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/build/exec_examples.py b/docs/build/exec_examples.py
--- a/docs/build/exec_examples.py
+++ b/docs/build/exec_examples.py
@@ -207,6 +207,13 @@
else:
lines = lines[ignore_above + 1 :]
+ try:
+ ignore_below = lines.index('# ignore-below')
+ except ValueError:
+ pass
+ else:
+ lines = lines[:ignore_below]
+
lines = '\n'.join(lines).split('\n')
if any(len(l) > MAX_LINE_LENGTH for l in lines):
error(f'lines longer than {MAX_LINE_LENGTH} characters')
diff --git a/docs/examples/models_parse.py b/docs/examples/models_parse.py
--- a/docs/examples/models_parse.py
+++ b/docs/examples/models_parse.py
@@ -1,5 +1,7 @@
import pickle
from datetime import datetime
+from pathlib import Path
+
from pydantic import BaseModel, ValidationError
@@ -31,8 +33,10 @@
)
print(m)
-path = 'data.json'
-with open(path, 'w') as f:
- f.write('{"id": 123, "name": "James"}')
+path = Path('data.json')
+path.write_text('{"id": 123, "name": "James"}')
m = User.parse_file(path)
print(m)
+# ignore-below
+if path.exists():
+ path.unlink()
| {"golden_diff": "diff --git a/docs/build/exec_examples.py b/docs/build/exec_examples.py\n--- a/docs/build/exec_examples.py\n+++ b/docs/build/exec_examples.py\n@@ -207,6 +207,13 @@\n else:\n lines = lines[ignore_above + 1 :]\n \n+ try:\n+ ignore_below = lines.index('# ignore-below')\n+ except ValueError:\n+ pass\n+ else:\n+ lines = lines[:ignore_below]\n+\n lines = '\\n'.join(lines).split('\\n')\n if any(len(l) > MAX_LINE_LENGTH for l in lines):\n error(f'lines longer than {MAX_LINE_LENGTH} characters')\ndiff --git a/docs/examples/models_parse.py b/docs/examples/models_parse.py\n--- a/docs/examples/models_parse.py\n+++ b/docs/examples/models_parse.py\n@@ -1,5 +1,7 @@\n import pickle\n from datetime import datetime\n+from pathlib import Path\n+\n from pydantic import BaseModel, ValidationError\n \n \n@@ -31,8 +33,10 @@\n )\n print(m)\n \n-path = 'data.json'\n-with open(path, 'w') as f:\n- f.write('{\"id\": 123, \"name\": \"James\"}')\n+path = Path('data.json')\n+path.write_text('{\"id\": 123, \"name\": \"James\"}')\n m = User.parse_file(path)\n print(m)\n+# ignore-below\n+if path.exists():\n+ path.unlink()\n", "issue": "data.json file appears after `make docs`\n# Step to reproduce\r\n\r\nFollow the steps [here](https://pydantic-docs.helpmanual.io/contributing/).\r\n\r\nWhen running `make docs`, a new file `data.json` appears at `/`. This should not appear there, or should already be committed there, or should be excluded by `.gitignore`.\r\n\r\nFor context, see [this comment](https://github.com/samuelcolvin/pydantic/pull/1987/files#r502764774).\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport importlib\nimport inspect\nimport json\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport textwrap\nimport traceback\nfrom pathlib import Path\nfrom typing import Any, List, Tuple\nfrom unittest.mock import patch\n\nfrom ansi2html import Ansi2HTMLConverter\nfrom devtools import PrettyFormat\n\nTHIS_DIR = Path(__file__).parent\nDOCS_DIR = (THIS_DIR / '..').resolve()\nEXAMPLES_DIR = DOCS_DIR / 'examples'\nTMP_EXAMPLES_DIR = DOCS_DIR / '.tmp_examples'\nMAX_LINE_LENGTH = int(re.search(r'max_line_length = (\\d+)', (EXAMPLES_DIR / '.editorconfig').read_text()).group(1))\nLONG_LINE = 50\npformat = PrettyFormat(simple_cutoff=LONG_LINE)\n\n\ndef to_string(value: Any) -> str:\n # attempt to build a pretty equivalent of the print output\n if isinstance(value, (dict, list, tuple, set)):\n return pformat(value)\n elif isinstance(value, str) and any(re.fullmatch(r, value, flags=re.DOTALL) for r in ['{\".+}', r'\\[.+\\]']):\n try:\n obj = json.loads(value)\n except ValueError:\n # not JSON, not a problem\n pass\n else:\n s = json.dumps(obj)\n if len(s) > LONG_LINE:\n json.dumps(obj, indent=2)\n else:\n return s\n\n return str(value)\n\n\nclass MockPrint:\n def __init__(self, file: Path):\n self.file = file\n self.statements = []\n\n def __call__(self, *args, file=None, flush=None):\n frame = inspect.currentframe().f_back.f_back.f_back\n if sys.version_info >= (3, 8):\n frame = frame.f_back\n if not self.file.samefile(frame.f_code.co_filename):\n # happens when index_error.py imports index_main.py\n return\n s = ' '.join(map(to_string, args))\n\n self.statements.append((frame.f_lineno, s))\n\n\ndef build_print_lines(s: str, max_len_reduction: int = 0):\n print_lines = []\n max_len = MAX_LINE_LENGTH - 3 - max_len_reduction\n for line in s.split('\\n'):\n if len(line) > max_len:\n print_lines += textwrap.wrap(line, width=max_len)\n else:\n print_lines.append(line)\n return 
print_lines\n\n\ndef build_print_statement(line_no: int, s: str, lines: List[str]) -> None:\n indent = ''\n for back in range(1, 100):\n m = re.search(r'^( *)print\\(', lines[line_no - back])\n if m:\n indent = m.group(1)\n break\n print_lines = build_print_lines(s, len(indent))\n\n if len(print_lines) > 2:\n text = textwrap.indent('\"\"\"\\n{}\\n\"\"\"'.format('\\n'.join(print_lines)), indent)\n else:\n text = '\\n'.join(f'{indent}#> {line}' for line in print_lines)\n lines.insert(line_no, text)\n\n\ndef all_md_contents() -> str:\n file_contents = []\n for f in DOCS_DIR.glob('**/*.md'):\n file_contents.append(f.read_text())\n return '\\n\\n\\n'.join(file_contents)\n\n\ndef gen_ansi_output():\n\n conv = Ansi2HTMLConverter()\n\n input_file = EXAMPLES_DIR / 'devtools_main.py'\n os.environ['PY_DEVTOOLS_HIGHLIGHT'] = 'true'\n p = subprocess.run((sys.executable, str(input_file)), stdout=subprocess.PIPE, check=True, encoding='utf8')\n html = conv.convert(p.stdout, full=False).strip('\\r\\n')\n full_html = f'<div class=\"terminal\">\\n<pre class=\"terminal-content\">\\n{html}\\n</pre>\\n</div>'\n path = TMP_EXAMPLES_DIR / f'{input_file.stem}.html'\n path.write_text(full_html)\n print(f'generated ansi output to {path}')\n\n\ndont_execute_re = re.compile(r'^# dont-execute\\n', flags=re.M | re.I)\nrequired_py_re = re.compile(r'^# *requires *python *(\\d+).(\\d+)', flags=re.M)\n\n\ndef should_execute(file_name: str, file_text: str) -> Tuple[str, bool]:\n if dont_execute_re.search(file_text):\n return dont_execute_re.sub('', file_text), False\n m = required_py_re.search(file_text)\n if m:\n if sys.version_info >= tuple(int(v) for v in m.groups()):\n return required_py_re.sub('', file_text), True\n else:\n v = '.'.join(m.groups())\n print(f'WARNING: {file_name} requires python {v}, not running')\n return required_py_re.sub(f'# requires python {v}, NOT EXECUTED!', file_text), False\n else:\n return file_text, True\n\n\ndef exec_examples():\n errors = []\n all_md = all_md_contents()\n new_files = {}\n os.environ.update({'my_auth_key': 'xxx', 'my_api_key': 'xxx'})\n\n sys.path.append(str(EXAMPLES_DIR))\n for file in sorted(EXAMPLES_DIR.iterdir()):\n\n def error(desc: str):\n errors.append((file, desc))\n sys.stderr.write(f'error in {file.name}: {desc}\\n')\n\n if not file.is_file():\n # __pycache__, maybe others\n continue\n\n if file.suffix != '.py':\n # just copy\n new_files[file.name] = file.read_text()\n continue\n\n if f'{{!.tmp_examples/{file.name}!}}' not in all_md:\n error('file not used anywhere')\n\n file_text = file.read_text('utf-8')\n if '\\n\\n\\n\\n' in file_text:\n error('too many new lines')\n if not file_text.endswith('\\n'):\n error('no trailing new line')\n if re.search('^ *# *>', file_text, flags=re.M):\n error('contains comments with print output, please remove')\n\n file_text, execute = should_execute(file.name, file_text)\n if execute:\n no_print_intercept_re = re.compile(r'^# no-print-intercept\\n', flags=re.M)\n print_intercept = not bool(no_print_intercept_re.search(file_text))\n if not print_intercept:\n file_text = no_print_intercept_re.sub('', file_text)\n\n if file.stem in sys.modules:\n del sys.modules[file.stem]\n mp = MockPrint(file)\n mod = None\n with patch('builtins.print') as mock_print:\n if print_intercept:\n mock_print.side_effect = mp\n try:\n mod = importlib.import_module(file.stem)\n except Exception:\n tb = traceback.format_exception(*sys.exc_info())\n error(''.join(e for e in tb if '/pydantic/docs/examples/' in e or not e.startswith(' File ')))\n\n if mod and 
not mod.__file__.startswith(str(EXAMPLES_DIR)):\n error(f'module path \"{mod.__file__}\" not inside \"{EXAMPLES_DIR}\", name may shadow another module?')\n\n lines = file_text.split('\\n')\n\n to_json_line = '# output-json'\n if to_json_line in lines:\n lines = [line for line in lines if line != to_json_line]\n if len(mp.statements) != 1:\n error('should have exactly one print statement')\n print_lines = build_print_lines(mp.statements[0][1])\n new_files[file.stem + '.json'] = '\\n'.join(print_lines) + '\\n'\n else:\n for line_no, print_string in reversed(mp.statements):\n build_print_statement(line_no, print_string, lines)\n else:\n lines = file_text.split('\\n')\n\n try:\n ignore_above = lines.index('# ignore-above')\n except ValueError:\n pass\n else:\n lines = lines[ignore_above + 1 :]\n\n lines = '\\n'.join(lines).split('\\n')\n if any(len(l) > MAX_LINE_LENGTH for l in lines):\n error(f'lines longer than {MAX_LINE_LENGTH} characters')\n\n new_files[file.name] = '\\n'.join(lines)\n\n if errors:\n print(f'\\n{len(errors)} errors, not writing files\\n')\n return 1\n\n if TMP_EXAMPLES_DIR.exists():\n shutil.rmtree(TMP_EXAMPLES_DIR)\n\n print(f'writing {len(new_files)} example files to {TMP_EXAMPLES_DIR}')\n TMP_EXAMPLES_DIR.mkdir()\n for file_name, content in new_files.items():\n (TMP_EXAMPLES_DIR / file_name).write_text(content, 'utf-8')\n gen_ansi_output()\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(exec_examples())\n", "path": "docs/build/exec_examples.py"}, {"content": "import pickle\nfrom datetime import datetime\nfrom pydantic import BaseModel, ValidationError\n\n\nclass User(BaseModel):\n id: int\n name = 'John Doe'\n signup_ts: datetime = None\n\n\nm = User.parse_obj({'id': 123, 'name': 'James'})\nprint(m)\n\ntry:\n User.parse_obj(['not', 'a', 'dict'])\nexcept ValidationError as e:\n print(e)\n\n# assumes json as no content type passed\nm = User.parse_raw('{\"id\": 123, \"name\": \"James\"}')\nprint(m)\n\npickle_data = pickle.dumps({\n 'id': 123,\n 'name': 'James',\n 'signup_ts': datetime(2017, 7, 14)\n})\nm = User.parse_raw(\n pickle_data, content_type='application/pickle', allow_pickle=True\n)\nprint(m)\n\npath = 'data.json'\nwith open(path, 'w') as f:\n f.write('{\"id\": 123, \"name\": \"James\"}')\nm = User.parse_file(path)\nprint(m)\n", "path": "docs/examples/models_parse.py"}]} | 3,516 | 317 |
gh_patches_debug_24602 | rasdani/github-patches | git_diff | enthought__chaco-215 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ValueError when using PanTool with restrict_to_data and empty data source
This can come up when a plot's data sources contain empty arrays. Non-empty data sources may be visible on screen, and the user may wish to pan based on that view. The empty data sources should not keep this from happening. Here's a minimal example.
``` python
import numpy as np
from chaco.array_plot_data import ArrayPlotData
from chaco.plot import Plot
from chaco.tools.pan_tool import PanTool
plot_data = ArrayPlotData()
plot = Plot(plot_data)
arr = np.arange(4.0)
plot_data.set_data("x", arr)
plot_data.set_data("y", arr)
plot_data.set_data("z", np.array([], np.float64)
plot.plot(('x', 'y'))
plot.plot(('z', 'z'))
tool = PanTool(plot, restrict_to_data=True)
plot.tools.append(tool)
```
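For orientation, the failure comes from taking `min()`/`max()` over per-source data when one of the source arrays is empty. The sketch below is an editor's illustration, not Chaco code — the `data_bounds` helper name and the plain-array inputs are assumptions — of the kind of guard that avoids it; the repository's actual change appears in the diff further down.
``` python
import numpy as np

def data_bounds(arrays):
    # Skip empty arrays so min()/max() never see an empty sequence.
    non_empty = [arr for arr in arrays if arr.size > 0]
    if not non_empty:
        # Nothing to restrict against; fall back to unbounded limits.
        return -np.inf, np.inf
    return (min(arr.min() for arr in non_empty),
            max(arr.max() for arr in non_empty))

print(data_bounds([np.arange(4.0), np.array([], np.float64)]))  # (0.0, 3.0)
```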
</issue>
<code>
[start of chaco/tools/pan_tool.py]
1 """ Defines the PanTool class.
2 """
3
4 from numpy import inf
5
6 # Enthought library imports
7 from enable.api import BaseTool, Pointer, KeySpec
8 from traits.api import Bool, Enum, Float, Tuple, Instance
9
10
11 class PanTool(BaseTool):
12 """ A tool that enables the user to pan a plot by clicking a mouse
13 button and dragging.
14 """
15
16 # The mouse button that initiates the drag operation.
17 drag_button = Enum("left", "middle", "right")
18
19 # The cursor to use when panning.
20 drag_pointer = Pointer("hand")
21
22 # Scaling factor on the panning "speed".
23 speed = Float(1.0)
24
25 # The modifier key that, if depressed when the drag is initiated, constrains
26 # the panning to happen in the only direction of largest initial motion.
27 # It is possible to permanently restrict this tool to always drag along one
28 # direction. To do so, set constrain=True, constrain_key=None, and
29 # constrain_direction to the desired direction.
30 constrain_key = Enum(None, "shift", "control", "alt")
31
32 # Keys to Pan via keyboard
33 pan_right_key = Instance(KeySpec, args=("Right",))
34 pan_left_key = Instance(KeySpec, args=("Left",))
35 pan_up_key = Instance(KeySpec, args=("Up",))
36 pan_down_key = Instance(KeySpec, args=("Down",))
37
38 # number of pixels the keys should pan
39 # disabled if 0.0
40 pan_keys_step = Float(0.0)
41
42 # Constrain the panning to one direction?
43 constrain = Bool(False)
44
45 # The direction of constrained draw. A value of None means that the user
46 # has initiated the drag and pressed the constrain_key, but hasn't moved
47 # the mouse yet; the magnitude of the components of the next mouse_move
48 # event will determine the constrain_direction.
49 constrain_direction = Enum(None, "x", "y")
50
51 # Restrict to the bounds of the plot data
52 restrict_to_data = Bool(False)
53
54 # (x,y) of the point where the mouse button was pressed.
55 _original_xy = Tuple
56
57 # Data coordinates of **_original_xy**. This may be either (index,value)
58 # or (value,index) depending on the component's orientation.
59 _original_data = Tuple
60
61 # Was constrain=True triggered by the **contrain_key**? If False, it was
62 # set programmatically.
63 _auto_constrain = Bool(False)
64
65
66 #------------------------------------------------------------------------
67 # Inherited BaseTool traits
68 #------------------------------------------------------------------------
69
70 # The tool does not have a visual representation (overrides
71 # BaseTool).
72 draw_mode = "none"
73
74 # The tool is not visible (overrides BaseTool).
75 visible = False
76
77 # The possible event states of this tool (overrides enable.Interactor).
78 event_state = Enum("normal", "panning")
79
80 def normal_key_pressed(self, event):
81 """ Handles a key being pressed when the tool is in the 'normal'
82 state.
83 """
84 if self.pan_keys_step == 0.0:
85 return
86 src = self.component.bounds[0]/2, self.component.bounds[1]/2
87 dest = src
88 if self.pan_left_key.match(event):
89 dest = (src[0] - self.pan_keys_step,
90 src[1])
91 elif self.pan_right_key.match(event):
92 dest = (src[0] + self.pan_keys_step,
93 src[1])
94 elif self.pan_down_key.match(event):
95 dest = (src[0],
96 src[1] - self.pan_keys_step)
97 elif self.pan_up_key.match(event):
98 dest = (src[0],
99 src[1] + self.pan_keys_step)
100 if src != dest:
101 self._original_xy = src
102 event.x = dest[0]
103 event.y = dest[1]
104 self.panning_mouse_move(event)
105 return
106
107 def normal_left_down(self, event):
108 """ Handles the left mouse button being pressed when the tool is in
109 the 'normal' state.
110
111 Starts panning if the left mouse button is the drag button.
112 """
113 if self.drag_button == "left":
114 self._start_pan(event)
115 return
116
117 def normal_right_down(self, event):
118 """ Handles the right mouse button being pressed when the tool is in
119 the 'normal' state.
120
121 Starts panning if the right mouse button is the drag button.
122 """
123 if self.drag_button == "right":
124 self._start_pan(event)
125 return
126
127 def normal_middle_down(self, event):
128 """ Handles the middle mouse button being pressed when the tool is in
129 the 'normal' state.
130
131 Starts panning if the middle mouse button is the drag button.
132 """
133 if self.drag_button == "middle":
134 self._start_pan(event)
135 return
136
137 def panning_left_up(self, event):
138 """ Handles the left mouse button coming up when the tool is in the
139 'panning' state.
140
141 Stops panning if the left mouse button is the drag button.
142 """
143 if self.drag_button == "left":
144 self._end_pan(event)
145 return
146
147 def panning_right_up(self, event):
148 """ Handles the right mouse button coming up when the tool is in the
149 'panning' state.
150
151 Stops panning if the right mouse button is the drag button.
152 """
153 if self.drag_button == "right":
154 self._end_pan(event)
155 return
156
157 def panning_middle_up(self, event):
158 """ Handles the middle mouse button coming up when the tool is in the
159 'panning' state.
160
161 Stops panning if the middle mouse button is the drag button.
162 """
163 if self.drag_button == "middle":
164 self._end_pan(event)
165 return
166
167 def panning_mouse_move(self, event):
168 """ Handles the mouse being moved when the tool is in the 'panning'
169 state.
170 """
171 plot = self.component
172
173 if self._auto_constrain and self.constrain_direction is None:
174 # Determine the constraint direction
175 x_orig, y_orig = self._original_xy
176 if abs(event.x - x_orig) > abs(event.y - y_orig):
177 self.constrain_direction = "x"
178 else:
179 self.constrain_direction = "y"
180
181 direction_info = [("x", "width", 0), ("y", "height", 1)]
182 for direction, bound_name, index in direction_info:
183 if not self.constrain or self.constrain_direction == direction:
184 mapper = getattr(plot, direction + "_mapper")
185 domain_min, domain_max = mapper.domain_limits
186 eventpos = getattr(event, direction)
187 origpos = self._original_xy[index]
188
189 screenlow, screenhigh = mapper.screen_bounds
190 screendelta = self.speed * (eventpos - origpos)
191
192 newlow = mapper.map_data(screenlow - screendelta)
193 newhigh = mapper.map_data(screenhigh - screendelta)
194
195 # Don't set the range in this dimension if the panning
196 # would exceed the domain limits.
197 # To do this offset properly, we would need to iteratively
198 # solve for a root using map_data on successive trial
199 # values. As a first approximation, we're just going to
200 # use a linear approximation, which works perfectly for
201 # linear mappers (which is used 99% of the time).
202 if domain_min is None:
203 if self.restrict_to_data:
204 domain_min = min([source.get_data().min()
205 for source in mapper.range.sources])
206 else:
207 domain_min = -inf
208 if domain_max is None:
209 if self.restrict_to_data:
210 domain_max = max([source.get_data().max()
211 for source in mapper.range.sources])
212 else:
213 domain_max = inf
214
215 if (newlow <= domain_min) and (newhigh >= domain_max):
216 # Don't do anything; effectively, freeze the pan
217 continue
218
219 if newlow <= domain_min:
220 newlow = domain_min
221 # Calculate delta in screen space, which is always linear.
222 screen_delta = mapper.map_screen(domain_min) - screenlow
223 newhigh = mapper.map_data(screenhigh + screen_delta)
224 elif newhigh >= domain_max:
225 newhigh = domain_max
226 # Calculate delta in screen space, which is always linear.
227 screen_delta = mapper.map_screen(domain_max) - screenhigh
228 newlow = mapper.map_data(screenlow + screen_delta)
229
230 # Use .set_bounds() so that we don't generate two range_changed
231 # events on the DataRange
232 mapper.range.set_bounds(newlow, newhigh)
233
234 event.handled = True
235
236 self._original_xy = (event.x, event.y)
237 plot.request_redraw()
238 return
239
240 def panning_mouse_leave(self, event):
241 """ Handles the mouse leaving the plot when the tool is in the 'panning'
242 state.
243
244 Ends panning.
245 """
246 return self._end_pan(event)
247
248 def _start_pan(self, event, capture_mouse=True):
249 self._original_xy = (event.x, event.y)
250 if self.constrain_key is not None:
251 if getattr(event, self.constrain_key + "_down"):
252 self.constrain = True
253 self._auto_constrain = True
254 self.constrain_direction = None
255 self.event_state = "panning"
256 if capture_mouse:
257 event.window.set_pointer(self.drag_pointer)
258 event.window.set_mouse_owner(self, event.net_transform())
259 event.handled = True
260 return
261
262 def _end_pan(self, event):
263 if self._auto_constrain:
264 self.constrain = False
265 self.constrain_direction = None
266 self.event_state = "normal"
267 event.window.set_pointer("arrow")
268 if event.window.mouse_owner == self:
269 event.window.set_mouse_owner(None)
270 event.handled = True
271 return
272
273 # EOF
274
[end of chaco/tools/pan_tool.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chaco/tools/pan_tool.py b/chaco/tools/pan_tool.py
--- a/chaco/tools/pan_tool.py
+++ b/chaco/tools/pan_tool.py
@@ -199,16 +199,17 @@
# values. As a first approximation, we're just going to
# use a linear approximation, which works perfectly for
# linear mappers (which is used 99% of the time).
+ data = [arr for arr in
+ (source.get_data() for source in mapper.range.sources)
+ if arr.size > 0]
if domain_min is None:
if self.restrict_to_data:
- domain_min = min([source.get_data().min()
- for source in mapper.range.sources])
+ domain_min = min([arr.min() for arr in data])
else:
domain_min = -inf
if domain_max is None:
if self.restrict_to_data:
- domain_max = max([source.get_data().max()
- for source in mapper.range.sources])
+ domain_max = max([arr.max() for arr in data])
else:
domain_max = inf
| {"golden_diff": "diff --git a/chaco/tools/pan_tool.py b/chaco/tools/pan_tool.py\n--- a/chaco/tools/pan_tool.py\n+++ b/chaco/tools/pan_tool.py\n@@ -199,16 +199,17 @@\n # values. As a first approximation, we're just going to\n # use a linear approximation, which works perfectly for\n # linear mappers (which is used 99% of the time).\n+ data = [arr for arr in\n+ (source.get_data() for source in mapper.range.sources)\n+ if arr.size > 0]\n if domain_min is None:\n if self.restrict_to_data:\n- domain_min = min([source.get_data().min()\n- for source in mapper.range.sources])\n+ domain_min = min([arr.min() for arr in data])\n else:\n domain_min = -inf\n if domain_max is None:\n if self.restrict_to_data:\n- domain_max = max([source.get_data().max()\n- for source in mapper.range.sources])\n+ domain_max = max([arr.max() for arr in data])\n else:\n domain_max = inf\n", "issue": "ValueError when using PanTool with restrict_to_data and empty data source\nThis can come up when a plot's data sources contain empty arrays. None-empty data sources may be visible on screen, and the user may wish to pan based on that view. The empty data sources should not keep this from happening. Here's a minimal example.\n\n``` python\n\nimport numpy as np\n\nfrom chaco.array_plot_data import ArrayPlotData\nfrom chaco.plot import Plot\nfrom chaco.tools.pan_tool import PanTool\n\nplot_data = ArrayPlotData()\nplot = Plot(plot_data)\narr = np.arange(4.0)\nplot_data.set_data(\"x\", arr)\nplot_data.set_data(\"y\", arr)\nplot_data.set_data(\"z\", np.array([], np.float64)\nplot.plot(('x', 'y'))\nplot.plot(('z', 'z'))\ntool = PanTool(plot, restrict_to_data=True)\nplot.tools.append(tool)\n\n```\n\n", "before_files": [{"content": "\"\"\" Defines the PanTool class.\n\"\"\"\n\nfrom numpy import inf\n\n# Enthought library imports\nfrom enable.api import BaseTool, Pointer, KeySpec\nfrom traits.api import Bool, Enum, Float, Tuple, Instance\n\n\nclass PanTool(BaseTool):\n \"\"\" A tool that enables the user to pan a plot by clicking a mouse\n button and dragging.\n \"\"\"\n\n # The mouse button that initiates the drag operation.\n drag_button = Enum(\"left\", \"middle\", \"right\")\n\n # The cursor to use when panning.\n drag_pointer = Pointer(\"hand\")\n\n # Scaling factor on the panning \"speed\".\n speed = Float(1.0)\n\n # The modifier key that, if depressed when the drag is initiated, constrains\n # the panning to happen in the only direction of largest initial motion.\n # It is possible to permanently restrict this tool to always drag along one\n # direction. To do so, set constrain=True, constrain_key=None, and\n # constrain_direction to the desired direction.\n constrain_key = Enum(None, \"shift\", \"control\", \"alt\")\n\n # Keys to Pan via keyboard\n pan_right_key = Instance(KeySpec, args=(\"Right\",))\n pan_left_key = Instance(KeySpec, args=(\"Left\",))\n pan_up_key = Instance(KeySpec, args=(\"Up\",))\n pan_down_key = Instance(KeySpec, args=(\"Down\",))\n\n # number of pixels the keys should pan\n # disabled if 0.0\n pan_keys_step = Float(0.0)\n\n # Constrain the panning to one direction?\n constrain = Bool(False)\n\n # The direction of constrained draw. 
A value of None means that the user\n # has initiated the drag and pressed the constrain_key, but hasn't moved\n # the mouse yet; the magnitude of the components of the next mouse_move\n # event will determine the constrain_direction.\n constrain_direction = Enum(None, \"x\", \"y\")\n\n # Restrict to the bounds of the plot data\n restrict_to_data = Bool(False)\n\n # (x,y) of the point where the mouse button was pressed.\n _original_xy = Tuple\n\n # Data coordinates of **_original_xy**. This may be either (index,value)\n # or (value,index) depending on the component's orientation.\n _original_data = Tuple\n\n # Was constrain=True triggered by the **contrain_key**? If False, it was\n # set programmatically.\n _auto_constrain = Bool(False)\n\n\n #------------------------------------------------------------------------\n # Inherited BaseTool traits\n #------------------------------------------------------------------------\n\n # The tool does not have a visual representation (overrides\n # BaseTool).\n draw_mode = \"none\"\n\n # The tool is not visible (overrides BaseTool).\n visible = False\n\n # The possible event states of this tool (overrides enable.Interactor).\n event_state = Enum(\"normal\", \"panning\")\n\n def normal_key_pressed(self, event):\n \"\"\" Handles a key being pressed when the tool is in the 'normal'\n state.\n \"\"\"\n if self.pan_keys_step == 0.0:\n return\n src = self.component.bounds[0]/2, self.component.bounds[1]/2\n dest = src\n if self.pan_left_key.match(event):\n dest = (src[0] - self.pan_keys_step,\n src[1])\n elif self.pan_right_key.match(event):\n dest = (src[0] + self.pan_keys_step,\n src[1])\n elif self.pan_down_key.match(event):\n dest = (src[0],\n src[1] - self.pan_keys_step)\n elif self.pan_up_key.match(event):\n dest = (src[0],\n src[1] + self.pan_keys_step)\n if src != dest:\n self._original_xy = src\n event.x = dest[0]\n event.y = dest[1]\n self.panning_mouse_move(event)\n return\n\n def normal_left_down(self, event):\n \"\"\" Handles the left mouse button being pressed when the tool is in\n the 'normal' state.\n\n Starts panning if the left mouse button is the drag button.\n \"\"\"\n if self.drag_button == \"left\":\n self._start_pan(event)\n return\n\n def normal_right_down(self, event):\n \"\"\" Handles the right mouse button being pressed when the tool is in\n the 'normal' state.\n\n Starts panning if the right mouse button is the drag button.\n \"\"\"\n if self.drag_button == \"right\":\n self._start_pan(event)\n return\n\n def normal_middle_down(self, event):\n \"\"\" Handles the middle mouse button being pressed when the tool is in\n the 'normal' state.\n\n Starts panning if the middle mouse button is the drag button.\n \"\"\"\n if self.drag_button == \"middle\":\n self._start_pan(event)\n return\n\n def panning_left_up(self, event):\n \"\"\" Handles the left mouse button coming up when the tool is in the\n 'panning' state.\n\n Stops panning if the left mouse button is the drag button.\n \"\"\"\n if self.drag_button == \"left\":\n self._end_pan(event)\n return\n\n def panning_right_up(self, event):\n \"\"\" Handles the right mouse button coming up when the tool is in the\n 'panning' state.\n\n Stops panning if the right mouse button is the drag button.\n \"\"\"\n if self.drag_button == \"right\":\n self._end_pan(event)\n return\n\n def panning_middle_up(self, event):\n \"\"\" Handles the middle mouse button coming up when the tool is in the\n 'panning' state.\n\n Stops panning if the middle mouse button is the drag button.\n \"\"\"\n if 
self.drag_button == \"middle\":\n self._end_pan(event)\n return\n\n def panning_mouse_move(self, event):\n \"\"\" Handles the mouse being moved when the tool is in the 'panning'\n state.\n \"\"\"\n plot = self.component\n\n if self._auto_constrain and self.constrain_direction is None:\n # Determine the constraint direction\n x_orig, y_orig = self._original_xy\n if abs(event.x - x_orig) > abs(event.y - y_orig):\n self.constrain_direction = \"x\"\n else:\n self.constrain_direction = \"y\"\n\n direction_info = [(\"x\", \"width\", 0), (\"y\", \"height\", 1)]\n for direction, bound_name, index in direction_info:\n if not self.constrain or self.constrain_direction == direction:\n mapper = getattr(plot, direction + \"_mapper\")\n domain_min, domain_max = mapper.domain_limits\n eventpos = getattr(event, direction)\n origpos = self._original_xy[index]\n\n screenlow, screenhigh = mapper.screen_bounds\n screendelta = self.speed * (eventpos - origpos)\n\n newlow = mapper.map_data(screenlow - screendelta)\n newhigh = mapper.map_data(screenhigh - screendelta)\n\n # Don't set the range in this dimension if the panning\n # would exceed the domain limits.\n # To do this offset properly, we would need to iteratively\n # solve for a root using map_data on successive trial\n # values. As a first approximation, we're just going to\n # use a linear approximation, which works perfectly for\n # linear mappers (which is used 99% of the time).\n if domain_min is None:\n if self.restrict_to_data:\n domain_min = min([source.get_data().min()\n for source in mapper.range.sources])\n else:\n domain_min = -inf\n if domain_max is None:\n if self.restrict_to_data:\n domain_max = max([source.get_data().max()\n for source in mapper.range.sources])\n else:\n domain_max = inf\n\n if (newlow <= domain_min) and (newhigh >= domain_max):\n # Don't do anything; effectively, freeze the pan\n continue\n\n if newlow <= domain_min:\n newlow = domain_min\n # Calculate delta in screen space, which is always linear.\n screen_delta = mapper.map_screen(domain_min) - screenlow\n newhigh = mapper.map_data(screenhigh + screen_delta)\n elif newhigh >= domain_max:\n newhigh = domain_max\n # Calculate delta in screen space, which is always linear.\n screen_delta = mapper.map_screen(domain_max) - screenhigh\n newlow = mapper.map_data(screenlow + screen_delta)\n\n # Use .set_bounds() so that we don't generate two range_changed\n # events on the DataRange\n mapper.range.set_bounds(newlow, newhigh)\n\n event.handled = True\n\n self._original_xy = (event.x, event.y)\n plot.request_redraw()\n return\n\n def panning_mouse_leave(self, event):\n \"\"\" Handles the mouse leaving the plot when the tool is in the 'panning'\n state.\n\n Ends panning.\n \"\"\"\n return self._end_pan(event)\n\n def _start_pan(self, event, capture_mouse=True):\n self._original_xy = (event.x, event.y)\n if self.constrain_key is not None:\n if getattr(event, self.constrain_key + \"_down\"):\n self.constrain = True\n self._auto_constrain = True\n self.constrain_direction = None\n self.event_state = \"panning\"\n if capture_mouse:\n event.window.set_pointer(self.drag_pointer)\n event.window.set_mouse_owner(self, event.net_transform())\n event.handled = True\n return\n\n def _end_pan(self, event):\n if self._auto_constrain:\n self.constrain = False\n self.constrain_direction = None\n self.event_state = \"normal\"\n event.window.set_pointer(\"arrow\")\n if event.window.mouse_owner == self:\n event.window.set_mouse_owner(None)\n event.handled = True\n return\n\n# EOF\n", "path": 
"chaco/tools/pan_tool.py"}]} | 3,629 | 257 |
gh_patches_debug_33037 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-3884 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Checking for new version fails
After doing a crash report:
> There was an error while getting the newest version: Invalid JSON received in reply: Expecting value: line 1 column 1 (char 0)!. Please check for a new version on qutebrowser.org by yourself.
Probably something changed on PyPI?
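A hedged note on the likely cause: PyPI moved from `pypi.python.org` to `pypi.org` and answers the old host with a redirect, and a plain `QNetworkRequest` in Qt 5 does not follow redirects, so the client ends up trying to parse a non-JSON body. The snippet below is an editor's sketch of opting in to redirect following (the `make_request` helper is illustrative; the repository's actual change is in the diff further down):
```python
from PyQt5.QtCore import QUrl
from PyQt5.QtNetwork import QNetworkRequest

def make_request(url: str) -> QNetworkRequest:
    request = QNetworkRequest(QUrl(url))
    # FollowRedirectsAttribute is available since Qt 5.6; Qt >= 5.9 offers
    # RedirectPolicyAttribute with NoLessSafeRedirectPolicy instead.
    request.setAttribute(QNetworkRequest.FollowRedirectsAttribute, True)
    return request

request = make_request('https://pypi.org/pypi/qutebrowser/json')
```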
</issue>
<code>
[start of qutebrowser/misc/httpclient.py]
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """An HTTP client based on QNetworkAccessManager."""
21
22 import functools
23 import urllib.request
24 import urllib.parse
25
26 from PyQt5.QtCore import pyqtSignal, QObject, QTimer
27 from PyQt5.QtNetwork import (QNetworkAccessManager, QNetworkRequest,
28 QNetworkReply)
29
30
31 class HTTPClient(QObject):
32
33 """An HTTP client based on QNetworkAccessManager.
34
35 Intended for APIs, automatically decodes data.
36
37 Attributes:
38 _nam: The QNetworkAccessManager used.
39 _timers: A {QNetworkReply: QTimer} dict.
40
41 Signals:
42 success: Emitted when the operation succeeded.
43 arg: The received data.
44 error: Emitted when the request failed.
45 arg: The error message, as string.
46 """
47
48 success = pyqtSignal(str)
49 error = pyqtSignal(str)
50
51 def __init__(self, parent=None):
52 super().__init__(parent)
53 self._nam = QNetworkAccessManager(self)
54 self._timers = {}
55
56 def post(self, url, data=None):
57 """Create a new POST request.
58
59 Args:
60 url: The URL to post to, as QUrl.
61 data: A dict of data to send.
62 """
63 if data is None:
64 data = {}
65 encoded_data = urllib.parse.urlencode(data).encode('utf-8')
66 request = QNetworkRequest(url)
67 request.setHeader(QNetworkRequest.ContentTypeHeader,
68 'application/x-www-form-urlencoded;charset=utf-8')
69 reply = self._nam.post(request, encoded_data)
70 self._handle_reply(reply)
71
72 def get(self, url):
73 """Create a new GET request.
74
75 Emits success/error when done.
76
77 Args:
78 url: The URL to access, as QUrl.
79 """
80 request = QNetworkRequest(url)
81 reply = self._nam.get(request)
82 self._handle_reply(reply)
83
84 def _handle_reply(self, reply):
85 """Handle a new QNetworkReply."""
86 if reply.isFinished():
87 self.on_reply_finished(reply)
88 else:
89 timer = QTimer(self)
90 timer.setInterval(10000)
91 timer.timeout.connect(reply.abort)
92 timer.start()
93 self._timers[reply] = timer
94 reply.finished.connect(functools.partial(
95 self.on_reply_finished, reply))
96
97 def on_reply_finished(self, reply):
98 """Read the data and finish when the reply finished.
99
100 Args:
101 reply: The QNetworkReply which finished.
102 """
103 timer = self._timers.pop(reply)
104 if timer is not None:
105 timer.stop()
106 timer.deleteLater()
107 if reply.error() != QNetworkReply.NoError:
108 self.error.emit(reply.errorString())
109 return
110 try:
111 data = bytes(reply.readAll()).decode('utf-8')
112 except UnicodeDecodeError:
113 self.error.emit("Invalid UTF-8 data received in reply!")
114 return
115 self.success.emit(data)
116
[end of qutebrowser/misc/httpclient.py]
[start of qutebrowser/misc/autoupdate.py]
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Classes related to auto-updating and getting the latest version."""
21
22 import json
23
24 from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QUrl
25
26 from qutebrowser.misc import httpclient
27
28
29 class PyPIVersionClient(QObject):
30
31 """A client for the PyPI API using HTTPClient.
32
33 It gets the latest version of qutebrowser from PyPI.
34
35 Attributes:
36 _client: The HTTPClient used.
37
38 Class attributes:
39 API_URL: The base API URL.
40
41 Signals:
42 success: Emitted when getting the version info succeeded.
43 arg: The newest version.
44 error: Emitted when getting the version info failed.
45 arg: The error message, as string.
46 """
47
48 API_URL = 'https://pypi.python.org/pypi/{}/json'
49 success = pyqtSignal(str)
50 error = pyqtSignal(str)
51
52 def __init__(self, parent=None, client=None):
53 super().__init__(parent)
54 if client is None:
55 self._client = httpclient.HTTPClient(self)
56 else:
57 self._client = client
58 self._client.error.connect(self.error)
59 self._client.success.connect(self.on_client_success)
60
61 def get_version(self, package='qutebrowser'):
62 """Get the newest version of a given package.
63
64 Emits success/error when done.
65
66 Args:
67 package: The name of the package to check.
68 """
69 url = QUrl(self.API_URL.format(package))
70 self._client.get(url)
71
72 @pyqtSlot(str)
73 def on_client_success(self, data):
74 """Process the data and finish when the client finished.
75
76 Args:
77 data: A string with the received data.
78 """
79 try:
80 json_data = json.loads(data)
81 except ValueError as e:
82 self.error.emit("Invalid JSON received in reply: {}!".format(e))
83 return
84 try:
85 self.success.emit(json_data['info']['version'])
86 except KeyError as e:
87 self.error.emit("Malformed data received in reply "
88 "({!r} not found)!".format(e))
89 return
90
[end of qutebrowser/misc/autoupdate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qutebrowser/misc/autoupdate.py b/qutebrowser/misc/autoupdate.py
--- a/qutebrowser/misc/autoupdate.py
+++ b/qutebrowser/misc/autoupdate.py
@@ -45,7 +45,7 @@
arg: The error message, as string.
"""
- API_URL = 'https://pypi.python.org/pypi/{}/json'
+ API_URL = 'https://pypi.org/pypi/{}/json'
success = pyqtSignal(str)
error = pyqtSignal(str)
diff --git a/qutebrowser/misc/httpclient.py b/qutebrowser/misc/httpclient.py
--- a/qutebrowser/misc/httpclient.py
+++ b/qutebrowser/misc/httpclient.py
@@ -28,6 +28,21 @@
QNetworkReply)
+class HTTPRequest(QNetworkRequest):
+ """A QNetworkRquest that follows (secure) redirects by default."""
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ try:
+ self.setAttribute(QNetworkRequest.RedirectPolicyAttribute,
+ QNetworkRequest.NoLessSafeRedirectPolicy)
+ except AttributeError:
+ # RedirectPolicyAttribute was introduced in 5.9 to replace
+ # FollowRedirectsAttribute.
+ self.setAttribute(QNetworkRequest.FollowRedirectsAttribute,
+ True)
+
+
class HTTPClient(QObject):
"""An HTTP client based on QNetworkAccessManager.
@@ -63,7 +78,7 @@
if data is None:
data = {}
encoded_data = urllib.parse.urlencode(data).encode('utf-8')
- request = QNetworkRequest(url)
+ request = HTTPRequest(url)
request.setHeader(QNetworkRequest.ContentTypeHeader,
'application/x-www-form-urlencoded;charset=utf-8')
reply = self._nam.post(request, encoded_data)
@@ -77,7 +92,7 @@
Args:
url: The URL to access, as QUrl.
"""
- request = QNetworkRequest(url)
+ request = HTTPRequest(url)
reply = self._nam.get(request)
self._handle_reply(reply)
| {"golden_diff": "diff --git a/qutebrowser/misc/autoupdate.py b/qutebrowser/misc/autoupdate.py\n--- a/qutebrowser/misc/autoupdate.py\n+++ b/qutebrowser/misc/autoupdate.py\n@@ -45,7 +45,7 @@\n arg: The error message, as string.\n \"\"\"\n \n- API_URL = 'https://pypi.python.org/pypi/{}/json'\n+ API_URL = 'https://pypi.org/pypi/{}/json'\n success = pyqtSignal(str)\n error = pyqtSignal(str)\n \ndiff --git a/qutebrowser/misc/httpclient.py b/qutebrowser/misc/httpclient.py\n--- a/qutebrowser/misc/httpclient.py\n+++ b/qutebrowser/misc/httpclient.py\n@@ -28,6 +28,21 @@\n QNetworkReply)\n \n \n+class HTTPRequest(QNetworkRequest):\n+ \"\"\"A QNetworkRquest that follows (secure) redirects by default.\"\"\"\n+\n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ try:\n+ self.setAttribute(QNetworkRequest.RedirectPolicyAttribute,\n+ QNetworkRequest.NoLessSafeRedirectPolicy)\n+ except AttributeError:\n+ # RedirectPolicyAttribute was introduced in 5.9 to replace\n+ # FollowRedirectsAttribute.\n+ self.setAttribute(QNetworkRequest.FollowRedirectsAttribute,\n+ True)\n+\n+\n class HTTPClient(QObject):\n \n \"\"\"An HTTP client based on QNetworkAccessManager.\n@@ -63,7 +78,7 @@\n if data is None:\n data = {}\n encoded_data = urllib.parse.urlencode(data).encode('utf-8')\n- request = QNetworkRequest(url)\n+ request = HTTPRequest(url)\n request.setHeader(QNetworkRequest.ContentTypeHeader,\n 'application/x-www-form-urlencoded;charset=utf-8')\n reply = self._nam.post(request, encoded_data)\n@@ -77,7 +92,7 @@\n Args:\n url: The URL to access, as QUrl.\n \"\"\"\n- request = QNetworkRequest(url)\n+ request = HTTPRequest(url)\n reply = self._nam.get(request)\n self._handle_reply(reply)\n", "issue": "Checking for new version fails\nAfter doing a crash report:\r\n\r\n> There was an error while getting the newest version: Invalid JSON received in reply: Expecting value: line 1 column 1 (char 0)!. Please check for a new version on qutebrowser.org by yourself.\r\n\r\nProbably something changed on PyPI?\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"An HTTP client based on QNetworkAccessManager.\"\"\"\n\nimport functools\nimport urllib.request\nimport urllib.parse\n\nfrom PyQt5.QtCore import pyqtSignal, QObject, QTimer\nfrom PyQt5.QtNetwork import (QNetworkAccessManager, QNetworkRequest,\n QNetworkReply)\n\n\nclass HTTPClient(QObject):\n\n \"\"\"An HTTP client based on QNetworkAccessManager.\n\n Intended for APIs, automatically decodes data.\n\n Attributes:\n _nam: The QNetworkAccessManager used.\n _timers: A {QNetworkReply: QTimer} dict.\n\n Signals:\n success: Emitted when the operation succeeded.\n arg: The received data.\n error: Emitted when the request failed.\n arg: The error message, as string.\n \"\"\"\n\n success = pyqtSignal(str)\n error = pyqtSignal(str)\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self._nam = QNetworkAccessManager(self)\n self._timers = {}\n\n def post(self, url, data=None):\n \"\"\"Create a new POST request.\n\n Args:\n url: The URL to post to, as QUrl.\n data: A dict of data to send.\n \"\"\"\n if data is None:\n data = {}\n encoded_data = urllib.parse.urlencode(data).encode('utf-8')\n request = QNetworkRequest(url)\n request.setHeader(QNetworkRequest.ContentTypeHeader,\n 'application/x-www-form-urlencoded;charset=utf-8')\n reply = self._nam.post(request, encoded_data)\n self._handle_reply(reply)\n\n def get(self, url):\n \"\"\"Create a new GET request.\n\n Emits success/error when done.\n\n Args:\n url: The URL to access, as QUrl.\n \"\"\"\n request = QNetworkRequest(url)\n reply = self._nam.get(request)\n self._handle_reply(reply)\n\n def _handle_reply(self, reply):\n \"\"\"Handle a new QNetworkReply.\"\"\"\n if reply.isFinished():\n self.on_reply_finished(reply)\n else:\n timer = QTimer(self)\n timer.setInterval(10000)\n timer.timeout.connect(reply.abort)\n timer.start()\n self._timers[reply] = timer\n reply.finished.connect(functools.partial(\n self.on_reply_finished, reply))\n\n def on_reply_finished(self, reply):\n \"\"\"Read the data and finish when the reply finished.\n\n Args:\n reply: The QNetworkReply which finished.\n \"\"\"\n timer = self._timers.pop(reply)\n if timer is not None:\n timer.stop()\n timer.deleteLater()\n if reply.error() != QNetworkReply.NoError:\n self.error.emit(reply.errorString())\n return\n try:\n data = bytes(reply.readAll()).decode('utf-8')\n except UnicodeDecodeError:\n self.error.emit(\"Invalid UTF-8 data received in reply!\")\n return\n self.success.emit(data)\n", "path": "qutebrowser/misc/httpclient.py"}, {"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Classes related to auto-updating and getting the latest version.\"\"\"\n\nimport json\n\nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QUrl\n\nfrom qutebrowser.misc import httpclient\n\n\nclass PyPIVersionClient(QObject):\n\n \"\"\"A client for the PyPI API using HTTPClient.\n\n It gets the latest version of qutebrowser from PyPI.\n\n Attributes:\n _client: The HTTPClient used.\n\n Class attributes:\n API_URL: The base API URL.\n\n Signals:\n success: Emitted when getting the version info succeeded.\n arg: The newest version.\n error: Emitted when getting the version info failed.\n arg: The error message, as string.\n \"\"\"\n\n API_URL = 'https://pypi.python.org/pypi/{}/json'\n success = pyqtSignal(str)\n error = pyqtSignal(str)\n\n def __init__(self, parent=None, client=None):\n super().__init__(parent)\n if client is None:\n self._client = httpclient.HTTPClient(self)\n else:\n self._client = client\n self._client.error.connect(self.error)\n self._client.success.connect(self.on_client_success)\n\n def get_version(self, package='qutebrowser'):\n \"\"\"Get the newest version of a given package.\n\n Emits success/error when done.\n\n Args:\n package: The name of the package to check.\n \"\"\"\n url = QUrl(self.API_URL.format(package))\n self._client.get(url)\n\n @pyqtSlot(str)\n def on_client_success(self, data):\n \"\"\"Process the data and finish when the client finished.\n\n Args:\n data: A string with the received data.\n \"\"\"\n try:\n json_data = json.loads(data)\n except ValueError as e:\n self.error.emit(\"Invalid JSON received in reply: {}!\".format(e))\n return\n try:\n self.success.emit(json_data['info']['version'])\n except KeyError as e:\n self.error.emit(\"Malformed data received in reply \"\n \"({!r} not found)!\".format(e))\n return\n", "path": "qutebrowser/misc/autoupdate.py"}]} | 2,533 | 475 |
gh_patches_debug_13796 | rasdani/github-patches | git_diff | Mailu__Mailu-1874 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Weblate instance is down
I tried accessing the Weblate instance to potentially add another language, but it looks down.
</issue>
<code>
[start of setup/server.py]
1 import flask
2 import flask_bootstrap
3 import redis
4 import json
5 import os
6 import jinja2
7 import uuid
8 import string
9 import random
10 import ipaddress
11 import hashlib
12 import time
13
14
15 version = os.getenv("this_version", "master")
16 static_url_path = "/" + version + "/static"
17 app = flask.Flask(__name__, static_url_path=static_url_path)
18 flask_bootstrap.Bootstrap(app)
19 db = redis.StrictRedis(host='redis', port=6379, db=0)
20
21
22 def render_flavor(flavor, template, data):
23 return flask.render_template(
24 os.path.join(flavor, template),
25 **data
26 )
27
28
29 @app.add_template_global
30 def secret(length=16):
31 charset = string.ascii_uppercase + string.digits
32 return ''.join(
33 random.SystemRandom().choice(charset)
34 for _ in range(length)
35 )
36
37 #Original copied from https://github.com/andrewlkho/ulagen
38 def random_ipv6_subnet():
39 eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff
40 eui64_canon = "-".join([format(eui64, "02X")[i:i+2] for i in range(0, 18, 2)])
41
42 h = hashlib.sha1()
43 h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8'))
44 globalid = h.hexdigest()[0:10]
45
46 prefix = ":".join(("fd" + globalid[0:2], globalid[2:6], globalid[6:10]))
47 return prefix
48
49 def build_app(path):
50
51 app.jinja_env.trim_blocks = True
52 app.jinja_env.lstrip_blocks = True
53
54 @app.context_processor
55 def app_context():
56 return dict(
57 versions=os.getenv("VERSIONS","master").split(','),
58 stable_version = os.getenv("stable_version", "master")
59 )
60
61 prefix_bp = flask.Blueprint(version, __name__)
62 prefix_bp.jinja_loader = jinja2.ChoiceLoader([
63 jinja2.FileSystemLoader(os.path.join(path, "templates")),
64 jinja2.FileSystemLoader(os.path.join(path, "flavors"))
65 ])
66
67 root_bp = flask.Blueprint("root", __name__)
68 root_bp.jinja_loader = jinja2.ChoiceLoader([
69 jinja2.FileSystemLoader(os.path.join(path, "templates")),
70 jinja2.FileSystemLoader(os.path.join(path, "flavors"))
71 ])
72
73 @prefix_bp.context_processor
74 @root_bp.context_processor
75 def bp_context(version=version):
76 return dict(version=version)
77
78 @prefix_bp.route("/")
79 @root_bp.route("/")
80 def wizard():
81 return flask.render_template('wizard.html')
82
83 @prefix_bp.route("/submit_flavor", methods=["POST"])
84 @root_bp.route("/submit_flavor", methods=["POST"])
85 def submit_flavor():
86 data = flask.request.form.copy()
87 subnet6 = random_ipv6_subnet()
88 steps = sorted(os.listdir(os.path.join(path, "templates", "steps", data["flavor"])))
89 return flask.render_template('wizard.html', flavor=data["flavor"], steps=steps, subnet6=subnet6)
90
91 @prefix_bp.route("/submit", methods=["POST"])
92 @root_bp.route("/submit", methods=["POST"])
93 def submit():
94 data = flask.request.form.copy()
95 data['uid'] = str(uuid.uuid4())
96 try:
97 data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])
98 except ValueError as err:
99 return "Error while generating files: " + str(err)
100 db.set(data['uid'], json.dumps(data))
101 return flask.redirect(flask.url_for('.setup', uid=data['uid']))
102
103 @prefix_bp.route("/setup/<uid>", methods=["GET"])
104 @root_bp.route("/setup/<uid>", methods=["GET"])
105 def setup(uid):
106 data = json.loads(db.get(uid))
107 flavor = data.get("flavor", "compose")
108 rendered = render_flavor(flavor, "setup.html", data)
109 return flask.render_template("setup.html", contents=rendered)
110
111 @prefix_bp.route("/file/<uid>/<filepath>", methods=["GET"])
112 @root_bp.route("/file/<uid>/<filepath>", methods=["GET"])
113 def file(uid, filepath):
114 data = json.loads(db.get(uid))
115 flavor = data.get("flavor", "compose")
116 return flask.Response(
117 render_flavor(flavor, filepath, data),
118 mimetype="application/text"
119 )
120
121 app.register_blueprint(prefix_bp, url_prefix="/{}".format(version))
122 app.register_blueprint(root_bp)
123
124
125 if __name__ == "__main__":
126 build_app("/tmp/mailutest")
127 app.run(debug=True)
128
[end of setup/server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup/server.py b/setup/server.py
--- a/setup/server.py
+++ b/setup/server.py
@@ -54,11 +54,11 @@
@app.context_processor
def app_context():
return dict(
- versions=os.getenv("VERSIONS","master").split(','),
+ versions=os.getenv("VERSIONS","master").split(','),
stable_version = os.getenv("stable_version", "master")
)
- prefix_bp = flask.Blueprint(version, __name__)
+ prefix_bp = flask.Blueprint(version.replace(".", "_"), __name__)
prefix_bp.jinja_loader = jinja2.ChoiceLoader([
jinja2.FileSystemLoader(os.path.join(path, "templates")),
jinja2.FileSystemLoader(os.path.join(path, "flavors"))
| {"golden_diff": "diff --git a/setup/server.py b/setup/server.py\n--- a/setup/server.py\n+++ b/setup/server.py\n@@ -54,11 +54,11 @@\n @app.context_processor\n def app_context():\n return dict(\n- versions=os.getenv(\"VERSIONS\",\"master\").split(','), \n+ versions=os.getenv(\"VERSIONS\",\"master\").split(','),\n stable_version = os.getenv(\"stable_version\", \"master\")\n )\n \n- prefix_bp = flask.Blueprint(version, __name__)\n+ prefix_bp = flask.Blueprint(version.replace(\".\", \"_\"), __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n", "issue": "Weblate instance is down\nI tried accessing the Weblate instance and potentially add another language but it looks down.\n", "before_files": [{"content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\nimport hashlib\nimport time\n\n\nversion = os.getenv(\"this_version\", \"master\")\nstatic_url_path = \"/\" + version + \"/static\"\napp = flask.Flask(__name__, static_url_path=static_url_path)\nflask_bootstrap.Bootstrap(app)\ndb = redis.StrictRedis(host='redis', port=6379, db=0)\n\n\ndef render_flavor(flavor, template, data):\n return flask.render_template(\n os.path.join(flavor, template),\n **data\n )\n\n\[email protected]_template_global\ndef secret(length=16):\n charset = string.ascii_uppercase + string.digits\n return ''.join(\n random.SystemRandom().choice(charset)\n for _ in range(length)\n )\n\n#Original copied from https://github.com/andrewlkho/ulagen\ndef random_ipv6_subnet():\n eui64 = uuid.getnode() >> 24 << 48 | 0xfffe000000 | uuid.getnode() & 0xffffff\n eui64_canon = \"-\".join([format(eui64, \"02X\")[i:i+2] for i in range(0, 18, 2)])\n\n h = hashlib.sha1()\n h.update((eui64_canon + str(time.time() - time.mktime((1900, 1, 1, 0, 0, 0, 0, 1, -1)))).encode('utf-8'))\n globalid = h.hexdigest()[0:10]\n\n prefix = \":\".join((\"fd\" + globalid[0:2], globalid[2:6], globalid[6:10]))\n return prefix\n\ndef build_app(path):\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n\n @app.context_processor\n def app_context():\n return dict(\n versions=os.getenv(\"VERSIONS\",\"master\").split(','), \n stable_version = os.getenv(\"stable_version\", \"master\")\n )\n\n prefix_bp = flask.Blueprint(version, __name__)\n prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n root_bp = flask.Blueprint(\"root\", __name__)\n root_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n @prefix_bp.context_processor\n @root_bp.context_processor\n def bp_context(version=version):\n return dict(version=version)\n\n @prefix_bp.route(\"/\")\n @root_bp.route(\"/\")\n def wizard():\n return flask.render_template('wizard.html')\n\n @prefix_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n @root_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n def submit_flavor():\n data = flask.request.form.copy()\n subnet6 = random_ipv6_subnet()\n steps = sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", data[\"flavor\"])))\n return flask.render_template('wizard.html', flavor=data[\"flavor\"], steps=steps, subnet6=subnet6)\n\n @prefix_bp.route(\"/submit\", methods=[\"POST\"])\n 
@root_bp.route(\"/submit\", methods=[\"POST\"])\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n try:\n data['dns'] = str(ipaddress.IPv4Network(data['subnet'], strict=False)[-2])\n except ValueError as err:\n return \"Error while generating files: \" + str(err)\n db.set(data['uid'], json.dumps(data))\n return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n\n @prefix_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n @root_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n def setup(uid):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n rendered = render_flavor(flavor, \"setup.html\", data)\n return flask.render_template(\"setup.html\", contents=rendered)\n\n @prefix_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n @root_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n def file(uid, filepath):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n return flask.Response(\n render_flavor(flavor, filepath, data),\n mimetype=\"application/text\"\n )\n\n app.register_blueprint(prefix_bp, url_prefix=\"/{}\".format(version))\n app.register_blueprint(root_bp)\n\n\nif __name__ == \"__main__\":\n build_app(\"/tmp/mailutest\")\n app.run(debug=True)\n", "path": "setup/server.py"}]} | 1,922 | 170 |
gh_patches_debug_31183 | rasdani/github-patches | git_diff | modin-project__modin-3156 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`ValueError` during initialization of Ray 1.4 in Modin
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux Ubuntu
- **Modin version** (`modin.__version__`): 0.10.0
- **Python version**: 3.8.10
- **Code we can use to reproduce**:
```python
import modin.pandas as pd
df = pd.DataFrame([0])
```
<!--
You can obtain the Modin version with
python -c "import modin; print(modin.__version__)"
-->
### Describe the problem
<!-- Describe the problem clearly here. -->
During initialization of Ray in Modin, a `ValueError` is raised:
```
ValueError: The configured object store size (486.0 GB) exceeds /dev/shm size (405.07611136 GB). This will harm performance. Consider deleting files in /dev/shm or increasing its size with --shm-size in Docker. To ignore this warning, set RAY_OBJECT_STORE_ALLOW_SLOW_STORAGE=1.
```
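
The message itself points at two knobs that can serve as a stopgap until the sizing logic is fixed: the `RAY_OBJECT_STORE_ALLOW_SLOW_STORAGE` flag it mentions, and Modin's `MODIN_MEMORY` setting for capping the object store size per Ray worker. A minimal sketch of that workaround (the 200 GB figure is an arbitrary example, not a recommendation):

```python
import os

# Both variables must be set before the first Modin operation triggers ray.init().
os.environ["RAY_OBJECT_STORE_ALLOW_SLOW_STORAGE"] = "1"  # suppress the /dev/shm size check
os.environ["MODIN_MEMORY"] = str(200 * 10**9)            # example cap for the object store, in bytes

import modin.pandas as pd

df = pd.DataFrame([0])
```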
</issue>
<code>
[start of modin/engines/ray/utils.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 """The module holds utility and initialization routines for Modin on Ray."""
15
16 import os
17 import sys
18
19 from modin.config import (
20 Backend,
21 IsRayCluster,
22 RayRedisAddress,
23 RayRedisPassword,
24 CpuCount,
25 GpuCount,
26 Memory,
27 NPartitions,
28 )
29
30
31 def _move_stdlib_ahead_of_site_packages(*args):
32 """
33 Ensure packages from stdlib have higher import priority than from site-packages.
34
35 Parameters
36 ----------
37 *args : tuple
38 Ignored, added for compatibility with Ray.
39
40 Notes
41 -----
42 This function is expected to be run on all workers including the driver.
43 This is a hack solution to fix GH-#647, GH-#746.
44 """
45 site_packages_path = None
46 site_packages_path_index = -1
47 for i, path in enumerate(sys.path):
48 if sys.exec_prefix in path and path.endswith("site-packages"):
49 site_packages_path = path
50 site_packages_path_index = i
51 # break on first found
52 break
53
54 if site_packages_path is not None:
55 # stdlib packages layout as follows:
56 # - python3.x
57 # - typing.py
58 # - site-packages/
59 # - pandas
60 # So extracting the dirname of the site_packages can point us
61 # to the directory containing standard libraries.
62 sys.path.insert(site_packages_path_index, os.path.dirname(site_packages_path))
63
64
65 def _import_pandas(*args):
66 """
67 Import pandas to make sure all its machinery is ready.
68
69 This prevents a race condition between two threads deserializing functions
70 and trying to import pandas at the same time.
71
72 Parameters
73 ----------
74 *args : tuple
75 Ignored, added for compatibility with Ray.
76
77 Notes
78 -----
79 This function is expected to be run on all workers before any
80 serialization or deserialization starts.
81 """
82 import pandas # noqa F401
83
84
85 def initialize_ray(
86 override_is_cluster=False,
87 override_redis_address: str = None,
88 override_redis_password: str = None,
89 ):
90 """
91 Initialize Ray based on parameters, ``modin.config`` variables and internal defaults.
92
93 Parameters
94 ----------
95 override_is_cluster : bool, default: False
96 Whether to override the detection of Modin being run in a cluster
97 and always assume this runs on cluster head node.
98 This also overrides Ray worker detection and always runs the initialization
99 function (runs from main thread only by default).
100 If not specified, ``modin.config.IsRayCluster`` variable is used.
101 override_redis_address : str, optional
102 What Redis address to connect to when running in Ray cluster.
103 If not specified, ``modin.config.RayRedisAddress`` is used.
104 override_redis_password : str, optional
105 What password to use when connecting to Redis.
106 If not specified, ``modin.config.RayRedisPassword`` is used.
107 """
108 import ray
109
110 if not ray.is_initialized() or override_is_cluster:
111 cluster = override_is_cluster or IsRayCluster.get()
112 redis_address = override_redis_address or RayRedisAddress.get()
113 redis_password = override_redis_password or RayRedisPassword.get()
114
115 if cluster:
116 # We only start ray in a cluster setting for the head node.
117 ray.init(
118 address=redis_address or "auto",
119 include_dashboard=False,
120 ignore_reinit_error=True,
121 _redis_password=redis_password,
122 )
123 else:
124 from modin.error_message import ErrorMessage
125
126 # This string is intentionally formatted this way. We want it indented in
127 # the warning message.
128 ErrorMessage.not_initialized(
129 "Ray",
130 """
131 import ray
132 ray.init()
133 """,
134 )
135 object_store_memory = Memory.get()
136 # In case anything failed above, we can still improve the memory for Modin.
137 if object_store_memory is None:
138 # Round down to the nearest Gigabyte.
139 system_memory = ray._private.utils.get_system_memory()
140 object_store_memory = int(0.6 * system_memory // 10 ** 9 * 10 ** 9)
141 # If the memory pool is smaller than 2GB, just use the default in ray.
142 if object_store_memory == 0:
143 object_store_memory = None
144 else:
145 object_store_memory = int(object_store_memory)
146
147 ray_init_kwargs = {
148 "num_cpus": CpuCount.get(),
149 "num_gpus": GpuCount.get(),
150 "include_dashboard": False,
151 "ignore_reinit_error": True,
152 "object_store_memory": object_store_memory,
153 "address": redis_address,
154 "_redis_password": redis_password,
155 "_memory": object_store_memory,
156 }
157 ray.init(**ray_init_kwargs)
158
159 _move_stdlib_ahead_of_site_packages()
160 ray.worker.global_worker.run_function_on_all_workers(
161 _move_stdlib_ahead_of_site_packages
162 )
163 if Backend.get() == "Cudf":
164 from modin.engines.ray.cudf_on_ray.frame.gpu_manager import GPUManager
165 from modin.engines.ray.cudf_on_ray.frame.partition_manager import (
166 GPU_MANAGERS,
167 )
168
169 # Check that GPU_MANAGERS is empty because _update_engine can be called multiple times
170 if not GPU_MANAGERS:
171 for i in range(GpuCount.get()):
172 GPU_MANAGERS.append(GPUManager.remote(i))
173 ray.worker.global_worker.run_function_on_all_workers(_import_pandas)
174 num_cpus = int(ray.cluster_resources()["CPU"])
175 num_gpus = int(ray.cluster_resources().get("GPU", 0))
176 if Backend.get() == "Cudf":
177 NPartitions._put(num_gpus)
178 else:
179 NPartitions._put(num_cpus)
180
[end of modin/engines/ray/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modin/engines/ray/utils.py b/modin/engines/ray/utils.py
--- a/modin/engines/ray/utils.py
+++ b/modin/engines/ray/utils.py
@@ -15,6 +15,8 @@
import os
import sys
+import psutil
+import warnings
from modin.config import (
Backend,
@@ -135,9 +137,24 @@
object_store_memory = Memory.get()
# In case anything failed above, we can still improve the memory for Modin.
if object_store_memory is None:
- # Round down to the nearest Gigabyte.
- system_memory = ray._private.utils.get_system_memory()
- object_store_memory = int(0.6 * system_memory // 10 ** 9 * 10 ** 9)
+ virtual_memory = psutil.virtual_memory().total
+ if sys.platform.startswith("linux"):
+ shm_fd = os.open("/dev/shm", os.O_RDONLY)
+ try:
+ shm_stats = os.fstatvfs(shm_fd)
+ system_memory = shm_stats.f_bsize * shm_stats.f_bavail
+ if system_memory / (virtual_memory / 2) < 0.99:
+ warnings.warn(
+ f"The size of /dev/shm is too small ({system_memory} bytes). The required size "
+ f"at least half of RAM ({virtual_memory // 2} bytes). Please, delete files in /dev/shm or "
+ "increase size of /dev/shm with --shm-size in Docker. Also, you can set "
+ "the required memory size for each Ray worker in bytes to MODIN_MEMORY environment variable."
+ )
+ finally:
+ os.close(shm_fd)
+ else:
+ system_memory = virtual_memory
+ object_store_memory = int(0.6 * system_memory // 1e9 * 1e9)
# If the memory pool is smaller than 2GB, just use the default in ray.
if object_store_memory == 0:
object_store_memory = None
| {"golden_diff": "diff --git a/modin/engines/ray/utils.py b/modin/engines/ray/utils.py\n--- a/modin/engines/ray/utils.py\n+++ b/modin/engines/ray/utils.py\n@@ -15,6 +15,8 @@\n \n import os\n import sys\n+import psutil\n+import warnings\n \n from modin.config import (\n Backend,\n@@ -135,9 +137,24 @@\n object_store_memory = Memory.get()\n # In case anything failed above, we can still improve the memory for Modin.\n if object_store_memory is None:\n- # Round down to the nearest Gigabyte.\n- system_memory = ray._private.utils.get_system_memory()\n- object_store_memory = int(0.6 * system_memory // 10 ** 9 * 10 ** 9)\n+ virtual_memory = psutil.virtual_memory().total\n+ if sys.platform.startswith(\"linux\"):\n+ shm_fd = os.open(\"/dev/shm\", os.O_RDONLY)\n+ try:\n+ shm_stats = os.fstatvfs(shm_fd)\n+ system_memory = shm_stats.f_bsize * shm_stats.f_bavail\n+ if system_memory / (virtual_memory / 2) < 0.99:\n+ warnings.warn(\n+ f\"The size of /dev/shm is too small ({system_memory} bytes). The required size \"\n+ f\"at least half of RAM ({virtual_memory // 2} bytes). Please, delete files in /dev/shm or \"\n+ \"increase size of /dev/shm with --shm-size in Docker. Also, you can set \"\n+ \"the required memory size for each Ray worker in bytes to MODIN_MEMORY environment variable.\"\n+ )\n+ finally:\n+ os.close(shm_fd)\n+ else:\n+ system_memory = virtual_memory\n+ object_store_memory = int(0.6 * system_memory // 1e9 * 1e9)\n # If the memory pool is smaller than 2GB, just use the default in ray.\n if object_store_memory == 0:\n object_store_memory = None\n", "issue": "`ValueError` during initialization of Ray 1.4 in Modin\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux Ubuntu\r\n- **Modin version** (`modin.__version__`): 0.10.0\r\n- **Python version**: 3.8.10\r\n- **Code we can use to reproduce**:\r\n\r\n```python\r\nimport modin.pandas as pd\r\ndf = pd.DataFrame([0])\r\n```\r\n\r\n<!--\r\nYou can obtain the Modin version with\r\n\r\npython -c \"import modin; print(modin.__version__)\"\r\n-->\r\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\nDuring initialization of Ray in Modin `ValueError` is occurred:\r\n```\r\nValueError: The configured object store size (486.0 GB) exceeds /dev/shm size (405.07611136 GB). This will harm performance. Consider deleting files in /dev/shm or increasing its size with --shm-size in Docker. To ignore this warning, set RAY_OBJECT_STORE_ALLOW_SLOW_STORAGE=1.\r\n```\r\n\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"The module holds utility and initialization routines for Modin on Ray.\"\"\"\n\nimport os\nimport sys\n\nfrom modin.config import (\n Backend,\n IsRayCluster,\n RayRedisAddress,\n RayRedisPassword,\n CpuCount,\n GpuCount,\n Memory,\n NPartitions,\n)\n\n\ndef _move_stdlib_ahead_of_site_packages(*args):\n \"\"\"\n Ensure packages from stdlib have higher import priority than from site-packages.\n\n Parameters\n ----------\n *args : tuple\n Ignored, added for compatibility with Ray.\n\n Notes\n -----\n This function is expected to be run on all workers including the driver.\n This is a hack solution to fix GH-#647, GH-#746.\n \"\"\"\n site_packages_path = None\n site_packages_path_index = -1\n for i, path in enumerate(sys.path):\n if sys.exec_prefix in path and path.endswith(\"site-packages\"):\n site_packages_path = path\n site_packages_path_index = i\n # break on first found\n break\n\n if site_packages_path is not None:\n # stdlib packages layout as follows:\n # - python3.x\n # - typing.py\n # - site-packages/\n # - pandas\n # So extracting the dirname of the site_packages can point us\n # to the directory containing standard libraries.\n sys.path.insert(site_packages_path_index, os.path.dirname(site_packages_path))\n\n\ndef _import_pandas(*args):\n \"\"\"\n Import pandas to make sure all its machinery is ready.\n\n This prevents a race condition between two threads deserializing functions\n and trying to import pandas at the same time.\n\n Parameters\n ----------\n *args : tuple\n Ignored, added for compatibility with Ray.\n\n Notes\n -----\n This function is expected to be run on all workers before any\n serialization or deserialization starts.\n \"\"\"\n import pandas # noqa F401\n\n\ndef initialize_ray(\n override_is_cluster=False,\n override_redis_address: str = None,\n override_redis_password: str = None,\n):\n \"\"\"\n Initialize Ray based on parameters, ``modin.config`` variables and internal defaults.\n\n Parameters\n ----------\n override_is_cluster : bool, default: False\n Whether to override the detection of Modin being run in a cluster\n and always assume this runs on cluster head node.\n This also overrides Ray worker detection and always runs the initialization\n function (runs from main thread only by default).\n If not specified, ``modin.config.IsRayCluster`` variable is used.\n override_redis_address : str, optional\n What Redis address to connect to when running in Ray cluster.\n If not specified, ``modin.config.RayRedisAddress`` is used.\n override_redis_password : str, optional\n What password to use when connecting to Redis.\n If not specified, ``modin.config.RayRedisPassword`` is used.\n \"\"\"\n import ray\n\n if not ray.is_initialized() or override_is_cluster:\n cluster = override_is_cluster or IsRayCluster.get()\n redis_address = override_redis_address or RayRedisAddress.get()\n redis_password = override_redis_password or RayRedisPassword.get()\n\n if cluster:\n # We only start ray in a cluster setting for the head node.\n ray.init(\n address=redis_address or \"auto\",\n include_dashboard=False,\n ignore_reinit_error=True,\n _redis_password=redis_password,\n )\n else:\n from modin.error_message import ErrorMessage\n\n # This string is intentionally formatted this way. 
We want it indented in\n # the warning message.\n ErrorMessage.not_initialized(\n \"Ray\",\n \"\"\"\n import ray\n ray.init()\n\"\"\",\n )\n object_store_memory = Memory.get()\n # In case anything failed above, we can still improve the memory for Modin.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n system_memory = ray._private.utils.get_system_memory()\n object_store_memory = int(0.6 * system_memory // 10 ** 9 * 10 ** 9)\n # If the memory pool is smaller than 2GB, just use the default in ray.\n if object_store_memory == 0:\n object_store_memory = None\n else:\n object_store_memory = int(object_store_memory)\n\n ray_init_kwargs = {\n \"num_cpus\": CpuCount.get(),\n \"num_gpus\": GpuCount.get(),\n \"include_dashboard\": False,\n \"ignore_reinit_error\": True,\n \"object_store_memory\": object_store_memory,\n \"address\": redis_address,\n \"_redis_password\": redis_password,\n \"_memory\": object_store_memory,\n }\n ray.init(**ray_init_kwargs)\n\n _move_stdlib_ahead_of_site_packages()\n ray.worker.global_worker.run_function_on_all_workers(\n _move_stdlib_ahead_of_site_packages\n )\n if Backend.get() == \"Cudf\":\n from modin.engines.ray.cudf_on_ray.frame.gpu_manager import GPUManager\n from modin.engines.ray.cudf_on_ray.frame.partition_manager import (\n GPU_MANAGERS,\n )\n\n # Check that GPU_MANAGERS is empty because _update_engine can be called multiple times\n if not GPU_MANAGERS:\n for i in range(GpuCount.get()):\n GPU_MANAGERS.append(GPUManager.remote(i))\n ray.worker.global_worker.run_function_on_all_workers(_import_pandas)\n num_cpus = int(ray.cluster_resources()[\"CPU\"])\n num_gpus = int(ray.cluster_resources().get(\"GPU\", 0))\n if Backend.get() == \"Cudf\":\n NPartitions._put(num_gpus)\n else:\n NPartitions._put(num_cpus)\n", "path": "modin/engines/ray/utils.py"}]} | 2,633 | 466 |
gh_patches_debug_28968 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2710 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
testing 2394: moderator cannot add module
**URL:** https://meinberlin-dev.liqd.net/dashboard/projects/testing-dashbaord/basic/
**user:** moderator
**expected behaviour:** can create module
**behaviour:** cannot create module
**important screensize:**
**device & browser:**
**Comment/Question:** We should allow this. In the long run we should reconsider general project changing rights to moderators. Now that we have groups we might no longer need it.
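
A sketch of the direction this could take (assuming moderators carry the project-level `change_project` permission rather than the organisation-level `add_project` one): check the permission against the project itself in the module blueprint and create views.

```python
# Sketch only, not the final patch: relax ModuleCreateView so project moderators qualify.
from django.views import generic
from django.views.generic.detail import SingleObjectMixin

from adhocracy4.dashboard import mixins
from adhocracy4.projects import models as project_models
from adhocracy4.projects.mixins import ProjectMixin


class ModuleCreateView(ProjectMixin,
                       mixins.DashboardBaseMixin,
                       mixins.BlueprintMixin,
                       SingleObjectMixin,
                       generic.View):
    # 'change_project' is what moderators typically hold; 'add_project' is organisation-level.
    permission_required = 'a4projects.change_project'
    model = project_models.Project
    slug_url_kwarg = 'project_slug'

    def get_permission_object(self):
        # Check against the project instead of the organisation.
        return self.project
```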
</issue>
<code>
[start of meinberlin/apps/dashboard/views.py]
1 from django.apps import apps
2 from django.contrib import messages
3 from django.contrib.messages.views import SuccessMessageMixin
4 from django.http import HttpResponseRedirect
5 from django.urls import resolve
6 from django.urls import reverse
7 from django.utils.translation import ugettext_lazy as _
8 from django.views import generic
9 from django.views.generic.detail import SingleObjectMixin
10
11 from adhocracy4.dashboard import mixins
12 from adhocracy4.dashboard import signals
13 from adhocracy4.dashboard import views as a4dashboard_views
14 from adhocracy4.dashboard.blueprints import get_blueprints
15 from adhocracy4.modules import models as module_models
16 from adhocracy4.phases import models as phase_models
17 from adhocracy4.projects import models as project_models
18 from adhocracy4.projects.mixins import ProjectMixin
19 from meinberlin.apps.dashboard.forms import DashboardProjectCreateForm
20
21
22 class ModuleBlueprintListView(ProjectMixin,
23 mixins.DashboardBaseMixin,
24 mixins.BlueprintMixin,
25 generic.DetailView):
26 template_name = 'meinberlin_dashboard/module_blueprint_list_dashboard.html'
27 permission_required = 'a4projects.add_project'
28 model = project_models.Project
29 slug_url_kwarg = 'project_slug'
30 menu_item = 'project'
31
32 @property
33 def blueprints(self):
34 return get_blueprints()
35
36 def get_permission_object(self):
37 return self.organisation
38
39
40 class ModuleCreateView(ProjectMixin,
41 mixins.DashboardBaseMixin,
42 mixins.BlueprintMixin,
43 SingleObjectMixin,
44 generic.View):
45 permission_required = 'a4projects.add_project'
46 model = project_models.Project
47 slug_url_kwarg = 'project_slug'
48
49 def post(self, request, *args, **kwargs):
50 project = self.get_object()
51 weight = 1
52 if project.modules:
53 weight = max(
54 project.modules.values_list('weight', flat=True)
55 ) + 1
56 module = module_models.Module(
57 name=self.blueprint.title,
58 weight=weight,
59 project=project,
60 is_draft=True,
61 )
62 module.save()
63 signals.module_created.send(sender=None,
64 module=module,
65 user=self.request.user)
66
67 self._create_module_settings(module)
68 self._create_phases(module, self.blueprint.content)
69
70 return HttpResponseRedirect(self.get_next(module))
71
72 def _create_module_settings(self, module):
73 if self.blueprint.settings_model:
74 settings_model = apps.get_model(*self.blueprint.settings_model)
75 module_settings = settings_model(module=module)
76 module_settings.save()
77
78 def _create_phases(self, module, blueprint_phases):
79 for index, phase_content in enumerate(blueprint_phases):
80 phase = phase_models.Phase(
81 type=phase_content.identifier,
82 name=phase_content.name,
83 description=phase_content.description,
84 weight=index,
85 module=module,
86 )
87 phase.save()
88
89 def get_next(self, module):
90 return reverse('a4dashboard:dashboard-module_basic-edit', kwargs={
91 'module_slug': module.slug
92 })
93
94 def get_permission_object(self):
95 return self.organisation
96
97
98 class ModulePublishView(SingleObjectMixin,
99 generic.View):
100 permission_required = 'a4projects.change_project'
101 model = module_models.Module
102 slug_url_kwarg = 'module_slug'
103
104 def get_permission_object(self):
105 return self.get_object().project
106
107 def post(self, request, *args, **kwargs):
108 action = request.POST.get('action', None)
109 if action == 'publish':
110 self.publish_module()
111 elif action == 'unpublish':
112 self.unpublish_module()
113 else:
114 messages.warning(self.request, _('Invalid action'))
115
116 return HttpResponseRedirect(self.get_next())
117
118 def get_next(self):
119 if 'referrer' in self.request.POST:
120 return self.request.POST['referrer']
121 elif 'HTTP_REFERER' in self.request.META:
122 return self.request.META['HTTP_REFERER']
123
124 return reverse('a4dashboard:project-edit', kwargs={
125 'project_slug': self.project.slug
126 })
127
128 def publish_module(self):
129 module = self.get_object()
130 if not module.is_draft:
131 messages.info(self.request, _('Module is already added'))
132 return
133
134 module.is_draft = False
135 module.save()
136
137 signals.module_published.send(sender=None,
138 module=module,
139 user=self.request.user)
140
141 messages.success(self.request,
142 _('Module successfully added.'))
143
144 def unpublish_module(self):
145 module = self.get_object()
146 if module.is_draft:
147 messages.info(self.request, _('Module is already removed'))
148 return
149
150 module.is_draft = True
151 module.save()
152
153 signals.module_unpublished.send(sender=None,
154 module=module,
155 user=self.request.user)
156
157 messages.success(self.request,
158 _('Module successfully removed.'))
159
160
161 class ModuleDeleteView(generic.DeleteView):
162 permission_required = 'a4projects.change_project'
163 model = module_models.Module
164 success_message = _('The module has been deleted')
165
166 def delete(self, request, *args, **kwargs):
167 messages.success(self.request, self.success_message)
168 return super().delete(request, *args, **kwargs)
169
170 def get_permission_object(self):
171 return self.get_object().project
172
173 def get_success_url(self):
174 referrer = self.request.POST.get('referrer', None) \
175 or self.request.META.get('HTTP_REFERER', None)
176 if referrer:
177 view, args, kwargs = resolve(referrer)
178 if 'module_slug' not in kwargs \
179 or not kwargs['module_slug'] == self.get_object().slug:
180 return referrer
181
182 return reverse('a4dashboard:project-edit', kwargs={
183 'project_slug': self.get_object().project.slug
184 })
185
186
187 class DashboardProjectListView(a4dashboard_views.ProjectListView):
188 def get_queryset(self):
189 return super().get_queryset().filter(
190 projectcontainer=None,
191 externalproject=None
192 )
193
194
195 class ProjectCreateView(mixins.DashboardBaseMixin,
196 SuccessMessageMixin,
197 generic.CreateView):
198 model = project_models.Project
199 slug_url_kwarg = 'project_slug'
200 form_class = DashboardProjectCreateForm
201 template_name = 'a4dashboard/project_create_form.html'
202 permission_required = 'a4projects.add_project'
203 menu_item = 'project'
204 success_message = _('Project successfully created.')
205
206 def get_permission_object(self):
207 return self.organisation
208
209 def get_form_kwargs(self):
210 kwargs = super().get_form_kwargs()
211 kwargs['organisation'] = self.organisation
212 kwargs['creator'] = self.request.user
213 return kwargs
214
215 def get_success_url(self):
216 return reverse('a4dashboard:project-edit',
217 kwargs={'project_slug': self.object.slug})
218
219 def form_valid(self, form):
220 response = super().form_valid(form)
221 signals.project_created.send(sender=None,
222 project=self.object,
223 user=self.request.user)
224
225 return response
226
[end of meinberlin/apps/dashboard/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/meinberlin/apps/dashboard/views.py b/meinberlin/apps/dashboard/views.py
--- a/meinberlin/apps/dashboard/views.py
+++ b/meinberlin/apps/dashboard/views.py
@@ -24,7 +24,7 @@
mixins.BlueprintMixin,
generic.DetailView):
template_name = 'meinberlin_dashboard/module_blueprint_list_dashboard.html'
- permission_required = 'a4projects.add_project'
+ permission_required = 'a4projects.change_project'
model = project_models.Project
slug_url_kwarg = 'project_slug'
menu_item = 'project'
@@ -34,7 +34,7 @@
return get_blueprints()
def get_permission_object(self):
- return self.organisation
+ return self.project
class ModuleCreateView(ProjectMixin,
@@ -42,7 +42,7 @@
mixins.BlueprintMixin,
SingleObjectMixin,
generic.View):
- permission_required = 'a4projects.add_project'
+ permission_required = 'a4projects.change_project'
model = project_models.Project
slug_url_kwarg = 'project_slug'
@@ -92,7 +92,7 @@
})
def get_permission_object(self):
- return self.organisation
+ return self.project
class ModulePublishView(SingleObjectMixin,
| {"golden_diff": "diff --git a/meinberlin/apps/dashboard/views.py b/meinberlin/apps/dashboard/views.py\n--- a/meinberlin/apps/dashboard/views.py\n+++ b/meinberlin/apps/dashboard/views.py\n@@ -24,7 +24,7 @@\n mixins.BlueprintMixin,\n generic.DetailView):\n template_name = 'meinberlin_dashboard/module_blueprint_list_dashboard.html'\n- permission_required = 'a4projects.add_project'\n+ permission_required = 'a4projects.change_project'\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n menu_item = 'project'\n@@ -34,7 +34,7 @@\n return get_blueprints()\n \n def get_permission_object(self):\n- return self.organisation\n+ return self.project\n \n \n class ModuleCreateView(ProjectMixin,\n@@ -42,7 +42,7 @@\n mixins.BlueprintMixin,\n SingleObjectMixin,\n generic.View):\n- permission_required = 'a4projects.add_project'\n+ permission_required = 'a4projects.change_project'\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n \n@@ -92,7 +92,7 @@\n })\n \n def get_permission_object(self):\n- return self.organisation\n+ return self.project\n \n \n class ModulePublishView(SingleObjectMixin,\n", "issue": "testing 2394: moderation cannot add module\n**URL:** https://meinberlin-dev.liqd.net/dashboard/projects/testing-dashbaord/basic/\r\n**user:** moderator\r\n**expected behaviour:** can create module\r\n**behaviour:** cannot create module\r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** We should allow this. In the long run we should reconsider general project changing rights to moderators. Now that we have groups we might no longer need it. \r\n\r\n\n", "before_files": [{"content": "from django.apps import apps\nfrom django.contrib import messages\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.http import HttpResponseRedirect\nfrom django.urls import resolve\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\nfrom django.views.generic.detail import SingleObjectMixin\n\nfrom adhocracy4.dashboard import mixins\nfrom adhocracy4.dashboard import signals\nfrom adhocracy4.dashboard import views as a4dashboard_views\nfrom adhocracy4.dashboard.blueprints import get_blueprints\nfrom adhocracy4.modules import models as module_models\nfrom adhocracy4.phases import models as phase_models\nfrom adhocracy4.projects import models as project_models\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom meinberlin.apps.dashboard.forms import DashboardProjectCreateForm\n\n\nclass ModuleBlueprintListView(ProjectMixin,\n mixins.DashboardBaseMixin,\n mixins.BlueprintMixin,\n generic.DetailView):\n template_name = 'meinberlin_dashboard/module_blueprint_list_dashboard.html'\n permission_required = 'a4projects.add_project'\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n menu_item = 'project'\n\n @property\n def blueprints(self):\n return get_blueprints()\n\n def get_permission_object(self):\n return self.organisation\n\n\nclass ModuleCreateView(ProjectMixin,\n mixins.DashboardBaseMixin,\n mixins.BlueprintMixin,\n SingleObjectMixin,\n generic.View):\n permission_required = 'a4projects.add_project'\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n\n def post(self, request, *args, **kwargs):\n project = self.get_object()\n weight = 1\n if project.modules:\n weight = max(\n project.modules.values_list('weight', flat=True)\n ) + 1\n module = module_models.Module(\n name=self.blueprint.title,\n weight=weight,\n project=project,\n 
is_draft=True,\n )\n module.save()\n signals.module_created.send(sender=None,\n module=module,\n user=self.request.user)\n\n self._create_module_settings(module)\n self._create_phases(module, self.blueprint.content)\n\n return HttpResponseRedirect(self.get_next(module))\n\n def _create_module_settings(self, module):\n if self.blueprint.settings_model:\n settings_model = apps.get_model(*self.blueprint.settings_model)\n module_settings = settings_model(module=module)\n module_settings.save()\n\n def _create_phases(self, module, blueprint_phases):\n for index, phase_content in enumerate(blueprint_phases):\n phase = phase_models.Phase(\n type=phase_content.identifier,\n name=phase_content.name,\n description=phase_content.description,\n weight=index,\n module=module,\n )\n phase.save()\n\n def get_next(self, module):\n return reverse('a4dashboard:dashboard-module_basic-edit', kwargs={\n 'module_slug': module.slug\n })\n\n def get_permission_object(self):\n return self.organisation\n\n\nclass ModulePublishView(SingleObjectMixin,\n generic.View):\n permission_required = 'a4projects.change_project'\n model = module_models.Module\n slug_url_kwarg = 'module_slug'\n\n def get_permission_object(self):\n return self.get_object().project\n\n def post(self, request, *args, **kwargs):\n action = request.POST.get('action', None)\n if action == 'publish':\n self.publish_module()\n elif action == 'unpublish':\n self.unpublish_module()\n else:\n messages.warning(self.request, _('Invalid action'))\n\n return HttpResponseRedirect(self.get_next())\n\n def get_next(self):\n if 'referrer' in self.request.POST:\n return self.request.POST['referrer']\n elif 'HTTP_REFERER' in self.request.META:\n return self.request.META['HTTP_REFERER']\n\n return reverse('a4dashboard:project-edit', kwargs={\n 'project_slug': self.project.slug\n })\n\n def publish_module(self):\n module = self.get_object()\n if not module.is_draft:\n messages.info(self.request, _('Module is already added'))\n return\n\n module.is_draft = False\n module.save()\n\n signals.module_published.send(sender=None,\n module=module,\n user=self.request.user)\n\n messages.success(self.request,\n _('Module successfully added.'))\n\n def unpublish_module(self):\n module = self.get_object()\n if module.is_draft:\n messages.info(self.request, _('Module is already removed'))\n return\n\n module.is_draft = True\n module.save()\n\n signals.module_unpublished.send(sender=None,\n module=module,\n user=self.request.user)\n\n messages.success(self.request,\n _('Module successfully removed.'))\n\n\nclass ModuleDeleteView(generic.DeleteView):\n permission_required = 'a4projects.change_project'\n model = module_models.Module\n success_message = _('The module has been deleted')\n\n def delete(self, request, *args, **kwargs):\n messages.success(self.request, self.success_message)\n return super().delete(request, *args, **kwargs)\n\n def get_permission_object(self):\n return self.get_object().project\n\n def get_success_url(self):\n referrer = self.request.POST.get('referrer', None) \\\n or self.request.META.get('HTTP_REFERER', None)\n if referrer:\n view, args, kwargs = resolve(referrer)\n if 'module_slug' not in kwargs \\\n or not kwargs['module_slug'] == self.get_object().slug:\n return referrer\n\n return reverse('a4dashboard:project-edit', kwargs={\n 'project_slug': self.get_object().project.slug\n })\n\n\nclass DashboardProjectListView(a4dashboard_views.ProjectListView):\n def get_queryset(self):\n return super().get_queryset().filter(\n projectcontainer=None,\n 
externalproject=None\n )\n\n\nclass ProjectCreateView(mixins.DashboardBaseMixin,\n SuccessMessageMixin,\n generic.CreateView):\n model = project_models.Project\n slug_url_kwarg = 'project_slug'\n form_class = DashboardProjectCreateForm\n template_name = 'a4dashboard/project_create_form.html'\n permission_required = 'a4projects.add_project'\n menu_item = 'project'\n success_message = _('Project successfully created.')\n\n def get_permission_object(self):\n return self.organisation\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['organisation'] = self.organisation\n kwargs['creator'] = self.request.user\n return kwargs\n\n def get_success_url(self):\n return reverse('a4dashboard:project-edit',\n kwargs={'project_slug': self.object.slug})\n\n def form_valid(self, form):\n response = super().form_valid(form)\n signals.project_created.send(sender=None,\n project=self.object,\n user=self.request.user)\n\n return response\n", "path": "meinberlin/apps/dashboard/views.py"}]} | 2,691 | 292 |
gh_patches_debug_48394 | rasdani/github-patches | git_diff | DDMAL__CantusDB-274 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Assign a specific user to multiple sources in Django admin
In the user-edit page in the Django admin interface, we already have a selector that allows for multi-selecting sources and assigning the user to them. We need to make the selector box wider so that the source titles are not clipped.
This issue is related to issue #216 , the relationship between the User model and Source model should go both ways.
</issue>
<code>
[start of django/cantusdb_project/main_app/models/source.py]
1 from django.db import models
2 from main_app.models import BaseModel, Segment
3 from django.contrib.auth import get_user_model
4
5
6 class Source(BaseModel):
7 cursus_choices = [("Monastic", "Monastic"), ("Secular", "Secular")]
8 source_status_choices = [
9 (
10 "Editing process (not all the fields have been proofread)",
11 "Editing process (not all the fields have been proofread)",
12 ),
13 ("Published / Complete", "Published / Complete"),
14 ("Published / Proofread pending", "Published / Proofread pending"),
15 ("Unpublished / Editing process", "Unpublished / Editing process"),
16 ("Unpublished / Indexing process", "Unpublished / Indexing process"),
17 ("Unpublished / Proofread pending", "Unpublished / Proofread pending"),
18 ("Unpublished / Proofreading process", "Unpublished / Proofreading process"),
19 ]
20
21 # sources with public=False cannot be accessed by its url (access denied) and do not appear in source list
22 public = models.BooleanField(blank=True, null=True)
23 # sources with visible=False can be accessed by typing in the url, but do not appear in source list
24 visible = models.BooleanField(blank=True, null=True)
25 title = models.CharField(
26 max_length=255,
27 help_text="Full Manuscript Identification (City, Archive, Shelf-mark)",
28 )
29 # the siglum field as implemented on the old Cantus is composed of both the RISM siglum and the shelfmark
30 # it is a human-readable ID for a source
31 siglum = models.CharField(
32 max_length=63,
33 null=True,
34 blank=True,
35 help_text="RISM-style siglum + Shelf-mark (e.g. GB-Ob 202).",
36 )
37 # the RISM siglum uniquely identifies a library or holding institution
38 rism_siglum = models.ForeignKey(
39 "RismSiglum", on_delete=models.PROTECT, null=True, blank=True,
40 )
41 provenance = models.ForeignKey(
42 "Provenance",
43 on_delete=models.PROTECT,
44 help_text="If the origin is unknown, select a location where the source was "
45 "used later in its lifetime and provide details in the "
46 '"Provenance notes" field.',
47 null=True,
48 blank=True,
49 )
50 provenance_notes = models.TextField(
51 blank=True,
52 null=True,
53 help_text="More exact indication of the provenance (if necessary)",
54 )
55 full_source = models.BooleanField(blank=True, null=True)
56 date = models.CharField(
57 blank=True,
58 null=True,
59 max_length=63,
60 help_text='Date of the manuscript (e.g. "1200s", "1300-1350", etc.)',
61 )
62 century = models.ManyToManyField("Century", related_name="sources")
63 notation = models.ManyToManyField("Notation", related_name="sources")
64 cursus = models.CharField(
65 blank=True, null=True, choices=cursus_choices, max_length=63
66 )
67 # TODO: Fill this field up with JSON info when I have access to the Users
68 current_editors = models.ManyToManyField(get_user_model(), related_name="sources_user_can_edit")
69 inventoried_by = models.ManyToManyField(
70 "Indexer", related_name="sources_inventoried"
71 )
72 full_text_entered_by = models.ManyToManyField(
73 "Indexer", related_name="entered_full_text_for_sources"
74 )
75 melodies_entered_by = models.ManyToManyField(
76 "Indexer", related_name="entered_melody_for_sources"
77 )
78 proofreaders = models.ManyToManyField("Indexer", related_name="proofread_sources")
79 other_editors = models.ManyToManyField("Indexer", related_name="edited_sources")
80 segment = models.ForeignKey(
81 "Segment", on_delete=models.PROTECT, blank=True, null=True
82 )
83 source_status = models.CharField(blank=True, null=True, max_length=255)
84 complete_inventory = models.BooleanField(blank=True, null=True)
85 summary = models.TextField(blank=True, null=True)
86 liturgical_occasions = models.TextField(blank=True, null=True)
87 description = models.TextField(blank=True, null=True)
88 selected_bibliography = models.TextField(blank=True, null=True)
89 image_link = models.URLField(
90 blank=True,
91 null=True,
92 help_text='HTTP link to the image gallery of the source.',
93 )
94 indexing_notes = models.TextField(blank=True, null=True)
95 indexing_date = models.TextField(blank=True, null=True)
96 json_info = models.JSONField(blank=True, null=True)
97 fragmentarium_id = models.CharField(max_length=15, blank=True, null=True)
98 dact_id = models.CharField(max_length=15, blank=True, null=True)
99
100 def number_of_chants(self) -> int:
101 """Returns the number of Chants and Sequences in this Source."""
102 return self.chant_set.count() + self.sequence_set.count()
103
104 def number_of_melodies(self) -> int:
105 """Returns the number of Chants in this Source that have melodies."""
106 return self.chant_set.filter(volpiano__isnull=False).count()
107
108 def __str__(self):
109 string = '{t} ({i})'.format(t=self.title, i=self.id)
110 return string
111
112 def save(self, *args, **kwargs):
113 # when creating a source, assign it to "Cantus Database" by default
114 cantus_db_segment = Segment.objects.get(name="CANTUS Database")
115 self.segment = cantus_db_segment
116 super().save(*args, **kwargs)
117
[end of django/cantusdb_project/main_app/models/source.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django/cantusdb_project/main_app/models/source.py b/django/cantusdb_project/main_app/models/source.py
--- a/django/cantusdb_project/main_app/models/source.py
+++ b/django/cantusdb_project/main_app/models/source.py
@@ -106,7 +106,7 @@
return self.chant_set.filter(volpiano__isnull=False).count()
def __str__(self):
- string = '{t} ({i})'.format(t=self.title, i=self.id)
+ string = '[{s}] {t} ({i})'.format(s=self.rism_siglum, t=self.title, i=self.id)
return string
def save(self, *args, **kwargs):
| {"golden_diff": "diff --git a/django/cantusdb_project/main_app/models/source.py b/django/cantusdb_project/main_app/models/source.py\n--- a/django/cantusdb_project/main_app/models/source.py\n+++ b/django/cantusdb_project/main_app/models/source.py\n@@ -106,7 +106,7 @@\n return self.chant_set.filter(volpiano__isnull=False).count()\n \n def __str__(self):\n- string = '{t} ({i})'.format(t=self.title, i=self.id)\n+ string = '[{s}] {t} ({i})'.format(s=self.rism_siglum, t=self.title, i=self.id)\n return string\n \n def save(self, *args, **kwargs):\n", "issue": "Assign a specific user to multiple sources in Django admin\nIn the user-edit page in the Django admin interface, we already have a selector that allows for multi-selecting sources and assigning the user to them. We need to make the selector box wider so that the source titles are not clipped. \r\n\r\nThis issue is related to issue #216 , the relationship between the User model and Source model should go both ways. \n", "before_files": [{"content": "from django.db import models\nfrom main_app.models import BaseModel, Segment\nfrom django.contrib.auth import get_user_model\n\n\nclass Source(BaseModel):\n cursus_choices = [(\"Monastic\", \"Monastic\"), (\"Secular\", \"Secular\")]\n source_status_choices = [\n (\n \"Editing process (not all the fields have been proofread)\",\n \"Editing process (not all the fields have been proofread)\",\n ),\n (\"Published / Complete\", \"Published / Complete\"),\n (\"Published / Proofread pending\", \"Published / Proofread pending\"),\n (\"Unpublished / Editing process\", \"Unpublished / Editing process\"),\n (\"Unpublished / Indexing process\", \"Unpublished / Indexing process\"),\n (\"Unpublished / Proofread pending\", \"Unpublished / Proofread pending\"),\n (\"Unpublished / Proofreading process\", \"Unpublished / Proofreading process\"),\n ]\n\n # sources with public=False cannot be accessed by its url (access denied) and do not appear in source list\n public = models.BooleanField(blank=True, null=True)\n # sources with visible=False can be accessed by typing in the url, but do not appear in source list\n visible = models.BooleanField(blank=True, null=True)\n title = models.CharField(\n max_length=255,\n help_text=\"Full Manuscript Identification (City, Archive, Shelf-mark)\",\n )\n # the siglum field as implemented on the old Cantus is composed of both the RISM siglum and the shelfmark\n # it is a human-readable ID for a source\n siglum = models.CharField(\n max_length=63, \n null=True, \n blank=True,\n help_text=\"RISM-style siglum + Shelf-mark (e.g. GB-Ob 202).\",\n )\n # the RISM siglum uniquely identifies a library or holding institution\n rism_siglum = models.ForeignKey(\n \"RismSiglum\", on_delete=models.PROTECT, null=True, blank=True,\n )\n provenance = models.ForeignKey(\n \"Provenance\",\n on_delete=models.PROTECT,\n help_text=\"If the origin is unknown, select a location where the source was \"\n \"used later in its lifetime and provide details in the \"\n '\"Provenance notes\" field.',\n null=True,\n blank=True,\n )\n provenance_notes = models.TextField(\n blank=True,\n null=True,\n help_text=\"More exact indication of the provenance (if necessary)\",\n )\n full_source = models.BooleanField(blank=True, null=True)\n date = models.CharField(\n blank=True,\n null=True,\n max_length=63,\n help_text='Date of the manuscript (e.g. 
\"1200s\", \"1300-1350\", etc.)',\n )\n century = models.ManyToManyField(\"Century\", related_name=\"sources\")\n notation = models.ManyToManyField(\"Notation\", related_name=\"sources\")\n cursus = models.CharField(\n blank=True, null=True, choices=cursus_choices, max_length=63\n )\n # TODO: Fill this field up with JSON info when I have access to the Users\n current_editors = models.ManyToManyField(get_user_model(), related_name=\"sources_user_can_edit\")\n inventoried_by = models.ManyToManyField(\n \"Indexer\", related_name=\"sources_inventoried\"\n )\n full_text_entered_by = models.ManyToManyField(\n \"Indexer\", related_name=\"entered_full_text_for_sources\"\n )\n melodies_entered_by = models.ManyToManyField(\n \"Indexer\", related_name=\"entered_melody_for_sources\"\n )\n proofreaders = models.ManyToManyField(\"Indexer\", related_name=\"proofread_sources\")\n other_editors = models.ManyToManyField(\"Indexer\", related_name=\"edited_sources\")\n segment = models.ForeignKey(\n \"Segment\", on_delete=models.PROTECT, blank=True, null=True\n )\n source_status = models.CharField(blank=True, null=True, max_length=255)\n complete_inventory = models.BooleanField(blank=True, null=True)\n summary = models.TextField(blank=True, null=True)\n liturgical_occasions = models.TextField(blank=True, null=True)\n description = models.TextField(blank=True, null=True)\n selected_bibliography = models.TextField(blank=True, null=True)\n image_link = models.URLField(\n blank=True, \n null=True,\n help_text='HTTP link to the image gallery of the source.',\n )\n indexing_notes = models.TextField(blank=True, null=True)\n indexing_date = models.TextField(blank=True, null=True)\n json_info = models.JSONField(blank=True, null=True)\n fragmentarium_id = models.CharField(max_length=15, blank=True, null=True)\n dact_id = models.CharField(max_length=15, blank=True, null=True)\n\n def number_of_chants(self) -> int:\n \"\"\"Returns the number of Chants and Sequences in this Source.\"\"\"\n return self.chant_set.count() + self.sequence_set.count()\n\n def number_of_melodies(self) -> int:\n \"\"\"Returns the number of Chants in this Source that have melodies.\"\"\"\n return self.chant_set.filter(volpiano__isnull=False).count()\n\n def __str__(self):\n string = '{t} ({i})'.format(t=self.title, i=self.id)\n return string\n\n def save(self, *args, **kwargs):\n # when creating a source, assign it to \"Cantus Database\" by default\n cantus_db_segment = Segment.objects.get(name=\"CANTUS Database\")\n self.segment = cantus_db_segment\n super().save(*args, **kwargs)\n", "path": "django/cantusdb_project/main_app/models/source.py"}]} | 2,074 | 166 |
gh_patches_debug_6027 | rasdani/github-patches | git_diff | twisted__twisted-12103 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Avoid encode/decode in chat.py for better readability
As discussed in [this comment](https://github.com/twisted/twisted/pull/12070#discussion_r1442784443), it is better, for readability, to use byte concatenation in [docs/core/howto/listings/servers/chat.py:35](https://github.com/twisted/twisted/pull/12070/files/c59c93ec644a17e0f3a1752ca9ceca31a27a9f5e#diff-0923ff3db530a2e5d28ea8cc2b3a8f91f399792786772c541bf9edf7a0c50126):
```python
message = b'<' + self.name + b'> ' + message
```
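
Applied to `Chat.handle_CHAT` in the listing below, the suggestion would read roughly as follows (a sketch of the proposed change, not the committed patch):

```python
    def handle_CHAT(self, message):
        # self.name and message are already bytes, so plain concatenation avoids
        # the decode/encode round trip entirely.
        message = b"<" + self.name + b"> " + message
        for name, protocol in self.users.items():
            if protocol != self:
                protocol.sendLine(message)
```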
</issue>
<code>
[start of docs/core/howto/listings/servers/chat.py]
1 from twisted.internet import reactor
2 from twisted.internet.protocol import Factory
3 from twisted.protocols.basic import LineReceiver
4
5
6 class Chat(LineReceiver):
7 def __init__(self, users):
8 self.users = users
9 self.name = None
10 self.state = "GETNAME"
11
12 def connectionMade(self):
13 self.sendLine(b"What's your name?")
14
15 def connectionLost(self, reason):
16 if self.name in self.users:
17 del self.users[self.name]
18
19 def lineReceived(self, line):
20 if self.state == "GETNAME":
21 self.handle_GETNAME(line)
22 else:
23 self.handle_CHAT(line)
24
25 def handle_GETNAME(self, name):
26 if name in self.users:
27 self.sendLine(b"Name taken, please choose another.")
28 return
29 self.sendLine(f"Welcome, {name.decode('utf-8')}!".encode("utf-8"))
30 self.name = name
31 self.users[name] = self
32 self.state = "CHAT"
33
34 def handle_CHAT(self, message):
35 message = f"<{self.name.decode('utf-8')}> {message.decode('utf-8')}".encode(
36 "utf-8"
37 )
38 for name, protocol in self.users.items():
39 if protocol != self:
40 protocol.sendLine(message)
41
42
43 class ChatFactory(Factory):
44 def __init__(self):
45 self.users = {} # maps user names to Chat instances
46
47 def buildProtocol(self, addr):
48 return Chat(self.users)
49
50
51 reactor.listenTCP(8123, ChatFactory())
52 reactor.run()
53
[end of docs/core/howto/listings/servers/chat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/core/howto/listings/servers/chat.py b/docs/core/howto/listings/servers/chat.py
--- a/docs/core/howto/listings/servers/chat.py
+++ b/docs/core/howto/listings/servers/chat.py
@@ -32,9 +32,7 @@
self.state = "CHAT"
def handle_CHAT(self, message):
- message = f"<{self.name.decode('utf-8')}> {message.decode('utf-8')}".encode(
- "utf-8"
- )
+ message = b"<" + self.name + b"> " + message
for name, protocol in self.users.items():
if protocol != self:
protocol.sendLine(message)
| {"golden_diff": "diff --git a/docs/core/howto/listings/servers/chat.py b/docs/core/howto/listings/servers/chat.py\n--- a/docs/core/howto/listings/servers/chat.py\n+++ b/docs/core/howto/listings/servers/chat.py\n@@ -32,9 +32,7 @@\n self.state = \"CHAT\"\n \n def handle_CHAT(self, message):\n- message = f\"<{self.name.decode('utf-8')}> {message.decode('utf-8')}\".encode(\n- \"utf-8\"\n- )\n+ message = b\"<\" + self.name + b\"> \" + message\n for name, protocol in self.users.items():\n if protocol != self:\n protocol.sendLine(message)\n", "issue": "Avoid encode/decode in chat.py for better readablity\nAs discussed in [this comment](https://github.com/twisted/twisted/pull/12070#discussion_r1442784443), it's better to use byte concat as mentioned in order of better readability in [docs/core/howto/listings/servers/chat.py:35](https://github.com/twisted/twisted/pull/12070/files/c59c93ec644a17e0f3a1752ca9ceca31a27a9f5e#diff-0923ff3db530a2e5d28ea8cc2b3a8f91f399792786772c541bf9edf7a0c50126)\r\n```python\r\nmessage = b'<' + self.name + b'> ' + message\r\n```\n", "before_files": [{"content": "from twisted.internet import reactor\nfrom twisted.internet.protocol import Factory\nfrom twisted.protocols.basic import LineReceiver\n\n\nclass Chat(LineReceiver):\n def __init__(self, users):\n self.users = users\n self.name = None\n self.state = \"GETNAME\"\n\n def connectionMade(self):\n self.sendLine(b\"What's your name?\")\n\n def connectionLost(self, reason):\n if self.name in self.users:\n del self.users[self.name]\n\n def lineReceived(self, line):\n if self.state == \"GETNAME\":\n self.handle_GETNAME(line)\n else:\n self.handle_CHAT(line)\n\n def handle_GETNAME(self, name):\n if name in self.users:\n self.sendLine(b\"Name taken, please choose another.\")\n return\n self.sendLine(f\"Welcome, {name.decode('utf-8')}!\".encode(\"utf-8\"))\n self.name = name\n self.users[name] = self\n self.state = \"CHAT\"\n\n def handle_CHAT(self, message):\n message = f\"<{self.name.decode('utf-8')}> {message.decode('utf-8')}\".encode(\n \"utf-8\"\n )\n for name, protocol in self.users.items():\n if protocol != self:\n protocol.sendLine(message)\n\n\nclass ChatFactory(Factory):\n def __init__(self):\n self.users = {} # maps user names to Chat instances\n\n def buildProtocol(self, addr):\n return Chat(self.users)\n\n\nreactor.listenTCP(8123, ChatFactory())\nreactor.run()\n", "path": "docs/core/howto/listings/servers/chat.py"}]} | 1,197 | 156 |
gh_patches_debug_43541 | rasdani/github-patches | git_diff | Qiskit__qiskit-6869 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Warning statement upon compiling `cz` gate into `['h', 'cx', 'rz', 'sx', 'x']` gate set
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues -->
### Information
- **Qiskit Terra version**: `0.18.0`
- **Python version**: `3.9.6`
- **Operating system**: `Pop!_OS 21.04 x86_64`
### What is the current behavior?
Upon updating from version `0.17.4`, there is a substantial amount of additional warning output upon compiling cz gates to the basis gate set: `['h', 'cx', 'rz', 'sx', 'x']`:
```
/home/username/anaconda3/envs/qiskit-new/lib/python3.9/site-packages/qiskit/transpiler/runningpassmanager.py:166: UserWarning: Resynthesized [<qiskit.dagcircuit.dagnode.DAGNode object at 0x7f70be79e040>] and got global phase: π/4
┌─────────┐┌────┐┌─────────┐
qr_0: ┤ Rz(π/2) ├┤ √X ├┤ Rz(π/2) ├
└─────────┘└────┘└─────────┘, but the original was native and the new value is longer. This indicates an efficiency bug in synthesis. Please report it by opening an issue here: https://github.com/Qiskit/qiskit-terra/issues/new/choose
new_dag = pass_.run(dag)
```
This doesn't appear to change the ability of the circuits to run correctly; it just fills up the terminal with a lot of unneeded warnings, especially when running large algorithms that require many cz gates.
### Steps to reproduce the problem
```
from qiskit import QuantumCircuit, QuantumRegister
from qiskit import transpile
qr = QuantumRegister(2)
qc_ = QuantumCircuit(qr)
qc_.cz(qr[0], qr[1])
qc = transpile(qc_, basis_gates=['h', 'cx', 'rz', 'sx', 'x'])
```
### What is the expected behavior?
No warnings should be printed when compiling to this basis gate set.
### Suggested solutions
Remove the printing? The basis gates we are using are a pretty common basis gate set, and compiling away from the cz gate is the intended behavior.
</issue>
<code>
[start of qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py]
1 # This code is part of Qiskit.
2 #
3 # (C) Copyright IBM 2017, 2018.
4 #
5 # This code is licensed under the Apache License, Version 2.0. You may
6 # obtain a copy of this license in the LICENSE.txt file in the root directory
7 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
8 #
9 # Any modifications or derivative works of this code must retain this
10 # copyright notice, and modified files need to carry a notice indicating
11 # that they have been altered from the originals.
12
13 """Optimize chains of single-qubit gates using Euler 1q decomposer"""
14
15 import copy
16 import logging
17 import warnings
18
19 import numpy as np
20
21 from qiskit.circuit.library.standard_gates import U3Gate
22 from qiskit.transpiler.basepasses import TransformationPass
23 from qiskit.quantum_info.synthesis import one_qubit_decompose
24 from qiskit.converters import circuit_to_dag
25
26 logger = logging.getLogger(__name__)
27
28
29 class Optimize1qGatesDecomposition(TransformationPass):
30 """Optimize chains of single-qubit gates by combining them into a single gate."""
31
32 def __init__(self, basis=None):
33 """Optimize1qGatesDecomposition initializer.
34
35 Args:
36 basis (list[str]): Basis gates to consider, e.g. `['u3', 'cx']`. For the effects
37 of this pass, the basis is the set intersection between the `basis` parameter
38 and the Euler basis.
39 """
40 super().__init__()
41 self._target_basis = basis
42 self._decomposers = None
43 if basis:
44 self._decomposers = []
45 basis_set = set(basis)
46 euler_basis_gates = one_qubit_decompose.ONE_QUBIT_EULER_BASIS_GATES
47 for euler_basis_name, gates in euler_basis_gates.items():
48 if set(gates).issubset(basis_set):
49 basis_copy = copy.copy(self._decomposers)
50 for base in basis_copy:
51 # check if gates are a superset of another basis
52 # and if so, remove that basis
53 if set(euler_basis_gates[base.basis]).issubset(set(gates)):
54 self._decomposers.remove(base)
55 # check if the gates are a subset of another basis
56 elif set(gates).issubset(set(euler_basis_gates[base.basis])):
57 break
58 # if not a subset, add it to the list
59 else:
60 self._decomposers.append(
61 one_qubit_decompose.OneQubitEulerDecomposer(euler_basis_name)
62 )
63
64 def run(self, dag):
65 """Run the Optimize1qGatesDecomposition pass on `dag`.
66
67 Args:
68 dag (DAGCircuit): the DAG to be optimized.
69
70 Returns:
71 DAGCircuit: the optimized DAG.
72 """
73 if not self._decomposers:
74 logger.info("Skipping pass because no basis is set")
75 return dag
76 runs = dag.collect_1q_runs()
77 for run in runs:
78 # SPECIAL CASE: Don't bother to optimize single U3 gates which are in the basis set.
79 # The U3 decomposer is only going to emit a sequence of length 1 anyhow.
80 if "u3" in self._target_basis and len(run) == 1 and isinstance(run[0].op, U3Gate):
81 # Toss U3 gates equivalent to the identity; there we get off easy.
82 if np.allclose(run[0].op.to_matrix(), np.eye(2), 1e-15, 0):
83 dag.remove_op_node(run[0])
84 continue
85 # We might rewrite into lower `u`s if they're available.
86 if "u2" not in self._target_basis and "u1" not in self._target_basis:
87 continue
88
89 new_circs = []
90 operator = run[0].op.to_matrix()
91 for gate in run[1:]:
92 operator = gate.op.to_matrix().dot(operator)
93 for decomposer in self._decomposers:
94 new_circs.append(decomposer._decompose(operator))
95 if new_circs:
96 new_circ = min(new_circs, key=len)
97
98 # do we even have calibrations?
99 has_cals_p = dag.calibrations is not None and len(dag.calibrations) > 0
100 # is this run all in the target set and also uncalibrated?
101 rewriteable_and_in_basis_p = all(
102 g.name in self._target_basis
103 and (not has_cals_p or not dag.has_calibration_for(g))
104 for g in run
105 )
106 # does this run have uncalibrated gates?
107 uncalibrated_p = not has_cals_p or any(not dag.has_calibration_for(g) for g in run)
108 # does this run have gates not in the image of ._decomposers _and_ uncalibrated?
109 uncalibrated_and_not_basis_p = any(
110 g.name not in self._target_basis
111 and (not has_cals_p or not dag.has_calibration_for(g))
112 for g in run
113 )
114
115 if rewriteable_and_in_basis_p and len(run) < len(new_circ):
116 # NOTE: This is short-circuited on calibrated gates, which we're timid about
117 # reducing.
118 warnings.warn(
119 f"Resynthesized {run} and got {new_circ}, "
120 f"but the original was native and the new value is longer. This "
121 f"indicates an efficiency bug in synthesis. Please report it by "
122 f"opening an issue here: "
123 f"https://github.com/Qiskit/qiskit-terra/issues/new/choose",
124 stacklevel=2,
125 )
126 # if we're outside of the basis set, we're obligated to logically decompose.
127 # if we're outside of the set of gates for which we have physical definitions,
128 # then we _try_ to decompose, using the results if we see improvement.
129 # NOTE: Here we use circuit length as a weak proxy for "improvement"; in reality,
130 # we care about something more like fidelity at runtime, which would mean,
131 # e.g., a preference for `RZGate`s over `RXGate`s. In fact, users sometimes
132 # express a preference for a "canonical form" of a circuit, which may come in
133 # the form of some parameter values, also not visible at the level of circuit
134 # length. Since we don't have a framework for the caller to programmatically
135 # express what they want here, we include some special casing for particular
136 # gates which we've promised to normalize --- but this is fragile and should
137 # ultimately be done away with.
138 if (
139 uncalibrated_and_not_basis_p
140 or (uncalibrated_p and len(run) > len(new_circ))
141 or isinstance(run[0].op, U3Gate)
142 ):
143 new_dag = circuit_to_dag(new_circ)
144 dag.substitute_node_with_dag(run[0], new_dag)
145 # Delete the other nodes in the run
146 for current_node in run[1:]:
147 dag.remove_op_node(current_node)
148 return dag
149
[end of qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py b/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py
--- a/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py
+++ b/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py
@@ -38,28 +38,31 @@
and the Euler basis.
"""
super().__init__()
+
self._target_basis = basis
self._decomposers = None
+
if basis:
- self._decomposers = []
+ self._decomposers = {}
basis_set = set(basis)
euler_basis_gates = one_qubit_decompose.ONE_QUBIT_EULER_BASIS_GATES
for euler_basis_name, gates in euler_basis_gates.items():
if set(gates).issubset(basis_set):
basis_copy = copy.copy(self._decomposers)
- for base in basis_copy:
+ for base in basis_copy.keys():
# check if gates are a superset of another basis
- # and if so, remove that basis
- if set(euler_basis_gates[base.basis]).issubset(set(gates)):
- self._decomposers.remove(base)
+ if set(base).issubset(set(gates)):
+ # if so, remove that basis
+ del self._decomposers[base]
# check if the gates are a subset of another basis
- elif set(gates).issubset(set(euler_basis_gates[base.basis])):
+ elif set(gates).issubset(set(base)):
+ # if so, don't bother
break
# if not a subset, add it to the list
else:
- self._decomposers.append(
- one_qubit_decompose.OneQubitEulerDecomposer(euler_basis_name)
- )
+ self._decomposers[
+ tuple(gates)
+ ] = one_qubit_decompose.OneQubitEulerDecomposer(euler_basis_name)
def run(self, dag):
"""Run the Optimize1qGatesDecomposition pass on `dag`.
@@ -70,7 +73,7 @@
Returns:
DAGCircuit: the optimized DAG.
"""
- if not self._decomposers:
+ if self._decomposers is None:
logger.info("Skipping pass because no basis is set")
return dag
runs = dag.collect_1q_runs()
@@ -86,21 +89,20 @@
if "u2" not in self._target_basis and "u1" not in self._target_basis:
continue
- new_circs = []
operator = run[0].op.to_matrix()
for gate in run[1:]:
operator = gate.op.to_matrix().dot(operator)
- for decomposer in self._decomposers:
- new_circs.append(decomposer._decompose(operator))
- if new_circs:
- new_circ = min(new_circs, key=len)
+
+ new_circs = {k: v._decompose(operator) for k, v in self._decomposers.items()}
+
+ if len(new_circs) > 0:
+ new_basis, new_circ = min(new_circs.items(), key=lambda x: len(x[1]))
# do we even have calibrations?
has_cals_p = dag.calibrations is not None and len(dag.calibrations) > 0
- # is this run all in the target set and also uncalibrated?
+ # is this run in the target set of this particular decomposer and also uncalibrated?
rewriteable_and_in_basis_p = all(
- g.name in self._target_basis
- and (not has_cals_p or not dag.has_calibration_for(g))
+ g.name in new_basis and (not has_cals_p or not dag.has_calibration_for(g))
for g in run
)
# does this run have uncalibrated gates?
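A brief aside on the shape of this change: keying the decomposers by their gate tuple lets the pass compare a run against the basis that produced the winning candidate rather than the full target basis, which is what triggered the spurious warning. The sketch below is a hypothetical, stripped-down version of that selection logic; the gate names and stub synthesis functions are placeholders, not Qiskit APIs.

```python
# Hypothetical sketch of dict-keyed candidate selection (not Qiskit internals).
decomposers = {
    ("rz", "sx", "x"): lambda op: ["rz", "sx", "rz"],  # stub synthesis routines
    ("u3",): lambda op: ["u3"],
}

run_gate_names = ["h", "h", "rz"]  # a made-up single-qubit run from a circuit

candidates = {basis: synth(None) for basis, synth in decomposers.items()}
best_basis, best_circ = min(candidates.items(), key=lambda kv: len(kv[1]))

# Warn only if the run was already native to the *winning candidate's* basis
# and still got longer; checking against the whole target basis is what
# produced the warning reported in this issue.
already_native = all(name in best_basis for name in run_gate_names)
should_warn = already_native and len(run_gate_names) < len(best_circ)
print(best_basis, best_circ, should_warn)
```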
| {"golden_diff": "diff --git a/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py b/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py\n--- a/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py\n+++ b/qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py\n@@ -38,28 +38,31 @@\n and the Euler basis.\n \"\"\"\n super().__init__()\n+\n self._target_basis = basis\n self._decomposers = None\n+\n if basis:\n- self._decomposers = []\n+ self._decomposers = {}\n basis_set = set(basis)\n euler_basis_gates = one_qubit_decompose.ONE_QUBIT_EULER_BASIS_GATES\n for euler_basis_name, gates in euler_basis_gates.items():\n if set(gates).issubset(basis_set):\n basis_copy = copy.copy(self._decomposers)\n- for base in basis_copy:\n+ for base in basis_copy.keys():\n # check if gates are a superset of another basis\n- # and if so, remove that basis\n- if set(euler_basis_gates[base.basis]).issubset(set(gates)):\n- self._decomposers.remove(base)\n+ if set(base).issubset(set(gates)):\n+ # if so, remove that basis\n+ del self._decomposers[base]\n # check if the gates are a subset of another basis\n- elif set(gates).issubset(set(euler_basis_gates[base.basis])):\n+ elif set(gates).issubset(set(base)):\n+ # if so, don't bother\n break\n # if not a subset, add it to the list\n else:\n- self._decomposers.append(\n- one_qubit_decompose.OneQubitEulerDecomposer(euler_basis_name)\n- )\n+ self._decomposers[\n+ tuple(gates)\n+ ] = one_qubit_decompose.OneQubitEulerDecomposer(euler_basis_name)\n \n def run(self, dag):\n \"\"\"Run the Optimize1qGatesDecomposition pass on `dag`.\n@@ -70,7 +73,7 @@\n Returns:\n DAGCircuit: the optimized DAG.\n \"\"\"\n- if not self._decomposers:\n+ if self._decomposers is None:\n logger.info(\"Skipping pass because no basis is set\")\n return dag\n runs = dag.collect_1q_runs()\n@@ -86,21 +89,20 @@\n if \"u2\" not in self._target_basis and \"u1\" not in self._target_basis:\n continue\n \n- new_circs = []\n operator = run[0].op.to_matrix()\n for gate in run[1:]:\n operator = gate.op.to_matrix().dot(operator)\n- for decomposer in self._decomposers:\n- new_circs.append(decomposer._decompose(operator))\n- if new_circs:\n- new_circ = min(new_circs, key=len)\n+\n+ new_circs = {k: v._decompose(operator) for k, v in self._decomposers.items()}\n+\n+ if len(new_circs) > 0:\n+ new_basis, new_circ = min(new_circs.items(), key=lambda x: len(x[1]))\n \n # do we even have calibrations?\n has_cals_p = dag.calibrations is not None and len(dag.calibrations) > 0\n- # is this run all in the target set and also uncalibrated?\n+ # is this run in the target set of this particular decomposer and also uncalibrated?\n rewriteable_and_in_basis_p = all(\n- g.name in self._target_basis\n- and (not has_cals_p or not dag.has_calibration_for(g))\n+ g.name in new_basis and (not has_cals_p or not dag.has_calibration_for(g))\n for g in run\n )\n # does this run have uncalibrated gates?\n", "issue": "Warning statement upon compiling `cz` gate into `['h', 'cx', 'rz', 'sx', 'x']` gate set\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Information\r\n\r\n- **Qiskit Terra version**: `0.18.0`\r\n- **Python version**: `3.9.6`\r\n- **Operating system**: `Pop!_OS 21.04 x86_64`\r\n\r\n### What is the current behavior?\r\nUpon updating from version `0.17.4`, there is a substantially amount of additional warning printing upon compiling cz gates to the basis gate 
set: `['h', 'cx', 'rz', 'sx', 'x']`:\r\n\r\n```\r\n/home/username/anaconda3/envs/qiskit-new/lib/python3.9/site-packages/qiskit/transpiler/runningpassmanager.py:166: UserWarning: Resynthesized [<qiskit.dagcircuit.dagnode.DAGNode object at 0x7f70be79e040>] and got global phase: \u03c0/4\r\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\u250c\u2500\u2500\u2500\u2500\u2510\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\r\nqr_0: \u2524 Rz(\u03c0/2) \u251c\u2524 \u221aX \u251c\u2524 Rz(\u03c0/2) \u251c\r\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\u2514\u2500\u2500\u2500\u2500\u2518\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518, but the original was native and the new value is longer. This indicates an efficiency bug in synthesis. Please report it by opening an issue here: https://github.com/Qiskit/qiskit-terra/issues/new/choose\r\n new_dag = pass_.run(dag)\r\n```\r\n\r\nThis doesn't appear to change the ability for the circuits to run correctly, it just fills up the terminal with a lot of non-needed statements, especially when running large algorithms requiring many cz gates.\r\n\r\n\r\n### Steps to reproduce the problem\r\n```\r\nfrom qiskit import QuantumCircuit, QuantumRegister\r\nfrom qiskit import transpile\r\n\r\nqr = QuantumRegister(2)\r\nqc_ = QuantumCircuit(qr)\r\n\r\nqc_.cz(qr[0], qr[1])\r\n\r\nqc = transpile(qc_, basis_gates=['h', 'cx', 'rz', 'sx', 'x'])\r\n```\r\n\r\n\r\n### What is the expected behavior?\r\nNot printing a ton of warning statements upon compiling to this basis gate set\r\n\r\n\r\n### Suggested solutions\r\nRemove the printing? The basis gates we are using is a pretty common basis gate set, and compiling away from the cz gate is the intended behavior.\r\n\r\n\n", "before_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Optimize chains of single-qubit gates using Euler 1q decomposer\"\"\"\n\nimport copy\nimport logging\nimport warnings\n\nimport numpy as np\n\nfrom qiskit.circuit.library.standard_gates import U3Gate\nfrom qiskit.transpiler.basepasses import TransformationPass\nfrom qiskit.quantum_info.synthesis import one_qubit_decompose\nfrom qiskit.converters import circuit_to_dag\n\nlogger = logging.getLogger(__name__)\n\n\nclass Optimize1qGatesDecomposition(TransformationPass):\n \"\"\"Optimize chains of single-qubit gates by combining them into a single gate.\"\"\"\n\n def __init__(self, basis=None):\n \"\"\"Optimize1qGatesDecomposition initializer.\n\n Args:\n basis (list[str]): Basis gates to consider, e.g. `['u3', 'cx']`. 
For the effects\n of this pass, the basis is the set intersection between the `basis` parameter\n and the Euler basis.\n \"\"\"\n super().__init__()\n self._target_basis = basis\n self._decomposers = None\n if basis:\n self._decomposers = []\n basis_set = set(basis)\n euler_basis_gates = one_qubit_decompose.ONE_QUBIT_EULER_BASIS_GATES\n for euler_basis_name, gates in euler_basis_gates.items():\n if set(gates).issubset(basis_set):\n basis_copy = copy.copy(self._decomposers)\n for base in basis_copy:\n # check if gates are a superset of another basis\n # and if so, remove that basis\n if set(euler_basis_gates[base.basis]).issubset(set(gates)):\n self._decomposers.remove(base)\n # check if the gates are a subset of another basis\n elif set(gates).issubset(set(euler_basis_gates[base.basis])):\n break\n # if not a subset, add it to the list\n else:\n self._decomposers.append(\n one_qubit_decompose.OneQubitEulerDecomposer(euler_basis_name)\n )\n\n def run(self, dag):\n \"\"\"Run the Optimize1qGatesDecomposition pass on `dag`.\n\n Args:\n dag (DAGCircuit): the DAG to be optimized.\n\n Returns:\n DAGCircuit: the optimized DAG.\n \"\"\"\n if not self._decomposers:\n logger.info(\"Skipping pass because no basis is set\")\n return dag\n runs = dag.collect_1q_runs()\n for run in runs:\n # SPECIAL CASE: Don't bother to optimize single U3 gates which are in the basis set.\n # The U3 decomposer is only going to emit a sequence of length 1 anyhow.\n if \"u3\" in self._target_basis and len(run) == 1 and isinstance(run[0].op, U3Gate):\n # Toss U3 gates equivalent to the identity; there we get off easy.\n if np.allclose(run[0].op.to_matrix(), np.eye(2), 1e-15, 0):\n dag.remove_op_node(run[0])\n continue\n # We might rewrite into lower `u`s if they're available.\n if \"u2\" not in self._target_basis and \"u1\" not in self._target_basis:\n continue\n\n new_circs = []\n operator = run[0].op.to_matrix()\n for gate in run[1:]:\n operator = gate.op.to_matrix().dot(operator)\n for decomposer in self._decomposers:\n new_circs.append(decomposer._decompose(operator))\n if new_circs:\n new_circ = min(new_circs, key=len)\n\n # do we even have calibrations?\n has_cals_p = dag.calibrations is not None and len(dag.calibrations) > 0\n # is this run all in the target set and also uncalibrated?\n rewriteable_and_in_basis_p = all(\n g.name in self._target_basis\n and (not has_cals_p or not dag.has_calibration_for(g))\n for g in run\n )\n # does this run have uncalibrated gates?\n uncalibrated_p = not has_cals_p or any(not dag.has_calibration_for(g) for g in run)\n # does this run have gates not in the image of ._decomposers _and_ uncalibrated?\n uncalibrated_and_not_basis_p = any(\n g.name not in self._target_basis\n and (not has_cals_p or not dag.has_calibration_for(g))\n for g in run\n )\n\n if rewriteable_and_in_basis_p and len(run) < len(new_circ):\n # NOTE: This is short-circuited on calibrated gates, which we're timid about\n # reducing.\n warnings.warn(\n f\"Resynthesized {run} and got {new_circ}, \"\n f\"but the original was native and the new value is longer. This \"\n f\"indicates an efficiency bug in synthesis. 
Please report it by \"\n f\"opening an issue here: \"\n f\"https://github.com/Qiskit/qiskit-terra/issues/new/choose\",\n stacklevel=2,\n )\n # if we're outside of the basis set, we're obligated to logically decompose.\n # if we're outside of the set of gates for which we have physical definitions,\n # then we _try_ to decompose, using the results if we see improvement.\n # NOTE: Here we use circuit length as a weak proxy for \"improvement\"; in reality,\n # we care about something more like fidelity at runtime, which would mean,\n # e.g., a preference for `RZGate`s over `RXGate`s. In fact, users sometimes\n # express a preference for a \"canonical form\" of a circuit, which may come in\n # the form of some parameter values, also not visible at the level of circuit\n # length. Since we don't have a framework for the caller to programmatically\n # express what they want here, we include some special casing for particular\n # gates which we've promised to normalize --- but this is fragile and should\n # ultimately be done away with.\n if (\n uncalibrated_and_not_basis_p\n or (uncalibrated_p and len(run) > len(new_circ))\n or isinstance(run[0].op, U3Gate)\n ):\n new_dag = circuit_to_dag(new_circ)\n dag.substitute_node_with_dag(run[0], new_dag)\n # Delete the other nodes in the run\n for current_node in run[1:]:\n dag.remove_op_node(current_node)\n return dag\n", "path": "qiskit/transpiler/passes/optimization/optimize_1q_decomposition.py"}]} | 3,068 | 931 |
gh_patches_debug_548 | rasdani/github-patches | git_diff | Gallopsled__pwntools-532 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bpython
Hi,
Unfortunately, pwntools doesn't seem to work with bpython 0.12 in conjunction with Python 2.7.9.
from pwn import *
results in:
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/pwn/**init**.py", line 2, in <module>
from .toplevel import *
File "/usr/local/lib/python2.7/dist-packages/pwn/toplevel.py", line 2, in <module>
from pwnlib import *
File "/usr/local/lib/python2.7/dist-packages/pwnlib/**init**.py", line 10, in <module>
from . import \
File "/usr/local/lib/python2.7/dist-packages/pwnlib/asm.py", line 45, in <module>
from . import log
File "/usr/local/lib/python2.7/dist-packages/pwnlib/log.py", line 69, in <module>
from .term import spinners, text
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/**init**.py", line 1, in <module>
from . import key, readline, text, termcap, keymap, term
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/readline.py", line 2, in <module>
from . import term, text
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py", line 111, in <module>
sys.modules[__name__] = Module()
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py", line 22, in __init__
self.num_colors = termcap.get('colors', default = 8)
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/termcap.py", line 15, in get
init()
File "/usr/local/lib/python2.7/dist-packages/pwnlib/term/termcap.py", line 39, in init
curses.setupterm()
TypeError: argument must be an int, or have a fileno() method.
</issue>
<code>
[start of pwnlib/term/termcap.py]
1 __all__ = ['get']
2 import os, curses
3
4 cache = None
5 def get(cap, *args, **kwargs):
6 default = kwargs.pop('default', '')
7
8 if 'PWNLIB_NOTERM' in os.environ:
9 return ''
10
11 if kwargs != {}:
12 raise TypeError("get(): No such argument %r" % kwargs.popitem()[0])
13
14 if cache == None:
15 init()
16 s = cache.get(cap)
17 if not s:
18 s = curses.tigetstr(cap)
19 if s == None:
20 s = curses.tigetnum(cap)
21 if s == -2:
22 s = curses.tigetflag(cap)
23 if s == -1:
24 # default to empty string so tparm doesn't fail
25 s = ''
26 else:
27 s = bool(s)
28 cache[cap] = s
29 # if `s' is not set `curses.tparm' will throw an error if given arguments
30 if args and s:
31 return curses.tparm(s, *args)
32 else:
33 return s
34
35 def init():
36 global cache
37
38 if 'PWNLIB_NOTERM' not in os.environ:
39 curses.setupterm()
40
41 cache = {}
42
[end of pwnlib/term/termcap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwnlib/term/termcap.py b/pwnlib/term/termcap.py
--- a/pwnlib/term/termcap.py
+++ b/pwnlib/term/termcap.py
@@ -36,6 +36,10 @@
global cache
if 'PWNLIB_NOTERM' not in os.environ:
- curses.setupterm()
+ # Fix for BPython
+ try:
+ curses.setupterm()
+ except:
+ pass
cache = {}
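For readers unfamiliar with the failure mode: `curses.setupterm()` needs a usable file descriptor, and bpython replaces `sys.stdout` with a wrapper that curses apparently cannot use, hence the `TypeError` in the traceback. A minimal standalone sketch of the guarded initialisation (hypothetical function name, not pwntools code):

```python
# Guarded terminal-capability setup: fall back to an empty cache when
# curses cannot inspect stdout (e.g. under bpython or other stdout wrappers).
import curses

cache = {}

def init_termcap():
    try:
        curses.setupterm()   # may raise if stdout has no usable fileno()
    except Exception:
        pass                 # degrade gracefully: no terminal capabilities
    return cache
```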
| {"golden_diff": "diff --git a/pwnlib/term/termcap.py b/pwnlib/term/termcap.py\n--- a/pwnlib/term/termcap.py\n+++ b/pwnlib/term/termcap.py\n@@ -36,6 +36,10 @@\n global cache\n \n if 'PWNLIB_NOTERM' not in os.environ:\n- curses.setupterm()\n+ # Fix for BPython\n+ try:\n+ curses.setupterm()\n+ except:\n+ pass\n \n cache = {}\n", "issue": "Bpython\nHi,\n\nUnfortunately pwntools doesn't seem to work with bpython 0.12 in conjunction of python 2.7.9.\n\nfrom pwn import *\n\nresults in:\n\nTraceback (most recent call last):\n File \"<input>\", line 1, in <module>\n File \"/usr/local/lib/python2.7/dist-packages/pwn/**init**.py\", line 2, in <module>\n from .toplevel import *\n File \"/usr/local/lib/python2.7/dist-packages/pwn/toplevel.py\", line 2, in <module>\n from pwnlib import *\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/**init**.py\", line 10, in <module>\n from . import \\\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/asm.py\", line 45, in <module>\n from . import log\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/log.py\", line 69, in <module>\n from .term import spinners, text\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/**init**.py\", line 1, in <module>\n from . import key, readline, text, termcap, keymap, term\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/readline.py\", line 2, in <module>\n from . import term, text\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py\", line 111, in <module>\n sys.modules[**name**] = Module()\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/text.py\", line 22, in **init**\n self.num_colors = termcap.get('colors', default = 8)\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/termcap.py\", line 15, in get\n init()\n File \"/usr/local/lib/python2.7/dist-packages/pwnlib/term/termcap.py\", line 39, in init\n curses.setupterm()\nTypeError: argument must be an int, or have a fileno() method.\n\n", "before_files": [{"content": "__all__ = ['get']\nimport os, curses\n\ncache = None\ndef get(cap, *args, **kwargs):\n default = kwargs.pop('default', '')\n\n if 'PWNLIB_NOTERM' in os.environ:\n return ''\n\n if kwargs != {}:\n raise TypeError(\"get(): No such argument %r\" % kwargs.popitem()[0])\n\n if cache == None:\n init()\n s = cache.get(cap)\n if not s:\n s = curses.tigetstr(cap)\n if s == None:\n s = curses.tigetnum(cap)\n if s == -2:\n s = curses.tigetflag(cap)\n if s == -1:\n # default to empty string so tparm doesn't fail\n s = ''\n else:\n s = bool(s)\n cache[cap] = s\n # if `s' is not set `curses.tparm' will throw an error if given arguments\n if args and s:\n return curses.tparm(s, *args)\n else:\n return s\n\ndef init():\n global cache\n\n if 'PWNLIB_NOTERM' not in os.environ:\n curses.setupterm()\n\n cache = {}\n", "path": "pwnlib/term/termcap.py"}]} | 1,378 | 117 |
gh_patches_debug_1990 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2137 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug in project document category API
## Test plan
The project_document_category should not give an error. E.g. `http://rsr.localdev.akvo.org/rest/v1/project_document_category/` should load.
## Issue description
The project document category API gives an error. See http://sentry.support.akvo-ops.org/rsr/test/group/879/, or on the Test server: http://rsr.test.akvo.org/rest/v1/project_document_category/.
</issue>
<code>
[start of akvo/rest/views/project_document.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import ProjectDocument, ProjectDocumentCategory
9
10 from ..serializers import ProjectDocumentSerializer, ProjectDocumentCategorySerializer
11 from ..viewsets import PublicProjectViewSet
12
13
14 class ProjectDocumentViewSet(PublicProjectViewSet):
15 """
16 """
17 queryset = ProjectDocument.objects.all()
18 serializer_class = ProjectDocumentSerializer
19
20
21 class ProjectDocumentCategoryViewSet(PublicProjectViewSet):
22 """
23 """
24 queryset = ProjectDocumentCategory.objects.all()
25 serializer_class = ProjectDocumentCategorySerializer
26 filter_fields = ('document__project', 'document', 'category', )
27
[end of akvo/rest/views/project_document.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rest/views/project_document.py b/akvo/rest/views/project_document.py
--- a/akvo/rest/views/project_document.py
+++ b/akvo/rest/views/project_document.py
@@ -24,3 +24,4 @@
queryset = ProjectDocumentCategory.objects.all()
serializer_class = ProjectDocumentCategorySerializer
filter_fields = ('document__project', 'document', 'category', )
+ project_relation = 'document__project__'
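A short aside on what the added attribute appears to do: the document-category resource reaches its project only through `document__project`, and the relation prefix presumably tells the shared public-project viewset how to build that filter. The snippet below only illustrates the Django-style lookup prefix, with hypothetical helper names:

```python
# Illustration of the lookup prefix (hypothetical helper, Django-style ORM kwargs).
PROJECT_RELATION = 'document__project__'

def project_filter_kwargs(project_ids):
    return {PROJECT_RELATION + 'in': project_ids}

print(project_filter_kwargs([1, 2, 3]))  # {'document__project__in': [1, 2, 3]}
```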
| {"golden_diff": "diff --git a/akvo/rest/views/project_document.py b/akvo/rest/views/project_document.py\n--- a/akvo/rest/views/project_document.py\n+++ b/akvo/rest/views/project_document.py\n@@ -24,3 +24,4 @@\n queryset = ProjectDocumentCategory.objects.all()\n serializer_class = ProjectDocumentCategorySerializer\n filter_fields = ('document__project', 'document', 'category', )\n+ project_relation = 'document__project__'\n", "issue": "Bug in project document category API\n## Test plan\n\nThe project_document_category should not give an error. E.g. `http://rsr.localdev.akvo.org/rest/v1/project_document_category/` should load.\n## Issue description\n\nThe project document category API gives an error. See http://sentry.support.akvo-ops.org/rsr/test/group/879/, or on the Test server: http://rsr.test.akvo.org/rest/v1/project_document_category/.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import ProjectDocument, ProjectDocumentCategory\n\nfrom ..serializers import ProjectDocumentSerializer, ProjectDocumentCategorySerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass ProjectDocumentViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = ProjectDocument.objects.all()\n serializer_class = ProjectDocumentSerializer\n\n\nclass ProjectDocumentCategoryViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = ProjectDocumentCategory.objects.all()\n serializer_class = ProjectDocumentCategorySerializer\n filter_fields = ('document__project', 'document', 'category', )\n", "path": "akvo/rest/views/project_document.py"}]} | 873 | 101 |
gh_patches_debug_7593 | rasdani/github-patches | git_diff | python-pillow__Pillow-1230 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot identify XBM file created with filename including underscore
Pillow 2.8.1, Python 2.7.6 (Anaconda 2.2.0), Windows 7 64bit
When I create git_hub.xbm (with ImageMagick), the created file's header contains lines like this.
``` C
#define git_hub_width 32
#define git_hub_height 32
```
In XbmImagePlugin.py, the regular expression used to extract the XBM header doesn't match a macro whose name itself contains an underscore, like `git_hub_width` above. This causes an IOError.
``` python
# XBM header
xbm_head = re.compile(
b"\s*#define[ \t]+[^_]*_width[ \t]+(?P<width>[0-9]+)[\r\n]+"
b"#define[ \t]+[^_]*_height[ \t]+(?P<height>[0-9]+)[\r\n]+"
b"(?P<hotspot>"
b"#define[ \t]+[^_]*_x_hot[ \t]+(?P<xhot>[0-9]+)[\r\n]+"
b"#define[ \t]+[^_]*_y_hot[ \t]+(?P<yhot>[0-9]+)[\r\n]+"
b")?"
b"[\\000-\\377]*_bits\\[\\]"
)
```
</issue>
<code>
[start of PIL/XbmImagePlugin.py]
1 #
2 # The Python Imaging Library.
3 # $Id$
4 #
5 # XBM File handling
6 #
7 # History:
8 # 1995-09-08 fl Created
9 # 1996-11-01 fl Added save support
10 # 1997-07-07 fl Made header parser more tolerant
11 # 1997-07-22 fl Fixed yet another parser bug
12 # 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)
13 # 2001-05-13 fl Added hotspot handling (based on code from Bernhard Herzog)
14 # 2004-02-24 fl Allow some whitespace before first #define
15 #
16 # Copyright (c) 1997-2004 by Secret Labs AB
17 # Copyright (c) 1996-1997 by Fredrik Lundh
18 #
19 # See the README file for information on usage and redistribution.
20 #
21
22 __version__ = "0.6"
23
24 import re
25 from PIL import Image, ImageFile
26
27 # XBM header
28 xbm_head = re.compile(
29 b"\s*#define[ \t]+[^_]*_width[ \t]+(?P<width>[0-9]+)[\r\n]+"
30 b"#define[ \t]+[^_]*_height[ \t]+(?P<height>[0-9]+)[\r\n]+"
31 b"(?P<hotspot>"
32 b"#define[ \t]+[^_]*_x_hot[ \t]+(?P<xhot>[0-9]+)[\r\n]+"
33 b"#define[ \t]+[^_]*_y_hot[ \t]+(?P<yhot>[0-9]+)[\r\n]+"
34 b")?"
35 b"[\\000-\\377]*_bits\\[\\]"
36 )
37
38
39 def _accept(prefix):
40 return prefix.lstrip()[:7] == b"#define"
41
42
43 ##
44 # Image plugin for X11 bitmaps.
45
46 class XbmImageFile(ImageFile.ImageFile):
47
48 format = "XBM"
49 format_description = "X11 Bitmap"
50
51 def _open(self):
52
53 m = xbm_head.match(self.fp.read(512))
54
55 if m:
56
57 xsize = int(m.group("width"))
58 ysize = int(m.group("height"))
59
60 if m.group("hotspot"):
61 self.info["hotspot"] = (
62 int(m.group("xhot")), int(m.group("yhot"))
63 )
64
65 self.mode = "1"
66 self.size = xsize, ysize
67
68 self.tile = [("xbm", (0, 0)+self.size, m.end(), None)]
69
70
71 def _save(im, fp, filename):
72
73 if im.mode != "1":
74 raise IOError("cannot write mode %s as XBM" % im.mode)
75
76 fp.write(("#define im_width %d\n" % im.size[0]).encode('ascii'))
77 fp.write(("#define im_height %d\n" % im.size[1]).encode('ascii'))
78
79 hotspot = im.encoderinfo.get("hotspot")
80 if hotspot:
81 fp.write(("#define im_x_hot %d\n" % hotspot[0]).encode('ascii'))
82 fp.write(("#define im_y_hot %d\n" % hotspot[1]).encode('ascii'))
83
84 fp.write(b"static char im_bits[] = {\n")
85
86 ImageFile._save(im, fp, [("xbm", (0, 0)+im.size, 0, None)])
87
88 fp.write(b"};\n")
89
90
91 Image.register_open("XBM", XbmImageFile, _accept)
92 Image.register_save("XBM", _save)
93
94 Image.register_extension("XBM", ".xbm")
95
96 Image.register_mime("XBM", "image/xbm")
97
[end of PIL/XbmImagePlugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PIL/XbmImagePlugin.py b/PIL/XbmImagePlugin.py
--- a/PIL/XbmImagePlugin.py
+++ b/PIL/XbmImagePlugin.py
@@ -26,8 +26,8 @@
# XBM header
xbm_head = re.compile(
- b"\s*#define[ \t]+[^_]*_width[ \t]+(?P<width>[0-9]+)[\r\n]+"
- b"#define[ \t]+[^_]*_height[ \t]+(?P<height>[0-9]+)[\r\n]+"
+ b"\s*#define[ \t]+.*_width[ \t]+(?P<width>[0-9]+)[\r\n]+"
+ b"#define[ \t]+.*_height[ \t]+(?P<height>[0-9]+)[\r\n]+"
b"(?P<hotspot>"
b"#define[ \t]+[^_]*_x_hot[ \t]+(?P<xhot>[0-9]+)[\r\n]+"
b"#define[ \t]+[^_]*_y_hot[ \t]+(?P<yhot>[0-9]+)[\r\n]+"
| {"golden_diff": "diff --git a/PIL/XbmImagePlugin.py b/PIL/XbmImagePlugin.py\n--- a/PIL/XbmImagePlugin.py\n+++ b/PIL/XbmImagePlugin.py\n@@ -26,8 +26,8 @@\n \n # XBM header\n xbm_head = re.compile(\n- b\"\\s*#define[ \\t]+[^_]*_width[ \\t]+(?P<width>[0-9]+)[\\r\\n]+\"\n- b\"#define[ \\t]+[^_]*_height[ \\t]+(?P<height>[0-9]+)[\\r\\n]+\"\n+ b\"\\s*#define[ \\t]+.*_width[ \\t]+(?P<width>[0-9]+)[\\r\\n]+\"\n+ b\"#define[ \\t]+.*_height[ \\t]+(?P<height>[0-9]+)[\\r\\n]+\"\n b\"(?P<hotspot>\"\n b\"#define[ \\t]+[^_]*_x_hot[ \\t]+(?P<xhot>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_y_hot[ \\t]+(?P<yhot>[0-9]+)[\\r\\n]+\"\n", "issue": "Cannot identify XBM file created with filename including underscore\nPillow 2.8.1, Python 2.7.6 (Anaconda 2.2.0), Windows 7 64bit\n\nWhen I create git_hub.xbm (with ImageMagick), created file's header contains lines like this.\n\n``` C\n#define git_hub_width 32\n#define git_hub_height 32\n```\n\nIn XbmImagePlugin.py, regular expression to extract XBM header doesn't match defined macro with more than two underscores like above.This causes an IOError.\n\n``` python\n# XBM header\nxbm_head = re.compile(\n b\"\\s*#define[ \\t]+[^_]*_width[ \\t]+(?P<width>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_height[ \\t]+(?P<height>[0-9]+)[\\r\\n]+\"\n b\"(?P<hotspot>\"\n b\"#define[ \\t]+[^_]*_x_hot[ \\t]+(?P<xhot>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_y_hot[ \\t]+(?P<yhot>[0-9]+)[\\r\\n]+\"\n b\")?\"\n b\"[\\\\000-\\\\377]*_bits\\\\[\\\\]\"\n)\n```\n\n", "before_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# XBM File handling\n#\n# History:\n# 1995-09-08 fl Created\n# 1996-11-01 fl Added save support\n# 1997-07-07 fl Made header parser more tolerant\n# 1997-07-22 fl Fixed yet another parser bug\n# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.4)\n# 2001-05-13 fl Added hotspot handling (based on code from Bernhard Herzog)\n# 2004-02-24 fl Allow some whitespace before first #define\n#\n# Copyright (c) 1997-2004 by Secret Labs AB\n# Copyright (c) 1996-1997 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\n__version__ = \"0.6\"\n\nimport re\nfrom PIL import Image, ImageFile\n\n# XBM header\nxbm_head = re.compile(\n b\"\\s*#define[ \\t]+[^_]*_width[ \\t]+(?P<width>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_height[ \\t]+(?P<height>[0-9]+)[\\r\\n]+\"\n b\"(?P<hotspot>\"\n b\"#define[ \\t]+[^_]*_x_hot[ \\t]+(?P<xhot>[0-9]+)[\\r\\n]+\"\n b\"#define[ \\t]+[^_]*_y_hot[ \\t]+(?P<yhot>[0-9]+)[\\r\\n]+\"\n b\")?\"\n b\"[\\\\000-\\\\377]*_bits\\\\[\\\\]\"\n)\n\n\ndef _accept(prefix):\n return prefix.lstrip()[:7] == b\"#define\"\n\n\n##\n# Image plugin for X11 bitmaps.\n\nclass XbmImageFile(ImageFile.ImageFile):\n\n format = \"XBM\"\n format_description = \"X11 Bitmap\"\n\n def _open(self):\n\n m = xbm_head.match(self.fp.read(512))\n\n if m:\n\n xsize = int(m.group(\"width\"))\n ysize = int(m.group(\"height\"))\n\n if m.group(\"hotspot\"):\n self.info[\"hotspot\"] = (\n int(m.group(\"xhot\")), int(m.group(\"yhot\"))\n )\n\n self.mode = \"1\"\n self.size = xsize, ysize\n\n self.tile = [(\"xbm\", (0, 0)+self.size, m.end(), None)]\n\n\ndef _save(im, fp, filename):\n\n if im.mode != \"1\":\n raise IOError(\"cannot write mode %s as XBM\" % im.mode)\n\n fp.write((\"#define im_width %d\\n\" % im.size[0]).encode('ascii'))\n fp.write((\"#define im_height %d\\n\" % im.size[1]).encode('ascii'))\n\n hotspot = im.encoderinfo.get(\"hotspot\")\n if hotspot:\n fp.write((\"#define im_x_hot %d\\n\" % 
hotspot[0]).encode('ascii'))\n fp.write((\"#define im_y_hot %d\\n\" % hotspot[1]).encode('ascii'))\n\n fp.write(b\"static char im_bits[] = {\\n\")\n\n ImageFile._save(im, fp, [(\"xbm\", (0, 0)+im.size, 0, None)])\n\n fp.write(b\"};\\n\")\n\n\nImage.register_open(\"XBM\", XbmImageFile, _accept)\nImage.register_save(\"XBM\", _save)\n\nImage.register_extension(\"XBM\", \".xbm\")\n\nImage.register_mime(\"XBM\", \"image/xbm\")\n", "path": "PIL/XbmImagePlugin.py"}]} | 1,896 | 274 |
gh_patches_debug_18857 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-5807 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Codecommit and Codepipeline missing tag filters and actions in 0.9.2.0
**Describe the bug**
Codecommit and codepipeline tagging was added per the release notes, but it doesn't come up in the schema when you run `custodian schema codecommit` or `custodian schema codepipeline`.
**To Reproduce**
Steps to reproduce the behavior:
`custodian schema codecommit` or `custodian schema codepipeline`
**Expected behavior**
Expecting to see the marked-for-op filter and the tag and mark-for-op actions available for the two resources.
**Background (please complete the following information):**
- OS: [e.g. OSX 10.15] Ubuntu v20
- Python Version: [e.g. python 3.8.1] 3.8
- Custodian Version: [e.g. 0.8.46.1] 0.9.2.0
- Tool Version: [if applicable]
- Cloud Provider: [e.g. gcp, aws, azure] aws
- Policy: [please exclude any account/sensitive information]
```yaml
policies:
- name: check-buckets
resource: aws.s3
```
- Traceback: [if applicable, please exclude sensitive/account information]
- `custodian version --debug` output
**Additional context**
Add any other context about the problem here.
</issue>
<code>
[start of c7n/resources/code.py]
1 # Copyright 2017 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from botocore.exceptions import ClientError
15 import jmespath
16
17 from c7n.actions import BaseAction
18 from c7n.filters.vpc import SubnetFilter, SecurityGroupFilter, VpcFilter
19 from c7n.manager import resources
20 from c7n.query import QueryResourceManager, DescribeSource, ConfigSource, TypeInfo
21 from c7n.tags import universal_augment
22 from c7n.utils import local_session, type_schema
23
24 from .securityhub import OtherResourcePostFinding
25
26
27 @resources.register('codecommit')
28 class CodeRepository(QueryResourceManager):
29
30 class resource_type(TypeInfo):
31 service = 'codecommit'
32 enum_spec = ('list_repositories', 'repositories', None)
33 batch_detail_spec = (
34 'batch_get_repositories', 'repositoryNames', 'repositoryName',
35 'repositories', None)
36 name = id = 'repositoryName'
37 arn = "Arn"
38 date = 'creationDate'
39 cfn_type = 'AWS::CodeCommit::Repository'
40 universal_tagging = object()
41
42 def get_resources(self, ids, cache=True):
43 return universal_augment(self, self.augment([{'repositoryName': i} for i in ids]))
44
45
46 @CodeRepository.action_registry.register('delete')
47 class DeleteRepository(BaseAction):
48 """Action to delete code commit
49
50 It is recommended to use a filter to avoid unwanted deletion of repos
51
52 :example:
53
54 .. code-block:: yaml
55
56 policies:
57 - name: codecommit-delete
58 resource: codecommit
59 actions:
60 - delete
61 """
62
63 schema = type_schema('delete')
64 permissions = ("codecommit:DeleteRepository",)
65
66 def process(self, repositories):
67 client = local_session(
68 self.manager.session_factory).client('codecommit')
69 for r in repositories:
70 self.process_repository(client, r)
71
72 def process_repository(self, client, repository):
73 try:
74 client.delete_repository(repositoryName=repository['repositoryName'])
75 except ClientError as e:
76 self.log.exception(
77 "Exception deleting repo:\n %s" % e)
78
79
80 class DescribeBuild(DescribeSource):
81
82 def augment(self, resources):
83 return universal_augment(
84 self.manager,
85 super(DescribeBuild, self).augment(resources))
86
87
88 @resources.register('codebuild')
89 class CodeBuildProject(QueryResourceManager):
90
91 class resource_type(TypeInfo):
92 service = 'codebuild'
93 enum_spec = ('list_projects', 'projects', None)
94 batch_detail_spec = (
95 'batch_get_projects', 'names', None, 'projects', None)
96 name = id = 'name'
97 arn = 'arn'
98 date = 'created'
99 dimension = 'ProjectName'
100 cfn_type = config_type = "AWS::CodeBuild::Project"
101 arn_type = 'project'
102 universal_taggable = object()
103
104 source_mapping = {
105 'describe': DescribeBuild,
106 'config': ConfigSource
107 }
108
109
110 @CodeBuildProject.filter_registry.register('subnet')
111 class BuildSubnetFilter(SubnetFilter):
112
113 RelatedIdsExpression = "vpcConfig.subnets[]"
114
115
116 @CodeBuildProject.filter_registry.register('security-group')
117 class BuildSecurityGroupFilter(SecurityGroupFilter):
118
119 RelatedIdsExpression = "vpcConfig.securityGroupIds[]"
120
121
122 @CodeBuildProject.filter_registry.register('vpc')
123 class BuildVpcFilter(VpcFilter):
124
125 RelatedIdsExpression = "vpcConfig.vpcId"
126
127
128 @CodeBuildProject.action_registry.register('post-finding')
129 class BuildPostFinding(OtherResourcePostFinding):
130
131 resource_type = 'AwsCodeBuildProject'
132
133 def format_resource(self, r):
134 envelope, payload = self.format_envelope(r)
135 payload.update(self.filter_empty({
136 'Name': r['name'],
137 'EncryptionKey': r['encryptionKey'],
138 'Environment': self.filter_empty({
139 'Type': r['environment']['type'],
140 'Certificate': r['environment'].get('certificate'),
141 'RegistryCredential': self.filter_empty({
142 'Credential': jmespath.search(
143 'environment.registryCredential.credential', r),
144 'CredentialProvider': jmespath.search(
145 'environment.registryCredential.credentialProvider', r)
146 }),
147 'ImagePullCredentialsType': r['environment'].get(
148 'imagePullCredentialsType')
149 }),
150 'ServiceRole': r['serviceRole'],
151 'VpcConfig': self.filter_empty({
152 'VpcId': jmespath.search('vpcConfig.vpcId', r),
153 'Subnets': jmespath.search('vpcConfig.subnets', r),
154 'SecurityGroupIds': jmespath.search('vpcConfig.securityGroupIds', r)
155 }),
156 'Source': self.filter_empty({
157 'Type': jmespath.search('source.type', r),
158 'Location': jmespath.search('source.location', r),
159 'GitCloneDepth': jmespath.search('source.gitCloneDepth', r)
160 }),
161 }))
162 return envelope
163
164
165 @CodeBuildProject.action_registry.register('delete')
166 class DeleteProject(BaseAction):
167 """Action to delete code build
168
169 It is recommended to use a filter to avoid unwanted deletion of builds
170
171 :example:
172
173 .. code-block:: yaml
174
175 policies:
176 - name: codebuild-delete
177 resource: codebuild
178 actions:
179 - delete
180 """
181
182 schema = type_schema('delete')
183 permissions = ("codebuild:DeleteProject",)
184
185 def process(self, projects):
186 client = local_session(self.manager.session_factory).client('codebuild')
187 for p in projects:
188 self.process_project(client, p)
189
190 def process_project(self, client, project):
191
192 try:
193 client.delete_project(name=project['name'])
194 except ClientError as e:
195 self.log.exception(
196 "Exception deleting project:\n %s" % e)
197
198
199 class DescribePipeline(DescribeSource):
200
201 def augment(self, resources):
202 resources = super().augment(resources)
203 return universal_augment(self.manager, resources)
204
205
206 @resources.register('codepipeline')
207 class CodeDeployPipeline(QueryResourceManager):
208
209 class resource_type(TypeInfo):
210 service = 'codepipeline'
211 enum_spec = ('list_pipelines', 'pipelines', None)
212 detail_spec = ('get_pipeline', 'name', 'name', 'pipeline')
213 name = id = 'name'
214 date = 'created'
215 # Note this is purposeful, codepipeline don't have a separate type specifier.
216 arn_type = ""
217 cfn_type = config_type = "AWS::CodePipeline::Pipeline"
218 universal_tagging = object()
219
220 source_mapping = {
221 'describe': DescribePipeline,
222 'config': ConfigSource
223 }
224
225
226 @CodeDeployPipeline.action_registry.register('delete')
227 class DeletePipeline(BaseAction):
228
229 schema = type_schema('delete')
230 permissions = ('codepipeline:DeletePipeline',)
231
232 def process(self, resources):
233 client = local_session(self.manager.session_factory).client('codepipeline')
234 for r in resources:
235 try:
236 self.manager.retry(client.delete_pipeline, name=r['name'])
237 except client.exceptions.PipelineNotFoundException:
238 continue
239
[end of c7n/resources/code.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/c7n/resources/code.py b/c7n/resources/code.py
--- a/c7n/resources/code.py
+++ b/c7n/resources/code.py
@@ -37,7 +37,7 @@
arn = "Arn"
date = 'creationDate'
cfn_type = 'AWS::CodeCommit::Repository'
- universal_tagging = object()
+ universal_taggable = object()
def get_resources(self, ids, cache=True):
return universal_augment(self, self.augment([{'repositoryName': i} for i in ids]))
@@ -215,7 +215,7 @@
# Note this is purposeful, codepipeline don't have a separate type specifier.
arn_type = ""
cfn_type = config_type = "AWS::CodePipeline::Pipeline"
- universal_tagging = object()
+ universal_taggable = object()
source_mapping = {
'describe': DescribePipeline,
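An aside on why the one-word rename matters: Cloud Custodian registers the universal tag filters and actions by looking for a specific class attribute, so the misspelled `universal_tagging` was silently ignored and nothing showed up in the schema. The sketch below is a generic illustration of that attribute-driven registration pattern (hypothetical names, not c7n internals):

```python
# Generic illustration: registration keyed on an exact attribute name silently
# skips classes that define a near-miss spelling.
class ResourceA:
    universal_taggable = object()   # picked up -> tag filters/actions registered

class ResourceB:
    universal_tagging = object()    # typo -> nothing registered, as in this bug

def registers_tagging(cls):
    return hasattr(cls, "universal_taggable")

print(registers_tagging(ResourceA), registers_tagging(ResourceB))  # True False
```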
| {"golden_diff": "diff --git a/c7n/resources/code.py b/c7n/resources/code.py\n--- a/c7n/resources/code.py\n+++ b/c7n/resources/code.py\n@@ -37,7 +37,7 @@\n arn = \"Arn\"\n date = 'creationDate'\n cfn_type = 'AWS::CodeCommit::Repository'\n- universal_tagging = object()\n+ universal_taggable = object()\n \n def get_resources(self, ids, cache=True):\n return universal_augment(self, self.augment([{'repositoryName': i} for i in ids]))\n@@ -215,7 +215,7 @@\n # Note this is purposeful, codepipeline don't have a separate type specifier.\n arn_type = \"\"\n cfn_type = config_type = \"AWS::CodePipeline::Pipeline\"\n- universal_tagging = object()\n+ universal_taggable = object()\n \n source_mapping = {\n 'describe': DescribePipeline,\n", "issue": "Codecommit and Codepipeline missing tag filters and actions in 0.9.2.0\n**Describe the bug**\r\nCodecommit and codepipeline tagging was added per the release notes but it doesn't come up in the schema when you do a custodian schema codecommit or codepipeline\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n custodian schema codecommit or codepipeline\r\n**Expected behavior**\r\n Expecting to see the marked-for-op filter and tag and mark-for-op actions avaialble for the 2 resources\r\n\r\n\r\n**Background (please complete the following information):**\r\n - OS: [e.g. OSX 10.15] Ubuntu v20\r\n - Python Version: [e.g. python 3.8.1] 3.8\r\n - Custodian Version: [e.g. 0.8.46.1] 0.9.2.0\r\n - Tool Version: [if applicable]\r\n - Cloud Provider: [e.g. gcp, aws, azure] aws\r\n - Policy: [please exclude any account/sensitive information]\r\n```yaml\r\npolicies: \r\n - name: check-buckets\r\n resource: aws.s3\r\n```\r\n - Traceback: [if applicable, please exclude sensitive/account information]\r\n - `custodian version --debug` output\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "# Copyright 2017 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom botocore.exceptions import ClientError\nimport jmespath\n\nfrom c7n.actions import BaseAction\nfrom c7n.filters.vpc import SubnetFilter, SecurityGroupFilter, VpcFilter\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager, DescribeSource, ConfigSource, TypeInfo\nfrom c7n.tags import universal_augment\nfrom c7n.utils import local_session, type_schema\n\nfrom .securityhub import OtherResourcePostFinding\n\n\[email protected]('codecommit')\nclass CodeRepository(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'codecommit'\n enum_spec = ('list_repositories', 'repositories', None)\n batch_detail_spec = (\n 'batch_get_repositories', 'repositoryNames', 'repositoryName',\n 'repositories', None)\n name = id = 'repositoryName'\n arn = \"Arn\"\n date = 'creationDate'\n cfn_type = 'AWS::CodeCommit::Repository'\n universal_tagging = object()\n\n def get_resources(self, ids, cache=True):\n return universal_augment(self, self.augment([{'repositoryName': i} for i in ids]))\n\n\[email 
protected]_registry.register('delete')\nclass DeleteRepository(BaseAction):\n \"\"\"Action to delete code commit\n\n It is recommended to use a filter to avoid unwanted deletion of repos\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: codecommit-delete\n resource: codecommit\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"codecommit:DeleteRepository\",)\n\n def process(self, repositories):\n client = local_session(\n self.manager.session_factory).client('codecommit')\n for r in repositories:\n self.process_repository(client, r)\n\n def process_repository(self, client, repository):\n try:\n client.delete_repository(repositoryName=repository['repositoryName'])\n except ClientError as e:\n self.log.exception(\n \"Exception deleting repo:\\n %s\" % e)\n\n\nclass DescribeBuild(DescribeSource):\n\n def augment(self, resources):\n return universal_augment(\n self.manager,\n super(DescribeBuild, self).augment(resources))\n\n\[email protected]('codebuild')\nclass CodeBuildProject(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'codebuild'\n enum_spec = ('list_projects', 'projects', None)\n batch_detail_spec = (\n 'batch_get_projects', 'names', None, 'projects', None)\n name = id = 'name'\n arn = 'arn'\n date = 'created'\n dimension = 'ProjectName'\n cfn_type = config_type = \"AWS::CodeBuild::Project\"\n arn_type = 'project'\n universal_taggable = object()\n\n source_mapping = {\n 'describe': DescribeBuild,\n 'config': ConfigSource\n }\n\n\[email protected]_registry.register('subnet')\nclass BuildSubnetFilter(SubnetFilter):\n\n RelatedIdsExpression = \"vpcConfig.subnets[]\"\n\n\[email protected]_registry.register('security-group')\nclass BuildSecurityGroupFilter(SecurityGroupFilter):\n\n RelatedIdsExpression = \"vpcConfig.securityGroupIds[]\"\n\n\[email protected]_registry.register('vpc')\nclass BuildVpcFilter(VpcFilter):\n\n RelatedIdsExpression = \"vpcConfig.vpcId\"\n\n\[email protected]_registry.register('post-finding')\nclass BuildPostFinding(OtherResourcePostFinding):\n\n resource_type = 'AwsCodeBuildProject'\n\n def format_resource(self, r):\n envelope, payload = self.format_envelope(r)\n payload.update(self.filter_empty({\n 'Name': r['name'],\n 'EncryptionKey': r['encryptionKey'],\n 'Environment': self.filter_empty({\n 'Type': r['environment']['type'],\n 'Certificate': r['environment'].get('certificate'),\n 'RegistryCredential': self.filter_empty({\n 'Credential': jmespath.search(\n 'environment.registryCredential.credential', r),\n 'CredentialProvider': jmespath.search(\n 'environment.registryCredential.credentialProvider', r)\n }),\n 'ImagePullCredentialsType': r['environment'].get(\n 'imagePullCredentialsType')\n }),\n 'ServiceRole': r['serviceRole'],\n 'VpcConfig': self.filter_empty({\n 'VpcId': jmespath.search('vpcConfig.vpcId', r),\n 'Subnets': jmespath.search('vpcConfig.subnets', r),\n 'SecurityGroupIds': jmespath.search('vpcConfig.securityGroupIds', r)\n }),\n 'Source': self.filter_empty({\n 'Type': jmespath.search('source.type', r),\n 'Location': jmespath.search('source.location', r),\n 'GitCloneDepth': jmespath.search('source.gitCloneDepth', r)\n }),\n }))\n return envelope\n\n\[email protected]_registry.register('delete')\nclass DeleteProject(BaseAction):\n \"\"\"Action to delete code build\n\n It is recommended to use a filter to avoid unwanted deletion of builds\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: codebuild-delete\n resource: codebuild\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"codebuild:DeleteProject\",)\n\n def process(self, projects):\n client = local_session(self.manager.session_factory).client('codebuild')\n for p in projects:\n self.process_project(client, p)\n\n def process_project(self, client, project):\n\n try:\n client.delete_project(name=project['name'])\n except ClientError as e:\n self.log.exception(\n \"Exception deleting project:\\n %s\" % e)\n\n\nclass DescribePipeline(DescribeSource):\n\n def augment(self, resources):\n resources = super().augment(resources)\n return universal_augment(self.manager, resources)\n\n\[email protected]('codepipeline')\nclass CodeDeployPipeline(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'codepipeline'\n enum_spec = ('list_pipelines', 'pipelines', None)\n detail_spec = ('get_pipeline', 'name', 'name', 'pipeline')\n name = id = 'name'\n date = 'created'\n # Note this is purposeful, codepipeline don't have a separate type specifier.\n arn_type = \"\"\n cfn_type = config_type = \"AWS::CodePipeline::Pipeline\"\n universal_tagging = object()\n\n source_mapping = {\n 'describe': DescribePipeline,\n 'config': ConfigSource\n }\n\n\[email protected]_registry.register('delete')\nclass DeletePipeline(BaseAction):\n\n schema = type_schema('delete')\n permissions = ('codepipeline:DeletePipeline',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('codepipeline')\n for r in resources:\n try:\n self.manager.retry(client.delete_pipeline, name=r['name'])\n except client.exceptions.PipelineNotFoundException:\n continue\n", "path": "c7n/resources/code.py"}]} | 3,095 | 209 |
gh_patches_debug_6237 | rasdani/github-patches | git_diff | akvo__akvo-rsr-3988 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add result type to /project/:id/results/
We want to display whether the result is an Outcome, Impact, etc...
</issue>
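For orientation before the code, here is a minimal sketch of the per-result payload the issue is asking for. It assumes the `Result` model exposes its IATI result type via `iati_type()`/`type` (those names come from this repository's model layer and should be treated as assumptions here):

```python
# Hypothetical helper: serialize one Result with a human-readable type
# such as "Outcome" or "Impact" alongside the existing fields.
def serialize_result(result):
    return {
        'id': result.id,
        'title': result.title,
        'indicator_count': result.indicators.count(),
        'type': result.iati_type().name if result.type else None,
    }
```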
<code>
[start of akvo/rest/views/project_overview.py]
1 # -*- coding: utf-8 -*-
2 """Akvo RSR is covered by the GNU Affero General Public License.
3
4 See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6 """
7
8 from akvo.rest.models import TastyTokenAuthentication
9 from akvo.rsr.models import Project, Result, IndicatorPeriod, IndicatorPeriodData
10 from akvo.rsr.models.result.utils import QUANTITATIVE
11 from decimal import Decimal, InvalidOperation
12 from django.http import Http404
13 from django.shortcuts import get_object_or_404
14 from rest_framework.authentication import SessionAuthentication
15 from rest_framework.decorators import api_view, authentication_classes
16 from rest_framework.response import Response
17
18
19 @api_view(['GET'])
20 @authentication_classes([SessionAuthentication, TastyTokenAuthentication])
21 def project_results(request, pk):
22 queryset = Project.objects.prefetch_related('results')
23 project = get_object_or_404(queryset, pk=pk)
24 if not request.user.has_perm('rsr.view_project', project):
25 raise Http404
26 data = {
27 'id': project.id,
28 'title': project.title,
29 'results': [
30 {'id': r.id, 'title': r.title, 'indicator_count': r.indicators.count()}
31 for r in project.results.all()
32 ],
33 }
34 return Response(data)
35
36
37 @api_view(['GET'])
38 @authentication_classes([SessionAuthentication, TastyTokenAuthentication])
39 def project_result_overview(request, project_pk, result_pk):
40 queryset = Result.objects.prefetch_related(
41 'indicators', 'indicators__periods').select_related('project')
42 result = get_object_or_404(queryset, pk=result_pk)
43 project = result.project
44 if project.id != int(project_pk) or not request.user.has_perm('rsr.view_project', project):
45 raise Http404
46
47 data = {
48 'id': result.id,
49 'title': result.title,
50 'indicators': [
51 {
52 'id': i.id,
53 'title': i.title,
54 'description': i.description,
55 'period_count': len(i.periods.all()),
56 'type': 'quantitative' if i.type == QUANTITATIVE else 'qualitative',
57 'measure': (
58 'unit' if i.measure == '1' else 'percentage' if i.measure == '2' else None),
59 'periods': _drilldown_indicator_periods_contributions(i)
60 }
61 for i in result.indicators.all()
62 ]
63 }
64 return Response(data)
65
66
67 def _drilldown_indicator_periods_contributions(indicator):
68 periods = _get_indicator_periods_hierarchy_flatlist(indicator)
69 periods_tree = _make_periods_hierarchy_tree(periods)
70
71 return [_transform_period_contributions_node(n) for n in periods_tree]
72
73
74 def _get_indicator_periods_hierarchy_flatlist(indicator):
75 family = {period.id for period in indicator.periods.all()}
76 while True:
77 children = set(
78 IndicatorPeriod.objects.filter(parent_period__in=family).values_list('pk', flat=True))
79 if family.union(children) == family:
80 break
81
82 family = family.union(children)
83
84 periods = IndicatorPeriod.objects.select_related(
85 'indicator__result__project',
86 'indicator__result__project__primary_location__country',
87 'parent_period',
88 ).prefetch_related(
89 'data',
90 'data__user',
91 'data__approved_by',
92 'data__comments',
93 'data__comments__user',
94 'data__disaggregations',
95 'data__disaggregations__dimension_value',
96 'data__disaggregations__dimension_value__name',
97 'disaggregation_targets',
98 'disaggregation_targets__dimension_value',
99 'disaggregation_targets__dimension_value__name'
100 ).filter(pk__in=family)
101
102 return periods
103
104
105 def _make_periods_hierarchy_tree(qs):
106 tree = []
107 lookup = {}
108 ids = [p.id for p in qs]
109
110 for period in qs:
111 item_id = period.id
112 parent_id = period.parent_period.id if period.parent_period else None
113
114 if item_id not in lookup:
115 lookup[item_id] = {'children': []}
116
117 lookup[item_id]['item'] = period
118 node = lookup[item_id]
119
120 if not parent_id or parent_id not in ids:
121 tree.append(node)
122 else:
123 if parent_id not in lookup:
124 lookup[parent_id] = {'children': []}
125
126 lookup[parent_id]['children'].append(node)
127
128 return tree
129
130
131 def _transform_period_contributions_node(node):
132 period = node['item']
133 contributors, countries, aggregated_value, disaggregations = _transform_contributions_hierarchy(node['children'])
134 updates = _transform_updates(period)
135
136 result = {
137 'period_id': period.id,
138 'period_start': period.period_start,
139 'period_end': period.period_end,
140 'actual_comment': period.actual_comment.split(' | ') if period.actual_comment else None,
141 'actual_value': _force_decimal(period.actual_value),
142 'aggregated_value': aggregated_value,
143 'target_value': _force_decimal(period.target_value),
144 'countries': countries,
145 'updates': updates,
146 'contributors': contributors,
147 'disaggregation_contributions': list(disaggregations.values()),
148 'disaggregation_targets': _transform_disaggregation_targets(period),
149 }
150
151 return result
152
153
154 def _transform_contributions_hierarchy(tree):
155 contributors = []
156 contributor_countries = []
157 aggregated_value = 0
158 disaggregations = {}
159 for node in tree:
160 contributor, countries = _transform_contributor_node(node)
161 if contributor:
162 contributors.append(contributor)
163 contributor_countries = _merge_unique(contributor_countries, countries)
164 aggregated_value += contributor['actual_value']
165 disaggregation_contributions = _extract_disaggregation_contributions(contributor)
166 for key in disaggregation_contributions:
167 if key not in disaggregations:
168 disaggregations[key] = disaggregation_contributions[key].copy()
169 else:
170 disaggregations[key]['value'] += disaggregation_contributions[key]['value']
171
172 return contributors, contributor_countries, aggregated_value, disaggregations
173
174
175 def _extract_disaggregation_contributions(contributor):
176 disaggregations = {}
177 for update in contributor['updates']:
178 if update['status']['code'] == 'A':
179 for d in update['disaggregations']:
180 key = (d['category'], d['type'])
181 if key not in disaggregations:
182 disaggregations[key] = d.copy()
183 else:
184 disaggregations[key]['value'] += d['value']
185
186 return disaggregations
187
188
189 def _transform_contributor_node(node):
190 contributor = _transform_contributor(node['item'])
191 contributor_countries = []
192 if contributor:
193 if contributor['country']:
194 contributor_countries.append(contributor['country'])
195 contributors, countries, aggregated_value, disaggregations = _transform_contributions_hierarchy(node['children'])
196 contributors_count = len(contributors)
197 if contributors_count:
198 contributor['aggregated_value'] = aggregated_value
199 contributor['contributors'] = contributors
200 contributor['disaggregation_contributions'] = list(disaggregations.values())
201 contributor_countries = _merge_unique(contributor_countries, countries)
202
203 return contributor, contributor_countries
204
205
206 def _transform_contributor(period):
207 value = _force_decimal(period.actual_value)
208
209 if value < 1 and period.data.count() < 1:
210 return None
211
212 project = period.indicator.result.project
213 country = project.primary_location.country if project.primary_location else None
214 updates = _transform_updates(period)
215
216 return {
217 'project_id': project.id,
218 'project_title': project.title,
219 'period_id': period.id,
220 'country': {'iso_code': country.iso_code} if country else None,
221 'actual_comment': period.actual_comment.split(' | ') if period.actual_comment else None,
222 'actual_value': value,
223 'aggregated_value': None,
224 'updates': updates,
225 'contributors': [],
226 'disaggregation_contributions': [],
227 'disaggregation_targets': _transform_disaggregation_targets(period),
228 }
229
230
231 def _transform_updates(period):
232 return [
233 {
234 'update_id': u.id,
235 'status': {'code': u.status, 'name': dict(IndicatorPeriodData.STATUSES)[u.status]},
236 'user': {
237 'user_id': u.user.id,
238 'email': u.user.email,
239 'name': u.user.get_full_name(),
240 } if u.user else None,
241 'approved_by': {
242 'user_id': u.approved_by.id,
243 'email': u.approved_by.email,
244 'name': u.user.get_full_name(),
245 } if u.approved_by else None,
246 'value': u.value,
247 'numerator': u.numerator,
248 'denominator': u.denominator,
249 'text': u.text,
250 'comments': [
251 {
252 'comment_id': c.id,
253 'user': {
254 'user_id': c.user.id,
255 'email': c.user.email,
256 'name': u.user.get_full_name(),
257 },
258 'comment': c.comment,
259 'created_at': c.created_at,
260 }
261 for c
262 in u.comments.all()
263 ],
264 'disaggregations': [
265 {
266 'category': d.dimension_value.name.name,
267 'type': d.dimension_value.value,
268 'value': d.value,
269 'numerator': d.numerator,
270 'denominator': d.denominator,
271 }
272 for d
273 in u.disaggregations.all()
274 ],
275 'created_at': u.created_at,
276 'last_modified_at': u.last_modified_at,
277
278 }
279 for u
280 in period.data.all()
281 ]
282
283
284 def _transform_disaggregation_targets(period):
285 return [
286 {
287 'category': t.dimension_value.name.name,
288 'type': t.dimension_value.value,
289 'value': t.value,
290 }
291 for t
292 in period.disaggregation_targets.all()
293 ]
294
295
296 def _force_decimal(value):
297 try:
298 return Decimal(value)
299 except (InvalidOperation, TypeError):
300 return Decimal(0)
301
302
303 def _merge_unique(l1, l2):
304 out = list(l1)
305 for i in l2:
306 if i not in out:
307 out.append(i)
308
309 return out
310
[end of akvo/rest/views/project_overview.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rest/views/project_overview.py b/akvo/rest/views/project_overview.py
--- a/akvo/rest/views/project_overview.py
+++ b/akvo/rest/views/project_overview.py
@@ -27,7 +27,12 @@
'id': project.id,
'title': project.title,
'results': [
- {'id': r.id, 'title': r.title, 'indicator_count': r.indicators.count()}
+ {
+ 'id': r.id,
+ 'title': r.title,
+ 'indicator_count': r.indicators.count(),
+ 'type': r.iati_type().name if r.type else None
+ }
for r in project.results.all()
],
}
| {"golden_diff": "diff --git a/akvo/rest/views/project_overview.py b/akvo/rest/views/project_overview.py\n--- a/akvo/rest/views/project_overview.py\n+++ b/akvo/rest/views/project_overview.py\n@@ -27,7 +27,12 @@\n 'id': project.id,\n 'title': project.title,\n 'results': [\n- {'id': r.id, 'title': r.title, 'indicator_count': r.indicators.count()}\n+ {\n+ 'id': r.id,\n+ 'title': r.title,\n+ 'indicator_count': r.indicators.count(),\n+ 'type': r.iati_type().name if r.type else None\n+ }\n for r in project.results.all()\n ],\n }\n", "issue": "Add result type to /project/:id/results/\nWe want to display whether the result is an Outcome, Impact, etc...\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rest.models import TastyTokenAuthentication\nfrom akvo.rsr.models import Project, Result, IndicatorPeriod, IndicatorPeriodData\nfrom akvo.rsr.models.result.utils import QUANTITATIVE\nfrom decimal import Decimal, InvalidOperation\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.authentication import SessionAuthentication\nfrom rest_framework.decorators import api_view, authentication_classes\nfrom rest_framework.response import Response\n\n\n@api_view(['GET'])\n@authentication_classes([SessionAuthentication, TastyTokenAuthentication])\ndef project_results(request, pk):\n queryset = Project.objects.prefetch_related('results')\n project = get_object_or_404(queryset, pk=pk)\n if not request.user.has_perm('rsr.view_project', project):\n raise Http404\n data = {\n 'id': project.id,\n 'title': project.title,\n 'results': [\n {'id': r.id, 'title': r.title, 'indicator_count': r.indicators.count()}\n for r in project.results.all()\n ],\n }\n return Response(data)\n\n\n@api_view(['GET'])\n@authentication_classes([SessionAuthentication, TastyTokenAuthentication])\ndef project_result_overview(request, project_pk, result_pk):\n queryset = Result.objects.prefetch_related(\n 'indicators', 'indicators__periods').select_related('project')\n result = get_object_or_404(queryset, pk=result_pk)\n project = result.project\n if project.id != int(project_pk) or not request.user.has_perm('rsr.view_project', project):\n raise Http404\n\n data = {\n 'id': result.id,\n 'title': result.title,\n 'indicators': [\n {\n 'id': i.id,\n 'title': i.title,\n 'description': i.description,\n 'period_count': len(i.periods.all()),\n 'type': 'quantitative' if i.type == QUANTITATIVE else 'qualitative',\n 'measure': (\n 'unit' if i.measure == '1' else 'percentage' if i.measure == '2' else None),\n 'periods': _drilldown_indicator_periods_contributions(i)\n }\n for i in result.indicators.all()\n ]\n }\n return Response(data)\n\n\ndef _drilldown_indicator_periods_contributions(indicator):\n periods = _get_indicator_periods_hierarchy_flatlist(indicator)\n periods_tree = _make_periods_hierarchy_tree(periods)\n\n return [_transform_period_contributions_node(n) for n in periods_tree]\n\n\ndef _get_indicator_periods_hierarchy_flatlist(indicator):\n family = {period.id for period in indicator.periods.all()}\n while True:\n children = set(\n IndicatorPeriod.objects.filter(parent_period__in=family).values_list('pk', flat=True))\n if family.union(children) == family:\n break\n\n family = family.union(children)\n\n 
periods = IndicatorPeriod.objects.select_related(\n 'indicator__result__project',\n 'indicator__result__project__primary_location__country',\n 'parent_period',\n ).prefetch_related(\n 'data',\n 'data__user',\n 'data__approved_by',\n 'data__comments',\n 'data__comments__user',\n 'data__disaggregations',\n 'data__disaggregations__dimension_value',\n 'data__disaggregations__dimension_value__name',\n 'disaggregation_targets',\n 'disaggregation_targets__dimension_value',\n 'disaggregation_targets__dimension_value__name'\n ).filter(pk__in=family)\n\n return periods\n\n\ndef _make_periods_hierarchy_tree(qs):\n tree = []\n lookup = {}\n ids = [p.id for p in qs]\n\n for period in qs:\n item_id = period.id\n parent_id = period.parent_period.id if period.parent_period else None\n\n if item_id not in lookup:\n lookup[item_id] = {'children': []}\n\n lookup[item_id]['item'] = period\n node = lookup[item_id]\n\n if not parent_id or parent_id not in ids:\n tree.append(node)\n else:\n if parent_id not in lookup:\n lookup[parent_id] = {'children': []}\n\n lookup[parent_id]['children'].append(node)\n\n return tree\n\n\ndef _transform_period_contributions_node(node):\n period = node['item']\n contributors, countries, aggregated_value, disaggregations = _transform_contributions_hierarchy(node['children'])\n updates = _transform_updates(period)\n\n result = {\n 'period_id': period.id,\n 'period_start': period.period_start,\n 'period_end': period.period_end,\n 'actual_comment': period.actual_comment.split(' | ') if period.actual_comment else None,\n 'actual_value': _force_decimal(period.actual_value),\n 'aggregated_value': aggregated_value,\n 'target_value': _force_decimal(period.target_value),\n 'countries': countries,\n 'updates': updates,\n 'contributors': contributors,\n 'disaggregation_contributions': list(disaggregations.values()),\n 'disaggregation_targets': _transform_disaggregation_targets(period),\n }\n\n return result\n\n\ndef _transform_contributions_hierarchy(tree):\n contributors = []\n contributor_countries = []\n aggregated_value = 0\n disaggregations = {}\n for node in tree:\n contributor, countries = _transform_contributor_node(node)\n if contributor:\n contributors.append(contributor)\n contributor_countries = _merge_unique(contributor_countries, countries)\n aggregated_value += contributor['actual_value']\n disaggregation_contributions = _extract_disaggregation_contributions(contributor)\n for key in disaggregation_contributions:\n if key not in disaggregations:\n disaggregations[key] = disaggregation_contributions[key].copy()\n else:\n disaggregations[key]['value'] += disaggregation_contributions[key]['value']\n\n return contributors, contributor_countries, aggregated_value, disaggregations\n\n\ndef _extract_disaggregation_contributions(contributor):\n disaggregations = {}\n for update in contributor['updates']:\n if update['status']['code'] == 'A':\n for d in update['disaggregations']:\n key = (d['category'], d['type'])\n if key not in disaggregations:\n disaggregations[key] = d.copy()\n else:\n disaggregations[key]['value'] += d['value']\n\n return disaggregations\n\n\ndef _transform_contributor_node(node):\n contributor = _transform_contributor(node['item'])\n contributor_countries = []\n if contributor:\n if contributor['country']:\n contributor_countries.append(contributor['country'])\n contributors, countries, aggregated_value, disaggregations = _transform_contributions_hierarchy(node['children'])\n contributors_count = len(contributors)\n if contributors_count:\n 
contributor['aggregated_value'] = aggregated_value\n contributor['contributors'] = contributors\n contributor['disaggregation_contributions'] = list(disaggregations.values())\n contributor_countries = _merge_unique(contributor_countries, countries)\n\n return contributor, contributor_countries\n\n\ndef _transform_contributor(period):\n value = _force_decimal(period.actual_value)\n\n if value < 1 and period.data.count() < 1:\n return None\n\n project = period.indicator.result.project\n country = project.primary_location.country if project.primary_location else None\n updates = _transform_updates(period)\n\n return {\n 'project_id': project.id,\n 'project_title': project.title,\n 'period_id': period.id,\n 'country': {'iso_code': country.iso_code} if country else None,\n 'actual_comment': period.actual_comment.split(' | ') if period.actual_comment else None,\n 'actual_value': value,\n 'aggregated_value': None,\n 'updates': updates,\n 'contributors': [],\n 'disaggregation_contributions': [],\n 'disaggregation_targets': _transform_disaggregation_targets(period),\n }\n\n\ndef _transform_updates(period):\n return [\n {\n 'update_id': u.id,\n 'status': {'code': u.status, 'name': dict(IndicatorPeriodData.STATUSES)[u.status]},\n 'user': {\n 'user_id': u.user.id,\n 'email': u.user.email,\n 'name': u.user.get_full_name(),\n } if u.user else None,\n 'approved_by': {\n 'user_id': u.approved_by.id,\n 'email': u.approved_by.email,\n 'name': u.user.get_full_name(),\n } if u.approved_by else None,\n 'value': u.value,\n 'numerator': u.numerator,\n 'denominator': u.denominator,\n 'text': u.text,\n 'comments': [\n {\n 'comment_id': c.id,\n 'user': {\n 'user_id': c.user.id,\n 'email': c.user.email,\n 'name': u.user.get_full_name(),\n },\n 'comment': c.comment,\n 'created_at': c.created_at,\n }\n for c\n in u.comments.all()\n ],\n 'disaggregations': [\n {\n 'category': d.dimension_value.name.name,\n 'type': d.dimension_value.value,\n 'value': d.value,\n 'numerator': d.numerator,\n 'denominator': d.denominator,\n }\n for d\n in u.disaggregations.all()\n ],\n 'created_at': u.created_at,\n 'last_modified_at': u.last_modified_at,\n\n }\n for u\n in period.data.all()\n ]\n\n\ndef _transform_disaggregation_targets(period):\n return [\n {\n 'category': t.dimension_value.name.name,\n 'type': t.dimension_value.value,\n 'value': t.value,\n }\n for t\n in period.disaggregation_targets.all()\n ]\n\n\ndef _force_decimal(value):\n try:\n return Decimal(value)\n except (InvalidOperation, TypeError):\n return Decimal(0)\n\n\ndef _merge_unique(l1, l2):\n out = list(l1)\n for i in l2:\n if i not in out:\n out.append(i)\n\n return out\n", "path": "akvo/rest/views/project_overview.py"}]} | 3,655 | 165 |
gh_patches_debug_28793 | rasdani/github-patches | git_diff | PaddlePaddle__models-123 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Modify inference script
</issue>
<code>
[start of hsigmoid/infer.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 import os
4 import logging
5 import gzip
6
7 import paddle.v2 as paddle
8 from network_conf import ngram_lm
9
10 logger = logging.getLogger("paddle")
11 logger.setLevel(logging.WARNING)
12
13
14 def decode_res(infer_res, dict_size):
15 """
16 Inferring probabilities are orginized as a complete binary tree.
17 The actual labels are leaves (indices are counted from class number).
18 This function travels paths decoded from inferring results.
19 If the probability >0.5 then go to right child, otherwise go to left child.
20
21 param infer_res: inferring result
22 param dict_size: class number
23 return predict_lbls: actual class
24 """
25 predict_lbls = []
26 infer_res = infer_res > 0.5
27 for i, probs in enumerate(infer_res):
28 idx = 0
29 result = 1
30 while idx < len(probs):
31 result <<= 1
32 if probs[idx]:
33 result |= 1
34 if probs[idx]:
35 idx = idx * 2 + 2 # right child
36 else:
37 idx = idx * 2 + 1 # left child
38
39 predict_lbl = result - dict_size
40 predict_lbls.append(predict_lbl)
41 return predict_lbls
42
43
44 def predict(batch_ins, idx_word_dict, dict_size, prediction_layer, parameters):
45 infer_res = paddle.infer(
46 output_layer=prediction_layer, parameters=parameters, input=batch_ins)
47
48 predict_lbls = decode_res(infer_res, dict_size)
49 predict_words = [idx_word_dict[lbl] for lbl in predict_lbls] # map to word
50
51 # Ouput format: word1 word2 word3 word4 -> predict label
52 for i, ins in enumerate(batch_ins):
53 print(" ".join([idx_word_dict[w]
54 for w in ins]) + " -> " + predict_words[i])
55
56
57 def main(model_path):
58 assert os.path.exists(model_path), "trained model does not exist."
59
60 paddle.init(use_gpu=False, trainer_count=1)
61 word_dict = paddle.dataset.imikolov.build_dict(min_word_freq=2)
62 dict_size = len(word_dict)
63 prediction_layer = ngram_lm(
64 is_train=False, hidden_size=256, embed_size=32, dict_size=dict_size)
65
66 with gzip.open(model_path, "r") as f:
67 parameters = paddle.parameters.Parameters.from_tar(f)
68
69 idx_word_dict = dict((v, k) for k, v in word_dict.items())
70 batch_size = 64
71 batch_ins = []
72 ins_iter = paddle.dataset.imikolov.test(word_dict, 5)
73
74 for ins in ins_iter():
75 batch_ins.append(ins[:-1])
76 if len(batch_ins) == batch_size:
77 predict(batch_ins, idx_word_dict, dict_size, prediction_layer,
78 parameters)
79 batch_ins = []
80
81 if len(batch_ins) > 0:
82 predict(batch_ins, idx_word_dict, dict_size, prediction_layer,
83 parameters)
84
85
86 if __name__ == "__main__":
87 main("models/hsigmoid_batch_00010.tar.gz")
88
[end of hsigmoid/infer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hsigmoid/infer.py b/hsigmoid/infer.py
--- a/hsigmoid/infer.py
+++ b/hsigmoid/infer.py
@@ -41,9 +41,8 @@
return predict_lbls
-def predict(batch_ins, idx_word_dict, dict_size, prediction_layer, parameters):
- infer_res = paddle.infer(
- output_layer=prediction_layer, parameters=parameters, input=batch_ins)
+def predict(batch_ins, idx_word_dict, dict_size, inferer):
+ infer_res = inferer.infer(input=batch_ins)
predict_lbls = decode_res(infer_res, dict_size)
predict_words = [idx_word_dict[lbl] for lbl in predict_lbls] # map to word
@@ -66,6 +65,8 @@
with gzip.open(model_path, "r") as f:
parameters = paddle.parameters.Parameters.from_tar(f)
+ inferer = paddle.inference.Inference(
+ output_layer=prediction_layer, parameters=parameters)
idx_word_dict = dict((v, k) for k, v in word_dict.items())
batch_size = 64
batch_ins = []
@@ -74,13 +75,11 @@
for ins in ins_iter():
batch_ins.append(ins[:-1])
if len(batch_ins) == batch_size:
- predict(batch_ins, idx_word_dict, dict_size, prediction_layer,
- parameters)
+ predict(batch_ins, idx_word_dict, dict_size, inferer)
batch_ins = []
if len(batch_ins) > 0:
- predict(batch_ins, idx_word_dict, dict_size, prediction_layer,
- parameters)
+ predict(batch_ins, idx_word_dict, dict_size, inferer)
if __name__ == "__main__":
| {"golden_diff": "diff --git a/hsigmoid/infer.py b/hsigmoid/infer.py\n--- a/hsigmoid/infer.py\n+++ b/hsigmoid/infer.py\n@@ -41,9 +41,8 @@\n return predict_lbls\n \n \n-def predict(batch_ins, idx_word_dict, dict_size, prediction_layer, parameters):\n- infer_res = paddle.infer(\n- output_layer=prediction_layer, parameters=parameters, input=batch_ins)\n+def predict(batch_ins, idx_word_dict, dict_size, inferer):\n+ infer_res = inferer.infer(input=batch_ins)\n \n predict_lbls = decode_res(infer_res, dict_size)\n predict_words = [idx_word_dict[lbl] for lbl in predict_lbls] # map to word\n@@ -66,6 +65,8 @@\n with gzip.open(model_path, \"r\") as f:\n parameters = paddle.parameters.Parameters.from_tar(f)\n \n+ inferer = paddle.inference.Inference(\n+ output_layer=prediction_layer, parameters=parameters)\n idx_word_dict = dict((v, k) for k, v in word_dict.items())\n batch_size = 64\n batch_ins = []\n@@ -74,13 +75,11 @@\n for ins in ins_iter():\n batch_ins.append(ins[:-1])\n if len(batch_ins) == batch_size:\n- predict(batch_ins, idx_word_dict, dict_size, prediction_layer,\n- parameters)\n+ predict(batch_ins, idx_word_dict, dict_size, inferer)\n batch_ins = []\n \n if len(batch_ins) > 0:\n- predict(batch_ins, idx_word_dict, dict_size, prediction_layer,\n- parameters)\n+ predict(batch_ins, idx_word_dict, dict_size, inferer)\n \n \n if __name__ == \"__main__\":\n", "issue": "Modify inference script\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport logging\nimport gzip\n\nimport paddle.v2 as paddle\nfrom network_conf import ngram_lm\n\nlogger = logging.getLogger(\"paddle\")\nlogger.setLevel(logging.WARNING)\n\n\ndef decode_res(infer_res, dict_size):\n \"\"\"\n Inferring probabilities are orginized as a complete binary tree.\n The actual labels are leaves (indices are counted from class number).\n This function travels paths decoded from inferring results.\n If the probability >0.5 then go to right child, otherwise go to left child.\n\n param infer_res: inferring result\n param dict_size: class number\n return predict_lbls: actual class\n \"\"\"\n predict_lbls = []\n infer_res = infer_res > 0.5\n for i, probs in enumerate(infer_res):\n idx = 0\n result = 1\n while idx < len(probs):\n result <<= 1\n if probs[idx]:\n result |= 1\n if probs[idx]:\n idx = idx * 2 + 2 # right child\n else:\n idx = idx * 2 + 1 # left child\n\n predict_lbl = result - dict_size\n predict_lbls.append(predict_lbl)\n return predict_lbls\n\n\ndef predict(batch_ins, idx_word_dict, dict_size, prediction_layer, parameters):\n infer_res = paddle.infer(\n output_layer=prediction_layer, parameters=parameters, input=batch_ins)\n\n predict_lbls = decode_res(infer_res, dict_size)\n predict_words = [idx_word_dict[lbl] for lbl in predict_lbls] # map to word\n\n # Ouput format: word1 word2 word3 word4 -> predict label\n for i, ins in enumerate(batch_ins):\n print(\" \".join([idx_word_dict[w]\n for w in ins]) + \" -> \" + predict_words[i])\n\n\ndef main(model_path):\n assert os.path.exists(model_path), \"trained model does not exist.\"\n\n paddle.init(use_gpu=False, trainer_count=1)\n word_dict = paddle.dataset.imikolov.build_dict(min_word_freq=2)\n dict_size = len(word_dict)\n prediction_layer = ngram_lm(\n is_train=False, hidden_size=256, embed_size=32, dict_size=dict_size)\n\n with gzip.open(model_path, \"r\") as f:\n parameters = paddle.parameters.Parameters.from_tar(f)\n\n idx_word_dict = dict((v, k) for k, v in word_dict.items())\n batch_size = 64\n batch_ins = []\n ins_iter = 
paddle.dataset.imikolov.test(word_dict, 5)\n\n for ins in ins_iter():\n batch_ins.append(ins[:-1])\n if len(batch_ins) == batch_size:\n predict(batch_ins, idx_word_dict, dict_size, prediction_layer,\n parameters)\n batch_ins = []\n\n if len(batch_ins) > 0:\n predict(batch_ins, idx_word_dict, dict_size, prediction_layer,\n parameters)\n\n\nif __name__ == \"__main__\":\n main(\"models/hsigmoid_batch_00010.tar.gz\")\n", "path": "hsigmoid/infer.py"}]} | 1,400 | 395 |
gh_patches_debug_14264 | rasdani/github-patches | git_diff | CTFd__CTFd-1233 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Theme reset from CLI
There needs to be a way to reset the theme and other kinds of unrecoverable errors from the CLI.
Context: https://community.majorleaguecyber.org/t/ctfd-templatenotfound/51
</issue>
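For orientation, a minimal sketch of one way such a recovery path could look from the command line, assuming CTFd's `create_app` factory and a `set_config(key, value)` utility in `CTFd.utils`; the `ctf_theme` key and the `core` default are assumptions for illustration:

```python
# Hypothetical standalone script: force the theme config back to a known-good
# value without going through the (possibly broken) web UI.
from CTFd import create_app
from CTFd.utils import set_config


def reset_theme(theme='core'):
    app = create_app()
    with app.app_context():
        set_config('ctf_theme', theme)
        print('Theme reset to', theme)


if __name__ == '__main__':
    reset_theme()
```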
<code>
[start of manage.py]
1 from flask import Flask
2 from flask_sqlalchemy import SQLAlchemy
3 from flask_script import Manager
4 from flask_migrate import Migrate, MigrateCommand
5 from CTFd import create_app
6 from CTFd.models import *
7
8 app = create_app()
9
10 manager = Manager(app)
11 manager.add_command('db', MigrateCommand)
12
13 if __name__ == '__main__':
14 manager.run()
15
[end of manage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/manage.py b/manage.py
--- a/manage.py
+++ b/manage.py
@@ -3,6 +3,7 @@
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from CTFd import create_app
+from CTFd.utils import get_config as get_config_util, set_config as set_config_util
from CTFd.models import *
app = create_app()
@@ -10,5 +11,18 @@
manager = Manager(app)
manager.add_command('db', MigrateCommand)
-if __name__ == '__main__':
+
[email protected]
+def get_config(key):
+ with app.app_context():
+ print(get_config_util(key))
+
+
[email protected]
+def set_config(key, value):
+ with app.app_context():
+ print(set_config_util(key, value).value)
+
+
+if __name__ == "__main__":
manager.run()
| {"golden_diff": "diff --git a/manage.py b/manage.py\n--- a/manage.py\n+++ b/manage.py\n@@ -3,6 +3,7 @@\n from flask_script import Manager\n from flask_migrate import Migrate, MigrateCommand\n from CTFd import create_app\n+from CTFd.utils import get_config as get_config_util, set_config as set_config_util\n from CTFd.models import *\n \n app = create_app()\n@@ -10,5 +11,18 @@\n manager = Manager(app)\n manager.add_command('db', MigrateCommand)\n \n-if __name__ == '__main__':\n+\[email protected]\n+def get_config(key):\n+ with app.app_context():\n+ print(get_config_util(key))\n+\n+\[email protected]\n+def set_config(key, value):\n+ with app.app_context():\n+ print(set_config_util(key, value).value)\n+\n+\n+if __name__ == \"__main__\":\n manager.run()\n", "issue": "Theme reset from CLI\nThere needs to be a way to reset the theme and other kinds of unrecoverable errors from the CLI. \r\n\r\nContext: https://community.majorleaguecyber.org/t/ctfd-templatenotfound/51\n", "before_files": [{"content": "from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_script import Manager\nfrom flask_migrate import Migrate, MigrateCommand\nfrom CTFd import create_app\nfrom CTFd.models import *\n\napp = create_app()\n\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\n\nif __name__ == '__main__':\n manager.run()\n", "path": "manage.py"}]} | 681 | 203 |
gh_patches_debug_24829 | rasdani/github-patches | git_diff | qtile__qtile-4348 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Logs pollution - keyboardlayout.py
### The issue:
Bunch of these:
```
2023-07-15 15:28:06,304 ERROR libqtile keyboardlayout.py:set_keyboard():L102 Can not load ~/.Xmodmap:
```
My humble opinion is that if I don't use something optional, I shouldn't have an error about it in the logs. Oh and it's supposed to be **"Cannot"**. I don't have another idea on how this should be done atm.
### Required:
- [X] I have searched past issues to see if this bug has already been reported.
</issue>
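For orientation, a minimal sketch of the guard the report implies — only touch `~/.Xmodmap` when the file actually exists — using the same `check_output` call the widget already makes. This is an illustration, not necessarily the project's eventual fix:

```python
from pathlib import Path
from subprocess import CalledProcessError, check_output


def maybe_load_xmodmap():
    # Skip silently when the user has no ~/.Xmodmap, so a purely optional
    # file never produces an error in the logs.
    xmodmap = Path('~/.Xmodmap').expanduser()
    if not xmodmap.is_file():
        return
    try:
        check_output('xmodmap $HOME/.Xmodmap', shell=True)
    except CalledProcessError:
        print('Could not load ~/.Xmodmap.')  # the widget would log this instead
```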
<code>
[start of libqtile/widget/keyboardlayout.py]
1 # Copyright (c) 2013 Jacob Mourelos
2 # Copyright (c) 2014 Shepilov Vladislav
3 # Copyright (c) 2014-2015 Sean Vig
4 # Copyright (c) 2014 Tycho Andersen
5 # Copyright (c) 2019 zordsdavini
6 #
7 # Permission is hereby granted, free of charge, to any person obtaining a copy
8 # of this software and associated documentation files (the "Software"), to deal
9 # in the Software without restriction, including without limitation the rights
10 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 # copies of the Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice shall be included in
15 # all copies or substantial portions of the Software.
16 #
17 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23 # SOFTWARE.
24
25 from __future__ import annotations
26
27 import re
28 from abc import ABCMeta, abstractmethod
29 from subprocess import CalledProcessError, check_output
30 from typing import TYPE_CHECKING
31
32 from libqtile.command.base import expose_command
33 from libqtile.confreader import ConfigError
34 from libqtile.log_utils import logger
35 from libqtile.widget import base
36
37 if TYPE_CHECKING:
38 from libqtile.core.manager import Qtile
39
40
41 class _BaseLayoutBackend(metaclass=ABCMeta):
42 def __init__(self, qtile: Qtile):
43 """
44 This handles getting and setter the keyboard layout with the appropriate
45 backend.
46 """
47
48 @abstractmethod
49 def get_keyboard(self) -> str:
50 """
51 Return the currently used keyboard layout as a string
52
53 Examples: "us", "us dvorak". In case of error returns "unknown".
54 """
55
56 def set_keyboard(self, layout: str, options: str | None) -> None:
57 """
58 Set the keyboard layout with specified options.
59 """
60
61
62 class _X11LayoutBackend(_BaseLayoutBackend):
63 kb_layout_regex = re.compile(r"layout:\s+(?P<layout>[\w-]+)")
64 kb_variant_regex = re.compile(r"variant:\s+(?P<variant>\w+)")
65
66 def get_keyboard(self) -> str:
67 try:
68 command = "setxkbmap -verbose 10 -query"
69 setxkbmap_output = check_output(command.split(" ")).decode()
70 except CalledProcessError:
71 logger.exception("Can not get the keyboard layout:")
72 return "unknown"
73 except OSError:
74 logger.exception("Please, check that xset is available:")
75 return "unknown"
76
77 match_layout = self.kb_layout_regex.search(setxkbmap_output)
78 if match_layout is None:
79 return "ERR"
80 keyboard = match_layout.group("layout")
81
82 match_variant = self.kb_variant_regex.search(setxkbmap_output)
83 if match_variant:
84 keyboard += " " + match_variant.group("variant")
85 return keyboard
86
87 def set_keyboard(self, layout: str, options: str | None) -> None:
88 command = ["setxkbmap"]
89 command.extend(layout.split(" "))
90 if options:
91 command.extend(["-option", options])
92 try:
93 check_output(command)
94 except CalledProcessError:
95 logger.error("Can not change the keyboard layout:")
96 except OSError:
97 logger.error("Please, check that setxkbmap is available:")
98 else:
99 try:
100 check_output("xmodmap $HOME/.Xmodmap", shell=True)
101 except CalledProcessError:
102 logger.error("Can not load ~/.Xmodmap:")
103 except OSError:
104 logger.error("Please, check that xmodmap is available:")
105
106
107 class _WaylandLayoutBackend(_BaseLayoutBackend):
108 def __init__(self, qtile: Qtile) -> None:
109 self.set_keymap = qtile.core.set_keymap
110 self._layout: str = ""
111
112 def get_keyboard(self) -> str:
113 return self._layout
114
115 def set_keyboard(self, layout: str, options: str | None) -> None:
116 maybe_variant: str | None = None
117 if " " in layout:
118 layout_name, maybe_variant = layout.split(" ", maxsplit=1)
119 else:
120 layout_name = layout
121 self.set_keymap(layout_name, options, maybe_variant)
122 self._layout = layout
123
124
125 layout_backends = {
126 "x11": _X11LayoutBackend,
127 "wayland": _WaylandLayoutBackend,
128 }
129
130
131 class KeyboardLayout(base.InLoopPollText):
132 """Widget for changing and displaying the current keyboard layout
133
134 To use this widget effectively you need to specify keyboard layouts you want to use
135 (using "configured_keyboards") and bind function "next_keyboard" to specific keys in
136 order to change layouts.
137
138 For example:
139
140 Key([mod], "space", lazy.widget["keyboardlayout"].next_keyboard(), desc="Next keyboard layout."),
141
142 When running Qtile with the X11 backend, this widget requires setxkbmap to be available.
143 Xmodmap will also be used if .Xmodmap file is available.
144 """
145
146 defaults = [
147 ("update_interval", 1, "Update time in seconds."),
148 (
149 "configured_keyboards",
150 ["us"],
151 "A list of predefined keyboard layouts "
152 "represented as strings. For example: "
153 "['us', 'us colemak', 'es', 'fr'].",
154 ),
155 (
156 "display_map",
157 {},
158 "Custom display of layout. Key should be in format "
159 "'layout variant'. For example: "
160 "{'us': 'us', 'lt sgs': 'sgs', 'ru phonetic': 'ru'}",
161 ),
162 ("option", None, "string of setxkbmap option. Ex., 'compose:menu,grp_led:scroll'"),
163 ]
164
165 def __init__(self, **config):
166 base.InLoopPollText.__init__(self, **config)
167 self.add_defaults(KeyboardLayout.defaults)
168 self.add_callbacks({"Button1": self.next_keyboard})
169
170 def _configure(self, qtile, bar):
171 base.InLoopPollText._configure(self, qtile, bar)
172
173 if qtile.core.name not in layout_backends:
174 raise ConfigError("KeyboardLayout does not support backend: " + qtile.core.name)
175
176 self.backend = layout_backends[qtile.core.name](qtile)
177 self.backend.set_keyboard(self.configured_keyboards[0], self.option)
178
179 @expose_command()
180 def next_keyboard(self):
181 """set the next layout in the list of configured keyboard layouts as
182 new current layout in use
183
184 If the current keyboard layout is not in the list, it will set as new
185 layout the first one in the list.
186 """
187
188 current_keyboard = self.backend.get_keyboard()
189 if current_keyboard in self.configured_keyboards:
190 # iterate the list circularly
191 next_keyboard = self.configured_keyboards[
192 (self.configured_keyboards.index(current_keyboard) + 1)
193 % len(self.configured_keyboards)
194 ]
195 else:
196 next_keyboard = self.configured_keyboards[0]
197
198 self.backend.set_keyboard(next_keyboard, self.option)
199
200 self.tick()
201
202 def poll(self):
203 keyboard = self.backend.get_keyboard()
204 if keyboard in self.display_map.keys():
205 return self.display_map[keyboard]
206 return keyboard.upper()
207
[end of libqtile/widget/keyboardlayout.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/widget/keyboardlayout.py b/libqtile/widget/keyboardlayout.py
--- a/libqtile/widget/keyboardlayout.py
+++ b/libqtile/widget/keyboardlayout.py
@@ -26,6 +26,7 @@
import re
from abc import ABCMeta, abstractmethod
+from pathlib import Path
from subprocess import CalledProcessError, check_output
from typing import TYPE_CHECKING
@@ -92,16 +93,16 @@
try:
check_output(command)
except CalledProcessError:
- logger.error("Can not change the keyboard layout:")
+ logger.error("Cannot change the keyboard layout.")
except OSError:
- logger.error("Please, check that setxkbmap is available:")
+ logger.error("Please, check that setxkbmap is available.")
else:
- try:
- check_output("xmodmap $HOME/.Xmodmap", shell=True)
- except CalledProcessError:
- logger.error("Can not load ~/.Xmodmap:")
- except OSError:
- logger.error("Please, check that xmodmap is available:")
+ # Load Xmodmap if it's available
+ if Path("~/.Xmodmap").expanduser().is_file():
+ try:
+ check_output("xmodmap $HOME/.Xmodmap", shell=True)
+ except CalledProcessError:
+ logger.error("Could not load ~/.Xmodmap.")
class _WaylandLayoutBackend(_BaseLayoutBackend):
| {"golden_diff": "diff --git a/libqtile/widget/keyboardlayout.py b/libqtile/widget/keyboardlayout.py\n--- a/libqtile/widget/keyboardlayout.py\n+++ b/libqtile/widget/keyboardlayout.py\n@@ -26,6 +26,7 @@\n \n import re\n from abc import ABCMeta, abstractmethod\n+from pathlib import Path\n from subprocess import CalledProcessError, check_output\n from typing import TYPE_CHECKING\n \n@@ -92,16 +93,16 @@\n try:\n check_output(command)\n except CalledProcessError:\n- logger.error(\"Can not change the keyboard layout:\")\n+ logger.error(\"Cannot change the keyboard layout.\")\n except OSError:\n- logger.error(\"Please, check that setxkbmap is available:\")\n+ logger.error(\"Please, check that setxkbmap is available.\")\n else:\n- try:\n- check_output(\"xmodmap $HOME/.Xmodmap\", shell=True)\n- except CalledProcessError:\n- logger.error(\"Can not load ~/.Xmodmap:\")\n- except OSError:\n- logger.error(\"Please, check that xmodmap is available:\")\n+ # Load Xmodmap if it's available\n+ if Path(\"~/.Xmodmap\").expanduser().is_file():\n+ try:\n+ check_output(\"xmodmap $HOME/.Xmodmap\", shell=True)\n+ except CalledProcessError:\n+ logger.error(\"Could not load ~/.Xmodmap.\")\n \n \n class _WaylandLayoutBackend(_BaseLayoutBackend):\n", "issue": "Logs pollution - keyboardlayout.py\n### The issue:\r\n\r\nBunch of these:\r\n```\r\n2023-07-15 15:28:06,304 ERROR libqtile keyboardlayout.py:set_keyboard():L102 Can not load ~/.Xmodmap:\r\n```\r\nMy humble opinion is that if I don't use something optional, I shouldn't have an error about it in the logs. Oh and it's supposed to be **\"Cannot\"**. I don't have another idea on how this should be done atm.\r\n\r\n### Required:\r\n\r\n- [X] I have searched past issues to see if this bug has already been reported.\n", "before_files": [{"content": "# Copyright (c) 2013 Jacob Mourelos\n# Copyright (c) 2014 Shepilov Vladislav\n# Copyright (c) 2014-2015 Sean Vig\n# Copyright (c) 2014 Tycho Andersen\n# Copyright (c) 2019 zordsdavini\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import annotations\n\nimport re\nfrom abc import ABCMeta, abstractmethod\nfrom subprocess import CalledProcessError, check_output\nfrom typing import TYPE_CHECKING\n\nfrom libqtile.command.base import expose_command\nfrom libqtile.confreader import ConfigError\nfrom libqtile.log_utils import logger\nfrom libqtile.widget import base\n\nif TYPE_CHECKING:\n from libqtile.core.manager import Qtile\n\n\nclass _BaseLayoutBackend(metaclass=ABCMeta):\n def __init__(self, qtile: Qtile):\n \"\"\"\n This handles getting and setter the keyboard layout with the appropriate\n backend.\n \"\"\"\n\n @abstractmethod\n def get_keyboard(self) -> str:\n \"\"\"\n Return the currently used keyboard layout as a string\n\n Examples: \"us\", \"us dvorak\". In case of error returns \"unknown\".\n \"\"\"\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n \"\"\"\n Set the keyboard layout with specified options.\n \"\"\"\n\n\nclass _X11LayoutBackend(_BaseLayoutBackend):\n kb_layout_regex = re.compile(r\"layout:\\s+(?P<layout>[\\w-]+)\")\n kb_variant_regex = re.compile(r\"variant:\\s+(?P<variant>\\w+)\")\n\n def get_keyboard(self) -> str:\n try:\n command = \"setxkbmap -verbose 10 -query\"\n setxkbmap_output = check_output(command.split(\" \")).decode()\n except CalledProcessError:\n logger.exception(\"Can not get the keyboard layout:\")\n return \"unknown\"\n except OSError:\n logger.exception(\"Please, check that xset is available:\")\n return \"unknown\"\n\n match_layout = self.kb_layout_regex.search(setxkbmap_output)\n if match_layout is None:\n return \"ERR\"\n keyboard = match_layout.group(\"layout\")\n\n match_variant = self.kb_variant_regex.search(setxkbmap_output)\n if match_variant:\n keyboard += \" \" + match_variant.group(\"variant\")\n return keyboard\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n command = [\"setxkbmap\"]\n command.extend(layout.split(\" \"))\n if options:\n command.extend([\"-option\", options])\n try:\n check_output(command)\n except CalledProcessError:\n logger.error(\"Can not change the keyboard layout:\")\n except OSError:\n logger.error(\"Please, check that setxkbmap is available:\")\n else:\n try:\n check_output(\"xmodmap $HOME/.Xmodmap\", shell=True)\n except CalledProcessError:\n logger.error(\"Can not load ~/.Xmodmap:\")\n except OSError:\n logger.error(\"Please, check that xmodmap is available:\")\n\n\nclass _WaylandLayoutBackend(_BaseLayoutBackend):\n def __init__(self, qtile: Qtile) -> None:\n self.set_keymap = qtile.core.set_keymap\n self._layout: str = \"\"\n\n def get_keyboard(self) -> str:\n return self._layout\n\n def set_keyboard(self, layout: str, options: str | None) -> None:\n maybe_variant: str | None = None\n if \" \" in layout:\n layout_name, maybe_variant = layout.split(\" \", maxsplit=1)\n else:\n layout_name = layout\n self.set_keymap(layout_name, options, maybe_variant)\n self._layout = layout\n\n\nlayout_backends = {\n \"x11\": _X11LayoutBackend,\n \"wayland\": _WaylandLayoutBackend,\n}\n\n\nclass KeyboardLayout(base.InLoopPollText):\n \"\"\"Widget for changing and displaying the current keyboard layout\n\n To use this widget effectively you need to specify keyboard layouts you want to use\n (using \"configured_keyboards\") and 
bind function \"next_keyboard\" to specific keys in\n order to change layouts.\n\n For example:\n\n Key([mod], \"space\", lazy.widget[\"keyboardlayout\"].next_keyboard(), desc=\"Next keyboard layout.\"),\n\n When running Qtile with the X11 backend, this widget requires setxkbmap to be available.\n Xmodmap will also be used if .Xmodmap file is available.\n \"\"\"\n\n defaults = [\n (\"update_interval\", 1, \"Update time in seconds.\"),\n (\n \"configured_keyboards\",\n [\"us\"],\n \"A list of predefined keyboard layouts \"\n \"represented as strings. For example: \"\n \"['us', 'us colemak', 'es', 'fr'].\",\n ),\n (\n \"display_map\",\n {},\n \"Custom display of layout. Key should be in format \"\n \"'layout variant'. For example: \"\n \"{'us': 'us', 'lt sgs': 'sgs', 'ru phonetic': 'ru'}\",\n ),\n (\"option\", None, \"string of setxkbmap option. Ex., 'compose:menu,grp_led:scroll'\"),\n ]\n\n def __init__(self, **config):\n base.InLoopPollText.__init__(self, **config)\n self.add_defaults(KeyboardLayout.defaults)\n self.add_callbacks({\"Button1\": self.next_keyboard})\n\n def _configure(self, qtile, bar):\n base.InLoopPollText._configure(self, qtile, bar)\n\n if qtile.core.name not in layout_backends:\n raise ConfigError(\"KeyboardLayout does not support backend: \" + qtile.core.name)\n\n self.backend = layout_backends[qtile.core.name](qtile)\n self.backend.set_keyboard(self.configured_keyboards[0], self.option)\n\n @expose_command()\n def next_keyboard(self):\n \"\"\"set the next layout in the list of configured keyboard layouts as\n new current layout in use\n\n If the current keyboard layout is not in the list, it will set as new\n layout the first one in the list.\n \"\"\"\n\n current_keyboard = self.backend.get_keyboard()\n if current_keyboard in self.configured_keyboards:\n # iterate the list circularly\n next_keyboard = self.configured_keyboards[\n (self.configured_keyboards.index(current_keyboard) + 1)\n % len(self.configured_keyboards)\n ]\n else:\n next_keyboard = self.configured_keyboards[0]\n\n self.backend.set_keyboard(next_keyboard, self.option)\n\n self.tick()\n\n def poll(self):\n keyboard = self.backend.get_keyboard()\n if keyboard in self.display_map.keys():\n return self.display_map[keyboard]\n return keyboard.upper()\n", "path": "libqtile/widget/keyboardlayout.py"}]} | 2,907 | 326 |
gh_patches_debug_31699 | rasdani/github-patches | git_diff | facebookresearch__Mephisto-494 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Running static_test_script.py: Usage of deprecated keyword in package header
Seems like this is a compatibility issue with hydra 1.1.0? I've changed static_test_script.py like so:
`@hydra.main(config_path=".", config_name="scriptconfig")`
and deleted the first line of config/example.yaml as instructed by hydra.cc. After running the script I'm still getting the following error:
`/home/ninacdu/.local/lib/python3.8/site-packages/hydra/core/default_element.py:122: UserWarning: In 'mephisto/provider/mock': Usage of deprecated keyword in package header '# @package _group_'.
See https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_package_header for more information
warnings.warn(
/home/ninacdu/.local/lib/python3.8/site-packages/hydra/core/default_element.py:122: UserWarning: In 'mephisto/architect/local': Usage of deprecated keyword in package header '# @package _group_'.
See https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_package_header for more information
warnings.warn(
/home/ninacdu/.local/lib/python3.8/site-packages/hydra/core/default_element.py:122: UserWarning: In 'mephisto/blueprint/static_task': Usage of deprecated keyword in package header '# @package _group_'.
See https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_package_header for more information
warnings.warn(
In 'scriptconfig': Validation error while composing config:
Merge error: BlueprintArgs is not a subclass of StaticHTMLBlueprintArgs. value: {'_blueprint_type': '???', 'onboarding_qualification': '???', 'block_qualification': '???'}
full_key:
object_type=dict
Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.
`
Is there something I'm doing wrong here?
</issue>
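For context, a minimal sketch of what hydra 1.1-compatible group registration could look like, assuming the `ConfigStoreWithProvider` API used in `mephisto/operations/hydra_config.py` below (illustrative only, not necessarily the project's actual fix):

```python
from hydra.core.config_store import ConfigStoreWithProvider

config = ConfigStoreWithProvider("mephisto")


def register_abstraction_config(name: str, node, abstraction_type: str):
    # Hydra 1.1 derives the package from the config group path, so the
    # deprecated package="_group_" argument (and the "# @package _group_"
    # header in the yaml files) can simply be dropped.
    config.store(name=name, node=node, group=f"mephisto/{abstraction_type}")
```

Dropping the explicit package keeps the node under `mephisto/<abstraction_type>`, which is what the old `_group_` header expressed.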
<code>
[start of examples/simple_static_task/static_run_with_onboarding.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 import os
8 from mephisto.operations.operator import Operator
9 from mephisto.operations.utils import get_root_dir
10 from mephisto.tools.scripts import load_db_and_process_config
11 from mephisto.abstractions.blueprints.static_html_task.static_html_blueprint import (
12 BLUEPRINT_TYPE,
13 )
14 from mephisto.abstractions.blueprints.abstract.static_task.static_blueprint import (
15 SharedStaticTaskState,
16 )
17
18 import hydra
19 from omegaconf import DictConfig
20 from dataclasses import dataclass, field
21 from typing import List, Any
22
23 TASK_DIRECTORY = os.path.join(get_root_dir(), "examples/simple_static_task")
24 CORRECT_ANSWER = "apple"
25
26 defaults = [
27 {"mephisto/blueprint": BLUEPRINT_TYPE},
28 {"mephisto/architect": "local"},
29 {"mephisto/provider": "mock"},
30 {"conf": "onboarding_example"},
31 ]
32
33 from mephisto.operations.hydra_config import RunScriptConfig, register_script_config
34
35
36 @dataclass
37 class TestScriptConfig(RunScriptConfig):
38 defaults: List[Any] = field(default_factory=lambda: defaults)
39 task_dir: str = TASK_DIRECTORY
40 correct_answer: str = CORRECT_ANSWER
41
42
43 register_script_config(name="scriptconfig", module=TestScriptConfig)
44
45
46 @hydra.main(config_name="scriptconfig")
47 def main(cfg: DictConfig) -> None:
48 correct_config_answer = cfg.correct_answer
49
50 def onboarding_is_valid(onboarding_data):
51 inputs = onboarding_data["inputs"]
52 outputs = onboarding_data["outputs"]
53 return outputs.get("answer") == correct_config_answer
54
55 shared_state = SharedStaticTaskState(
56 onboarding_data={"correct_answer": correct_config_answer},
57 validate_onboarding=onboarding_is_valid,
58 )
59
60 db, cfg = load_db_and_process_config(cfg)
61 operator = Operator(db)
62
63 operator.validate_and_run_config(cfg.mephisto, shared_state)
64 operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30)
65
66
67 if __name__ == "__main__":
68 main()
69
[end of examples/simple_static_task/static_run_with_onboarding.py]
[start of mephisto/operations/hydra_config.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 from hydra.core.config_store import ConfigStoreWithProvider
8 from mephisto.abstractions.blueprint import BlueprintArgs
9 from mephisto.abstractions.architect import ArchitectArgs
10 from mephisto.abstractions.crowd_provider import ProviderArgs
11 from mephisto.data_model.task_config import TaskConfigArgs
12 from dataclasses import dataclass, field
13 from omegaconf import MISSING
14 from typing import List, Any
15
16 config = ConfigStoreWithProvider("mephisto")
17
18
19 @dataclass
20 class DatabaseArgs:
21 _database_type: str = "local" # default DB is local
22
23
24 @dataclass
25 class MephistoConfig:
26 blueprint: BlueprintArgs = BlueprintArgs()
27 provider: ProviderArgs = ProviderArgs()
28 architect: ArchitectArgs = ArchitectArgs()
29 task: TaskConfigArgs = TaskConfigArgs()
30 database: DatabaseArgs = DatabaseArgs()
31 log_level: str = "info"
32
33
34 @dataclass
35 class RunScriptConfig:
36 mephisto: MephistoConfig = MephistoConfig()
37
38
39 def register_abstraction_config(name: str, node: Any, abstraction_type: str):
40 config.store(
41 name=name, node=node, group=f"mephisto/{abstraction_type}", package="_group_"
42 )
43
44
45 def initialize_named_configs():
46 """
47 Functionality to register the core mephisto configuration structure. Must be done in __init__
48 """
49 config.store(
50 name="base_mephisto_config",
51 node=MephistoConfig,
52 group="mephisto",
53 package="_group_",
54 )
55
56
57 def register_script_config(name: str, module: Any):
58 config.store(name=name, node=module)
59
[end of mephisto/operations/hydra_config.py]
[start of examples/static_react_task/run_task.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 import os
8 import shutil
9 import subprocess
10 from mephisto.operations.operator import Operator
11 from mephisto.operations.utils import get_root_dir
12 from mephisto.tools.scripts import load_db_and_process_config
13 from mephisto.abstractions.blueprints.static_react_task.static_react_blueprint import (
14 BLUEPRINT_TYPE,
15 )
16 from mephisto.abstractions.blueprints.abstract.static_task.static_blueprint import (
17 SharedStaticTaskState,
18 )
19
20 import hydra
21 from omegaconf import DictConfig
22 from dataclasses import dataclass, field
23 from typing import List, Any
24
25 TASK_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
26
27 defaults = [
28 {"mephisto/blueprint": BLUEPRINT_TYPE},
29 {"mephisto/architect": "local"},
30 {"mephisto/provider": "mock"},
31 {"conf": "example"},
32 ]
33
34 from mephisto.operations.hydra_config import RunScriptConfig, register_script_config
35
36
37 @dataclass
38 class TestScriptConfig(RunScriptConfig):
39 defaults: List[Any] = field(default_factory=lambda: defaults)
40 task_dir: str = TASK_DIRECTORY
41
42
43 register_script_config(name="scriptconfig", module=TestScriptConfig)
44
45
46 # TODO it would be nice if this was automated in the way that it
47 # is for ParlAI custom frontend tasks
48 def build_task(task_dir):
49 """Rebuild the frontend for this task"""
50
51 frontend_source_dir = os.path.join(task_dir, "webapp")
52 frontend_build_dir = os.path.join(frontend_source_dir, "build")
53
54 return_dir = os.getcwd()
55 os.chdir(frontend_source_dir)
56 if os.path.exists(frontend_build_dir):
57 shutil.rmtree(frontend_build_dir)
58 packages_installed = subprocess.call(["npm", "install"])
59 if packages_installed != 0:
60 raise Exception(
61 "please make sure npm is installed, otherwise view "
62 "the above error for more info."
63 )
64
65 webpack_complete = subprocess.call(["npm", "run", "dev"])
66 if webpack_complete != 0:
67 raise Exception(
68 "Webpack appears to have failed to build your "
69 "frontend. See the above error for more information."
70 )
71 os.chdir(return_dir)
72
73
74 @hydra.main(config_name="scriptconfig")
75 def main(cfg: DictConfig) -> None:
76 task_dir = cfg.task_dir
77
78 def onboarding_always_valid(onboarding_data):
79 return True
80
81 shared_state = SharedStaticTaskState(
82 static_task_data=[
83 {"text": "This text is good text!"},
84 {"text": "This text is bad text!"},
85 ],
86 validate_onboarding=onboarding_always_valid,
87 )
88
89 build_task(task_dir)
90
91 db, cfg = load_db_and_process_config(cfg)
92 operator = Operator(db)
93
94 operator.validate_and_run_config(cfg.mephisto, shared_state)
95 operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30)
96
97
98 if __name__ == "__main__":
99 main()
100
[end of examples/static_react_task/run_task.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/simple_static_task/static_run_with_onboarding.py b/examples/simple_static_task/static_run_with_onboarding.py
--- a/examples/simple_static_task/static_run_with_onboarding.py
+++ b/examples/simple_static_task/static_run_with_onboarding.py
@@ -43,7 +43,7 @@
register_script_config(name="scriptconfig", module=TestScriptConfig)
[email protected](config_name="scriptconfig")
[email protected](config_path="hydra_configs", config_name="scriptconfig")
def main(cfg: DictConfig) -> None:
correct_config_answer = cfg.correct_answer
diff --git a/examples/static_react_task/run_task.py b/examples/static_react_task/run_task.py
--- a/examples/static_react_task/run_task.py
+++ b/examples/static_react_task/run_task.py
@@ -71,7 +71,7 @@
os.chdir(return_dir)
[email protected](config_name="scriptconfig")
[email protected](config_path="hydra_configs", config_name="scriptconfig")
def main(cfg: DictConfig) -> None:
task_dir = cfg.task_dir
diff --git a/mephisto/operations/hydra_config.py b/mephisto/operations/hydra_config.py
--- a/mephisto/operations/hydra_config.py
+++ b/mephisto/operations/hydra_config.py
@@ -23,9 +23,9 @@
@dataclass
class MephistoConfig:
- blueprint: BlueprintArgs = BlueprintArgs()
- provider: ProviderArgs = ProviderArgs()
- architect: ArchitectArgs = ArchitectArgs()
+ blueprint: BlueprintArgs = MISSING
+ provider: ProviderArgs = MISSING
+ architect: ArchitectArgs = MISSING
task: TaskConfigArgs = TaskConfigArgs()
database: DatabaseArgs = DatabaseArgs()
log_level: str = "info"
@@ -38,7 +38,9 @@
def register_abstraction_config(name: str, node: Any, abstraction_type: str):
config.store(
- name=name, node=node, group=f"mephisto/{abstraction_type}", package="_group_"
+ name=name,
+ node=node,
+ group=f"mephisto/{abstraction_type}",
)
@@ -50,7 +52,6 @@
name="base_mephisto_config",
node=MephistoConfig,
group="mephisto",
- package="_group_",
)
| {"golden_diff": "diff --git a/examples/simple_static_task/static_run_with_onboarding.py b/examples/simple_static_task/static_run_with_onboarding.py\n--- a/examples/simple_static_task/static_run_with_onboarding.py\n+++ b/examples/simple_static_task/static_run_with_onboarding.py\n@@ -43,7 +43,7 @@\n register_script_config(name=\"scriptconfig\", module=TestScriptConfig)\n \n \[email protected](config_name=\"scriptconfig\")\[email protected](config_path=\"hydra_configs\", config_name=\"scriptconfig\")\n def main(cfg: DictConfig) -> None:\n correct_config_answer = cfg.correct_answer\n \ndiff --git a/examples/static_react_task/run_task.py b/examples/static_react_task/run_task.py\n--- a/examples/static_react_task/run_task.py\n+++ b/examples/static_react_task/run_task.py\n@@ -71,7 +71,7 @@\n os.chdir(return_dir)\n \n \[email protected](config_name=\"scriptconfig\")\[email protected](config_path=\"hydra_configs\", config_name=\"scriptconfig\")\n def main(cfg: DictConfig) -> None:\n task_dir = cfg.task_dir\n \ndiff --git a/mephisto/operations/hydra_config.py b/mephisto/operations/hydra_config.py\n--- a/mephisto/operations/hydra_config.py\n+++ b/mephisto/operations/hydra_config.py\n@@ -23,9 +23,9 @@\n \n @dataclass\n class MephistoConfig:\n- blueprint: BlueprintArgs = BlueprintArgs()\n- provider: ProviderArgs = ProviderArgs()\n- architect: ArchitectArgs = ArchitectArgs()\n+ blueprint: BlueprintArgs = MISSING\n+ provider: ProviderArgs = MISSING\n+ architect: ArchitectArgs = MISSING\n task: TaskConfigArgs = TaskConfigArgs()\n database: DatabaseArgs = DatabaseArgs()\n log_level: str = \"info\"\n@@ -38,7 +38,9 @@\n \n def register_abstraction_config(name: str, node: Any, abstraction_type: str):\n config.store(\n- name=name, node=node, group=f\"mephisto/{abstraction_type}\", package=\"_group_\"\n+ name=name,\n+ node=node,\n+ group=f\"mephisto/{abstraction_type}\",\n )\n \n \n@@ -50,7 +52,6 @@\n name=\"base_mephisto_config\",\n node=MephistoConfig,\n group=\"mephisto\",\n- package=\"_group_\",\n )\n", "issue": "Running static_test_script.py: Usage of deprecated keyword in package header\nSeems like this is a compatibility issue with hydra 1.1.0? I've changed static_test_script.py like so:\r\n`@hydra.main(config_path=\".\", config_name=\"scriptconfig\")`\r\nand deleted the first line config/example.yaml as instructed by hydra.cc, after running the script I'm still getting the following error:\r\n`/home/ninacdu/.local/lib/python3.8/site-packages/hydra/core/default_element.py:122: UserWarning: In 'mephisto/provider/mock': Usage of deprecated keyword in package header '# @package _group_'.\r\nSee https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_package_header for more information\r\n warnings.warn(\r\n/home/ninacdu/.local/lib/python3.8/site-packages/hydra/core/default_element.py:122: UserWarning: In 'mephisto/architect/local': Usage of deprecated keyword in package header '# @package _group_'.\r\nSee https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_package_header for more information\r\n warnings.warn(\r\n/home/ninacdu/.local/lib/python3.8/site-packages/hydra/core/default_element.py:122: UserWarning: In 'mephisto/blueprint/static_task': Usage of deprecated keyword in package header '# @package _group_'.\r\nSee https://hydra.cc/docs/next/upgrades/1.0_to_1.1/changes_to_package_header for more information\r\n warnings.warn(\r\nIn 'scriptconfig': Validation error while composing config:\r\nMerge error: BlueprintArgs is not a subclass of StaticHTMLBlueprintArgs. 
value: {'_blueprint_type': '???', 'onboarding_qualification': '???', 'block_qualification': '???'}\r\n full_key: \r\n object_type=dict\r\n\r\nSet the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.\r\n`\r\nIs there something I'm doing wrong here? \n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nfrom mephisto.operations.operator import Operator\nfrom mephisto.operations.utils import get_root_dir\nfrom mephisto.tools.scripts import load_db_and_process_config\nfrom mephisto.abstractions.blueprints.static_html_task.static_html_blueprint import (\n BLUEPRINT_TYPE,\n)\nfrom mephisto.abstractions.blueprints.abstract.static_task.static_blueprint import (\n SharedStaticTaskState,\n)\n\nimport hydra\nfrom omegaconf import DictConfig\nfrom dataclasses import dataclass, field\nfrom typing import List, Any\n\nTASK_DIRECTORY = os.path.join(get_root_dir(), \"examples/simple_static_task\")\nCORRECT_ANSWER = \"apple\"\n\ndefaults = [\n {\"mephisto/blueprint\": BLUEPRINT_TYPE},\n {\"mephisto/architect\": \"local\"},\n {\"mephisto/provider\": \"mock\"},\n {\"conf\": \"onboarding_example\"},\n]\n\nfrom mephisto.operations.hydra_config import RunScriptConfig, register_script_config\n\n\n@dataclass\nclass TestScriptConfig(RunScriptConfig):\n defaults: List[Any] = field(default_factory=lambda: defaults)\n task_dir: str = TASK_DIRECTORY\n correct_answer: str = CORRECT_ANSWER\n\n\nregister_script_config(name=\"scriptconfig\", module=TestScriptConfig)\n\n\[email protected](config_name=\"scriptconfig\")\ndef main(cfg: DictConfig) -> None:\n correct_config_answer = cfg.correct_answer\n\n def onboarding_is_valid(onboarding_data):\n inputs = onboarding_data[\"inputs\"]\n outputs = onboarding_data[\"outputs\"]\n return outputs.get(\"answer\") == correct_config_answer\n\n shared_state = SharedStaticTaskState(\n onboarding_data={\"correct_answer\": correct_config_answer},\n validate_onboarding=onboarding_is_valid,\n )\n\n db, cfg = load_db_and_process_config(cfg)\n operator = Operator(db)\n\n operator.validate_and_run_config(cfg.mephisto, shared_state)\n operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/simple_static_task/static_run_with_onboarding.py"}, {"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom hydra.core.config_store import ConfigStoreWithProvider\nfrom mephisto.abstractions.blueprint import BlueprintArgs\nfrom mephisto.abstractions.architect import ArchitectArgs\nfrom mephisto.abstractions.crowd_provider import ProviderArgs\nfrom mephisto.data_model.task_config import TaskConfigArgs\nfrom dataclasses import dataclass, field\nfrom omegaconf import MISSING\nfrom typing import List, Any\n\nconfig = ConfigStoreWithProvider(\"mephisto\")\n\n\n@dataclass\nclass DatabaseArgs:\n _database_type: str = \"local\" # default DB is local\n\n\n@dataclass\nclass MephistoConfig:\n blueprint: BlueprintArgs = BlueprintArgs()\n provider: ProviderArgs = ProviderArgs()\n architect: ArchitectArgs = ArchitectArgs()\n task: TaskConfigArgs = TaskConfigArgs()\n database: DatabaseArgs = DatabaseArgs()\n log_level: str = \"info\"\n\n\n@dataclass\nclass RunScriptConfig:\n mephisto: MephistoConfig = MephistoConfig()\n\n\ndef register_abstraction_config(name: str, node: Any, abstraction_type: str):\n config.store(\n name=name, node=node, group=f\"mephisto/{abstraction_type}\", package=\"_group_\"\n )\n\n\ndef initialize_named_configs():\n \"\"\"\n Functionality to register the core mephisto configuration structure. Must be done in __init__\n \"\"\"\n config.store(\n name=\"base_mephisto_config\",\n node=MephistoConfig,\n group=\"mephisto\",\n package=\"_group_\",\n )\n\n\ndef register_script_config(name: str, module: Any):\n config.store(name=name, node=module)\n", "path": "mephisto/operations/hydra_config.py"}, {"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport os\nimport shutil\nimport subprocess\nfrom mephisto.operations.operator import Operator\nfrom mephisto.operations.utils import get_root_dir\nfrom mephisto.tools.scripts import load_db_and_process_config\nfrom mephisto.abstractions.blueprints.static_react_task.static_react_blueprint import (\n BLUEPRINT_TYPE,\n)\nfrom mephisto.abstractions.blueprints.abstract.static_task.static_blueprint import (\n SharedStaticTaskState,\n)\n\nimport hydra\nfrom omegaconf import DictConfig\nfrom dataclasses import dataclass, field\nfrom typing import List, Any\n\nTASK_DIRECTORY = os.path.dirname(os.path.abspath(__file__))\n\ndefaults = [\n {\"mephisto/blueprint\": BLUEPRINT_TYPE},\n {\"mephisto/architect\": \"local\"},\n {\"mephisto/provider\": \"mock\"},\n {\"conf\": \"example\"},\n]\n\nfrom mephisto.operations.hydra_config import RunScriptConfig, register_script_config\n\n\n@dataclass\nclass TestScriptConfig(RunScriptConfig):\n defaults: List[Any] = field(default_factory=lambda: defaults)\n task_dir: str = TASK_DIRECTORY\n\n\nregister_script_config(name=\"scriptconfig\", module=TestScriptConfig)\n\n\n# TODO it would be nice if this was automated in the way that it\n# is for ParlAI custom frontend tasks\ndef build_task(task_dir):\n \"\"\"Rebuild the frontend for this task\"\"\"\n\n frontend_source_dir = os.path.join(task_dir, \"webapp\")\n frontend_build_dir = os.path.join(frontend_source_dir, \"build\")\n\n return_dir = os.getcwd()\n os.chdir(frontend_source_dir)\n if os.path.exists(frontend_build_dir):\n shutil.rmtree(frontend_build_dir)\n packages_installed = subprocess.call([\"npm\", \"install\"])\n if packages_installed != 0:\n raise 
Exception(\n \"please make sure npm is installed, otherwise view \"\n \"the above error for more info.\"\n )\n\n webpack_complete = subprocess.call([\"npm\", \"run\", \"dev\"])\n if webpack_complete != 0:\n raise Exception(\n \"Webpack appears to have failed to build your \"\n \"frontend. See the above error for more information.\"\n )\n os.chdir(return_dir)\n\n\[email protected](config_name=\"scriptconfig\")\ndef main(cfg: DictConfig) -> None:\n task_dir = cfg.task_dir\n\n def onboarding_always_valid(onboarding_data):\n return True\n\n shared_state = SharedStaticTaskState(\n static_task_data=[\n {\"text\": \"This text is good text!\"},\n {\"text\": \"This text is bad text!\"},\n ],\n validate_onboarding=onboarding_always_valid,\n )\n\n build_task(task_dir)\n\n db, cfg = load_db_and_process_config(cfg)\n operator = Operator(db)\n\n operator.validate_and_run_config(cfg.mephisto, shared_state)\n operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/static_react_task/run_task.py"}]} | 3,070 | 537 |
gh_patches_debug_40764 | rasdani/github-patches | git_diff | svthalia__concrexit-3115 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Members API doesn't list all members
### Describe the bug
Luko, pk=23 on staging, is listed as a member on the website, but is not a member according to the API.
### How to reproduce
No idea.
check https://staging.thalia.nu/members/profile/23 vs https://staging.thalia.nu/api/v2/members/23/
### Expected behaviour
These URLs should agree on whether Luko is a member.
</issue>
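For context, a minimal sketch of a queryset filter that keeps only current members; the field names `membership` and `until` are assumptions based on the models referenced below, and this is not necessarily the project's actual fix:

```python
from django.db.models import Q
from django.utils import timezone


def filter_current_members(queryset):
    # Keep users with at least one membership that is open-ended or ends in
    # the future; distinct() avoids duplicate rows from the membership join.
    return (
        queryset.exclude(membership=None)
        .filter(
            Q(membership__until__isnull=True)
            | Q(membership__until__gt=timezone.now().date())
        )
        .distinct()
    )
```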
<code>
[start of website/members/api/v2/views.py]
1 """API views of the activemembers app."""
2
3 from django.shortcuts import get_object_or_404
4
5 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
6 from rest_framework import filters as framework_filters
7 from rest_framework.generics import ListAPIView, RetrieveAPIView, UpdateAPIView
8
9 from members.api.v2 import filters
10 from members.api.v2.serializers.member import (
11 MemberCurrentSerializer,
12 MemberListSerializer,
13 MemberSerializer,
14 )
15 from members.models import Member
16 from thaliawebsite.api.openapi import OAuthAutoSchema
17 from thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod
18 from utils.media.services import fetch_thumbnails_db
19
20
21 class MemberListView(ListAPIView):
22 """Returns an overview of all members."""
23
24 serializer_class = MemberListSerializer
25 queryset = (
26 Member.current_members.all()
27 .select_related("profile")
28 .prefetch_related("membership_set")
29 )
30
31 def get_serializer(self, *args, **kwargs):
32 if len(args) > 0:
33 members = args[0]
34 fetch_thumbnails_db([member.profile.photo for member in members])
35 return super().get_serializer(*args, **kwargs)
36
37 permission_classes = [
38 IsAuthenticatedOrTokenHasScope,
39 ]
40 required_scopes = ["members:read"]
41 filter_backends = (
42 framework_filters.OrderingFilter,
43 framework_filters.SearchFilter,
44 filters.MembershipTypeFilter,
45 filters.StartingYearFilter,
46 )
47 ordering_fields = ("first_name", "last_name", "username")
48 search_fields = (
49 "profile__nickname",
50 "profile__starting_year",
51 "first_name",
52 "last_name",
53 "username",
54 )
55
56
57 class MemberDetailView(RetrieveAPIView):
58 """Returns details of a member."""
59
60 serializer_class = MemberSerializer
61 queryset = Member.current_members.all()
62 permission_classes = [
63 IsAuthenticatedOrTokenHasScope,
64 ]
65 required_scopes = ["members:read"]
66
67
68 class MemberCurrentView(MemberDetailView, UpdateAPIView):
69 """Returns details of the authenticated member."""
70
71 serializer_class = MemberCurrentSerializer
72 schema = OAuthAutoSchema(operation_id_base="CurrentMember")
73 permission_classes = [
74 IsAuthenticatedOrTokenHasScopeForMethod,
75 ]
76 required_scopes_per_method = {
77 "GET": ["profile:read"],
78 "PATCH": ["profile:write"],
79 "PUT": ["profile:write"],
80 }
81
82 def get_object(self):
83 return get_object_or_404(Member, pk=self.request.user.pk)
84
[end of website/members/api/v2/views.py]
[start of website/members/api/v2/filters.py]
1 from rest_framework import filters
2
3 from members.models import Membership
4
5
6 class StartingYearFilter(filters.BaseFilterBackend):
7 """Allows you to filter by starting year."""
8
9 def filter_queryset(self, request, queryset, view):
10 starting_year = request.query_params.get("starting_year", None)
11
12 if starting_year:
13 queryset = queryset.filter(profile__starting_year=starting_year)
14
15 return queryset
16
17 def get_schema_operation_parameters(self, view):
18 return [
19 {
20 "name": "starting_year",
21 "required": False,
22 "in": "query",
23 "description": "Filter by starting year",
24 "schema": {
25 "type": "number",
26 },
27 }
28 ]
29
30
31 class MembershipTypeFilter(filters.BaseFilterBackend):
32 """Allows you to filter by membership type."""
33
34 def filter_queryset(self, request, queryset, view):
35 membership_type = request.query_params.get("membership_type", None)
36
37 if membership_type:
38 memberships = Membership.objects.filter(type=membership_type)
39 queryset = queryset.filter(pk__in=memberships.values("user__pk"))
40
41 return queryset
42
43 def get_schema_operation_parameters(self, view):
44 return [
45 {
46 "name": "membership_type",
47 "required": False,
48 "in": "query",
49 "description": "Filter by membership type",
50 "schema": {
51 "type": "string",
52 },
53 }
54 ]
55
[end of website/members/api/v2/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/members/api/v2/filters.py b/website/members/api/v2/filters.py
--- a/website/members/api/v2/filters.py
+++ b/website/members/api/v2/filters.py
@@ -1,4 +1,9 @@
-from rest_framework import filters
+from datetime import datetime
+
+from django.db.models import Q
+from django.utils import timezone
+
+from rest_framework import filters, serializers
from members.models import Membership
@@ -28,6 +33,62 @@
]
+class FormerMemberFilter(filters.BaseFilterBackend):
+ def filter_queryset(self, request, queryset, view):
+ former = request.query_params.get("former", "false")
+
+ if former == "false":
+ # Filter out former members
+ return (
+ queryset.exclude(membership=None)
+ .filter(
+ Q(membership__until__isnull=True)
+ | Q(membership__until__gt=timezone.now().date())
+ )
+ .distinct()
+ )
+ elif former == "true":
+ # Filter out current members
+
+ memberships_query = Q(until__gt=datetime.now()) | Q(until=None)
+ members_query = ~Q(id=None)
+
+ # Filter out all current active memberships
+ memberships_query &= Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)
+ memberships = Membership.objects.filter(memberships_query)
+ members_query &= ~Q(pk__in=memberships.values("user__pk"))
+
+ memberships_query = Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)
+ memberships = Membership.objects.filter(memberships_query)
+ all_memberships = Membership.objects.all()
+ # Only keep members that were once members, or are legacy users
+ # that do not have any memberships at all
+ members_query &= Q(pk__in=memberships.values("user__pk")) | ~Q(
+ pk__in=all_memberships.values("user__pk")
+ )
+
+ return queryset.filter(members_query)
+ elif former == "any":
+ # Include both former and current members
+ return queryset
+ else:
+ raise serializers.ValidationError("invalid former parameter")
+
+ def get_schema_operation_parameters(self, view):
+ return [
+ {
+ "name": "former",
+ "required": False,
+ "in": "query",
+ "description": "Include former members or only former members",
+ "schema": {
+ "type": "string",
+ "enum": ["true", "false", "any"],
+ },
+ }
+ ]
+
+
class MembershipTypeFilter(filters.BaseFilterBackend):
"""Allows you to filter by membership type."""
diff --git a/website/members/api/v2/views.py b/website/members/api/v2/views.py
--- a/website/members/api/v2/views.py
+++ b/website/members/api/v2/views.py
@@ -23,7 +23,7 @@
serializer_class = MemberListSerializer
queryset = (
- Member.current_members.all()
+ Member.objects.all()
.select_related("profile")
.prefetch_related("membership_set")
)
@@ -43,6 +43,7 @@
framework_filters.SearchFilter,
filters.MembershipTypeFilter,
filters.StartingYearFilter,
+ filters.FormerMemberFilter,
)
ordering_fields = ("first_name", "last_name", "username")
search_fields = (
@@ -58,7 +59,7 @@
"""Returns details of a member."""
serializer_class = MemberSerializer
- queryset = Member.current_members.all()
+ queryset = Member.objects.all()
permission_classes = [
IsAuthenticatedOrTokenHasScope,
]
| {"golden_diff": "diff --git a/website/members/api/v2/filters.py b/website/members/api/v2/filters.py\n--- a/website/members/api/v2/filters.py\n+++ b/website/members/api/v2/filters.py\n@@ -1,4 +1,9 @@\n-from rest_framework import filters\n+from datetime import datetime\n+\n+from django.db.models import Q\n+from django.utils import timezone\n+\n+from rest_framework import filters, serializers\n \n from members.models import Membership\n \n@@ -28,6 +33,62 @@\n ]\n \n \n+class FormerMemberFilter(filters.BaseFilterBackend):\n+ def filter_queryset(self, request, queryset, view):\n+ former = request.query_params.get(\"former\", \"false\")\n+\n+ if former == \"false\":\n+ # Filter out former members\n+ return (\n+ queryset.exclude(membership=None)\n+ .filter(\n+ Q(membership__until__isnull=True)\n+ | Q(membership__until__gt=timezone.now().date())\n+ )\n+ .distinct()\n+ )\n+ elif former == \"true\":\n+ # Filter out current members\n+\n+ memberships_query = Q(until__gt=datetime.now()) | Q(until=None)\n+ members_query = ~Q(id=None)\n+\n+ # Filter out all current active memberships\n+ memberships_query &= Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n+ memberships = Membership.objects.filter(memberships_query)\n+ members_query &= ~Q(pk__in=memberships.values(\"user__pk\"))\n+\n+ memberships_query = Q(type=Membership.MEMBER) | Q(type=Membership.HONORARY)\n+ memberships = Membership.objects.filter(memberships_query)\n+ all_memberships = Membership.objects.all()\n+ # Only keep members that were once members, or are legacy users\n+ # that do not have any memberships at all\n+ members_query &= Q(pk__in=memberships.values(\"user__pk\")) | ~Q(\n+ pk__in=all_memberships.values(\"user__pk\")\n+ )\n+\n+ return queryset.filter(members_query)\n+ elif former == \"any\":\n+ # Include both former and current members\n+ return queryset\n+ else:\n+ raise serializers.ValidationError(\"invalid former parameter\")\n+\n+ def get_schema_operation_parameters(self, view):\n+ return [\n+ {\n+ \"name\": \"former\",\n+ \"required\": False,\n+ \"in\": \"query\",\n+ \"description\": \"Include former members or only former members\",\n+ \"schema\": {\n+ \"type\": \"string\",\n+ \"enum\": [\"true\", \"false\", \"any\"],\n+ },\n+ }\n+ ]\n+\n+\n class MembershipTypeFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by membership type.\"\"\"\n \ndiff --git a/website/members/api/v2/views.py b/website/members/api/v2/views.py\n--- a/website/members/api/v2/views.py\n+++ b/website/members/api/v2/views.py\n@@ -23,7 +23,7 @@\n \n serializer_class = MemberListSerializer\n queryset = (\n- Member.current_members.all()\n+ Member.objects.all()\n .select_related(\"profile\")\n .prefetch_related(\"membership_set\")\n )\n@@ -43,6 +43,7 @@\n framework_filters.SearchFilter,\n filters.MembershipTypeFilter,\n filters.StartingYearFilter,\n+ filters.FormerMemberFilter,\n )\n ordering_fields = (\"first_name\", \"last_name\", \"username\")\n search_fields = (\n@@ -58,7 +59,7 @@\n \"\"\"Returns details of a member.\"\"\"\n \n serializer_class = MemberSerializer\n- queryset = Member.current_members.all()\n+ queryset = Member.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n", "issue": "Members API doesn't list all members\n### Describe the bug\r\nLuko, pk=23 on staging, is listed as member on the website, but is not a member according to the API\r\n\r\n### How to reproduce\r\nNo idea.\r\ncheck https://staging.thalia.nu/members/profile/23 vs https://staging.thalia.nu/api/v2/members/23/\r\n\r\n### Expected 
behaviour\r\nThese urls should agree on the memberness of Luko\r\n\r\n\n", "before_files": [{"content": "\"\"\"API views of the activemembers app.\"\"\"\n\nfrom django.shortcuts import get_object_or_404\n\nfrom oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework import filters as framework_filters\nfrom rest_framework.generics import ListAPIView, RetrieveAPIView, UpdateAPIView\n\nfrom members.api.v2 import filters\nfrom members.api.v2.serializers.member import (\n MemberCurrentSerializer,\n MemberListSerializer,\n MemberSerializer,\n)\nfrom members.models import Member\nfrom thaliawebsite.api.openapi import OAuthAutoSchema\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\nfrom utils.media.services import fetch_thumbnails_db\n\n\nclass MemberListView(ListAPIView):\n \"\"\"Returns an overview of all members.\"\"\"\n\n serializer_class = MemberListSerializer\n queryset = (\n Member.current_members.all()\n .select_related(\"profile\")\n .prefetch_related(\"membership_set\")\n )\n\n def get_serializer(self, *args, **kwargs):\n if len(args) > 0:\n members = args[0]\n fetch_thumbnails_db([member.profile.photo for member in members])\n return super().get_serializer(*args, **kwargs)\n\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"members:read\"]\n filter_backends = (\n framework_filters.OrderingFilter,\n framework_filters.SearchFilter,\n filters.MembershipTypeFilter,\n filters.StartingYearFilter,\n )\n ordering_fields = (\"first_name\", \"last_name\", \"username\")\n search_fields = (\n \"profile__nickname\",\n \"profile__starting_year\",\n \"first_name\",\n \"last_name\",\n \"username\",\n )\n\n\nclass MemberDetailView(RetrieveAPIView):\n \"\"\"Returns details of a member.\"\"\"\n\n serializer_class = MemberSerializer\n queryset = Member.current_members.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n ]\n required_scopes = [\"members:read\"]\n\n\nclass MemberCurrentView(MemberDetailView, UpdateAPIView):\n \"\"\"Returns details of the authenticated member.\"\"\"\n\n serializer_class = MemberCurrentSerializer\n schema = OAuthAutoSchema(operation_id_base=\"CurrentMember\")\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n ]\n required_scopes_per_method = {\n \"GET\": [\"profile:read\"],\n \"PATCH\": [\"profile:write\"],\n \"PUT\": [\"profile:write\"],\n }\n\n def get_object(self):\n return get_object_or_404(Member, pk=self.request.user.pk)\n", "path": "website/members/api/v2/views.py"}, {"content": "from rest_framework import filters\n\nfrom members.models import Membership\n\n\nclass StartingYearFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by starting year.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n starting_year = request.query_params.get(\"starting_year\", None)\n\n if starting_year:\n queryset = queryset.filter(profile__starting_year=starting_year)\n\n return queryset\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"starting_year\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by starting year\",\n \"schema\": {\n \"type\": \"number\",\n },\n }\n ]\n\n\nclass MembershipTypeFilter(filters.BaseFilterBackend):\n \"\"\"Allows you to filter by membership type.\"\"\"\n\n def filter_queryset(self, request, queryset, view):\n membership_type = request.query_params.get(\"membership_type\", None)\n\n if membership_type:\n memberships = 
Membership.objects.filter(type=membership_type)\n queryset = queryset.filter(pk__in=memberships.values(\"user__pk\"))\n\n return queryset\n\n def get_schema_operation_parameters(self, view):\n return [\n {\n \"name\": \"membership_type\",\n \"required\": False,\n \"in\": \"query\",\n \"description\": \"Filter by membership type\",\n \"schema\": {\n \"type\": \"string\",\n },\n }\n ]\n", "path": "website/members/api/v2/filters.py"}]} | 1,771 | 845 |
gh_patches_debug_8184 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-2766 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dbt compile fails against redshift when using multi-threading
### Describe the bug
When I run `dbt compile` against our redshift data warehouse the command fails with the error
```
KeyError: 'endpoint_resolver'
```
The error only occurs when threads > 1 and `method: iam` is used.
From what I can gather, this is happening because the boto session object is not thread safe and is being accessed from multiple threads without protection; this happens during the call to get_tmp_iam_cluster_credentials.
### Steps To Reproduce
* Create a dbt project containing a significant number of models.
* Configure the target in the profiles.yml file to point to a redshift dwh, with threads > 1 and method iam:
```
type: redshift
method: iam
threads: 8
host: xxxx.redshift.amazonaws.com
cluster_id: xxx
port: 5439
user: xxx
dbname: xxx
schema: xxx
```
* Run `dbt compile`
### Expected behavior
That `dbt compile` succeeds.
### Screenshots and log output
```
2020-09-14 11:15:23.743840 (MainThread): Traceback (most recent call last):
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/postgres/connections.py", line 46, in exception_handler
yield
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/sql/connections.py", line 76, in add_query
cursor = connection.handle.cursor()
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/contracts/connection.py", line 69, in handle
self._handle.resolve(self)
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/contracts/connection.py", line 90, in resolve
return self.opener(connection)
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/postgres/connections.py", line 77, in open
credentials = cls.get_credentials(connection.credentials)
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/redshift/connections.py", line 152, in get_credentials
return cls.get_tmp_iam_cluster_credentials(credentials)
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/redshift/connections.py", line 128, in get_tmp_iam_cluster_credentials
credentials.db_groups,
File "/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/redshift/connections.py", line 93, in fetch_cluster_credentials
boto_client = boto3.client('redshift')
File "/Users/xxx/venv/lib/python3.7/site-packages/boto3/__init__.py", line 91, in client
return _get_default_session().client(*args, **kwargs)
File "/Users/xxx/venv/lib/python3.7/site-packages/boto3/session.py", line 263, in client
aws_session_token=aws_session_token, config=config)
File "/Users/xxx/venv/lib/python3.7/site-packages/botocore/session.py", line 828, in create_client
endpoint_resolver = self._get_internal_component('endpoint_resolver')
File "/Users/xxx/venv/lib/python3.7/site-packages/botocore/session.py", line 695, in _get_internal_component
return self._internal_components.get_component(name)
File "/Users/xxx/venv/lib/python3.7/site-packages/botocore/session.py", line 907, in get_component
del self._deferred[name]
KeyError: 'endpoint_resolver'
```
Sometimes the error returned is
```
KeyError: 'credential_provider'
```
but the stack trace is identical.
### System information
**Which database are you using dbt with?**
- [ ] postgres
- [x] redshift
- [ ] bigquery
- [ ] snowflake
- [ ] other (specify: ____________)
**The output of `dbt --version`:**
```
installed version: 0.17.2
latest version: 0.18.0
Your version of dbt is out of date! You can find instructions for upgrading here:
https://docs.getdbt.com/docs/installation
Plugins:
- bigquery: 0.17.2
- snowflake: 0.17.2
- redshift: 0.17.2
- postgres: 0.17.2
```
**The operating system you're using:**
macOS Catalina
**The output of `python --version`:**
Python 3.7.3
### Additional context
The error surfaced after I bumped dbt from version 0.14.2 to 0.17.2
</issue>
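For context, a minimal sketch of the thread-safety workaround the traceback points at: building a fresh `boto3.Session()` per call instead of relying on boto3's shared default session (the helper name here is illustrative only):

```python
import boto3


def make_redshift_client(iam_profile=None):
    # boto3.client("redshift") uses the lazily-initialised default session,
    # which is not thread safe; a Session created inside the call is private
    # to the calling thread.
    if iam_profile is None:
        session = boto3.Session()
    else:
        session = boto3.Session(profile_name=iam_profile)
    return session.client("redshift")
```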
<code>
[start of plugins/redshift/dbt/adapters/redshift/connections.py]
1 from multiprocessing import Lock
2 from contextlib import contextmanager
3 from typing import NewType
4
5 from dbt.adapters.postgres import PostgresConnectionManager
6 from dbt.adapters.postgres import PostgresCredentials
7 from dbt.logger import GLOBAL_LOGGER as logger # noqa
8 import dbt.exceptions
9 import dbt.flags
10
11 import boto3
12
13 from hologram import FieldEncoder, JsonSchemaMixin
14 from hologram.helpers import StrEnum
15
16 from dataclasses import dataclass, field
17 from typing import Optional, List
18
19 drop_lock: Lock = dbt.flags.MP_CONTEXT.Lock()
20
21
22 IAMDuration = NewType('IAMDuration', int)
23
24
25 class IAMDurationEncoder(FieldEncoder):
26 @property
27 def json_schema(self):
28 return {'type': 'integer', 'minimum': 0, 'maximum': 65535}
29
30
31 JsonSchemaMixin.register_field_encoders({IAMDuration: IAMDurationEncoder()})
32
33
34 class RedshiftConnectionMethod(StrEnum):
35 DATABASE = 'database'
36 IAM = 'iam'
37
38
39 @dataclass
40 class RedshiftCredentials(PostgresCredentials):
41 method: RedshiftConnectionMethod = RedshiftConnectionMethod.DATABASE
42 password: Optional[str] = None
43 cluster_id: Optional[str] = field(
44 default=None,
45 metadata={'description': 'If using IAM auth, the name of the cluster'},
46 )
47 iam_profile: Optional[str] = None
48 iam_duration_seconds: int = 900
49 search_path: Optional[str] = None
50 keepalives_idle: int = 240
51 autocreate: bool = False
52 db_groups: List[str] = field(default_factory=list)
53
54 @property
55 def type(self):
56 return 'redshift'
57
58 def _connection_keys(self):
59 keys = super()._connection_keys()
60 return keys + (
61 'method',
62 'cluster_id',
63 'iam_profile',
64 'iam_duration_seconds'
65 )
66
67
68 class RedshiftConnectionManager(PostgresConnectionManager):
69 TYPE = 'redshift'
70
71 @contextmanager
72 def fresh_transaction(self, name=None):
73 """On entrance to this context manager, hold an exclusive lock and
74 create a fresh transaction for redshift, then commit and begin a new
75 one before releasing the lock on exit.
76
77 See drop_relation in RedshiftAdapter for more information.
78
79 :param Optional[str] name: The name of the connection to use, or None
80 to use the default.
81 """
82 with drop_lock:
83 connection = self.get_thread_connection()
84
85 if connection.transaction_open:
86 self.commit()
87
88 self.begin()
89 yield
90
91 self.commit()
92 self.begin()
93
94 @classmethod
95 def fetch_cluster_credentials(cls, db_user, db_name, cluster_id,
96 iam_profile, duration_s, autocreate,
97 db_groups):
98 """Fetches temporary login credentials from AWS. The specified user
99 must already exist in the database, or else an error will occur"""
100
101 if iam_profile is None:
102 boto_client = boto3.client('redshift')
103 else:
104 logger.debug("Connecting to Redshift using 'IAM'" +
105 f"with profile {iam_profile}")
106 boto_session = boto3.Session(
107 profile_name=iam_profile
108 )
109 boto_client = boto_session.client('redshift')
110
111 try:
112 return boto_client.get_cluster_credentials(
113 DbUser=db_user,
114 DbName=db_name,
115 ClusterIdentifier=cluster_id,
116 DurationSeconds=duration_s,
117 AutoCreate=autocreate,
118 DbGroups=db_groups,)
119
120 except boto_client.exceptions.ClientError as e:
121 raise dbt.exceptions.FailedToConnectException(
122 "Unable to get temporary Redshift cluster credentials: {}"
123 .format(e))
124
125 @classmethod
126 def get_tmp_iam_cluster_credentials(cls, credentials):
127 cluster_id = credentials.cluster_id
128
129 # default via:
130 # boto3.readthedocs.io/en/latest/reference/services/redshift.html
131 iam_duration_s = credentials.iam_duration_seconds
132
133 if not cluster_id:
134 raise dbt.exceptions.FailedToConnectException(
135 "'cluster_id' must be provided in profile if IAM "
136 "authentication method selected")
137
138 cluster_creds = cls.fetch_cluster_credentials(
139 credentials.user,
140 credentials.database,
141 credentials.cluster_id,
142 credentials.iam_profile,
143 iam_duration_s,
144 credentials.autocreate,
145 credentials.db_groups,
146 )
147
148 # replace username and password with temporary redshift credentials
149 return credentials.replace(user=cluster_creds.get('DbUser'),
150 password=cluster_creds.get('DbPassword'))
151
152 @classmethod
153 def get_credentials(cls, credentials):
154 method = credentials.method
155
156 # Support missing 'method' for backwards compatibility
157 if method == 'database' or method is None:
158 logger.debug("Connecting to Redshift using 'database' credentials")
159 # this requirement is really annoying to encode into json schema,
160 # so validate it here
161 if credentials.password is None:
162 raise dbt.exceptions.FailedToConnectException(
163 "'password' field is required for 'database' credentials"
164 )
165 return credentials
166
167 elif method == 'iam':
168 logger.debug("Connecting to Redshift using 'IAM' credentials")
169 return cls.get_tmp_iam_cluster_credentials(credentials)
170
171 else:
172 raise dbt.exceptions.FailedToConnectException(
173 "Invalid 'method' in profile: '{}'".format(method))
174
[end of plugins/redshift/dbt/adapters/redshift/connections.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/redshift/dbt/adapters/redshift/connections.py b/plugins/redshift/dbt/adapters/redshift/connections.py
--- a/plugins/redshift/dbt/adapters/redshift/connections.py
+++ b/plugins/redshift/dbt/adapters/redshift/connections.py
@@ -99,7 +99,8 @@
must already exist in the database, or else an error will occur"""
if iam_profile is None:
- boto_client = boto3.client('redshift')
+ session = boto3.Session()
+ boto_client = session.client("redshift")
else:
logger.debug("Connecting to Redshift using 'IAM'" +
f"with profile {iam_profile}")
| {"golden_diff": "diff --git a/plugins/redshift/dbt/adapters/redshift/connections.py b/plugins/redshift/dbt/adapters/redshift/connections.py\n--- a/plugins/redshift/dbt/adapters/redshift/connections.py\n+++ b/plugins/redshift/dbt/adapters/redshift/connections.py\n@@ -99,7 +99,8 @@\n must already exist in the database, or else an error will occur\"\"\"\n \n if iam_profile is None:\n- boto_client = boto3.client('redshift')\n+ session = boto3.Session()\n+ boto_client = session.client(\"redshift\")\n else:\n logger.debug(\"Connecting to Redshift using 'IAM'\" +\n f\"with profile {iam_profile}\")\n", "issue": "dbt compile fails against redshift when using multi-threading\n### Describe the bug\r\nWhen I run `dbt compile` against our redshift data warehouse the command fails with the error \r\n```\r\nKeyError: 'endpoint_resolver'\r\n```\r\nThe error only occurs when threads > 1 and `method: iam` is used.\r\n\r\nFrom what I can gather this is happening because the boto session object is not thread safe and it is being accessed from multiple threads without being protected, and this happens during the call to get_tmp_iam_cluster_credentials.\r\n\r\n### Steps To Reproduce\r\n* Create a dbt project containing a significant number of models.\r\n* Configure the target in the profiles.yml file to point to a redshift dwh, with threads > 1 and method iam:\r\n```\r\n type: redshift\r\n method: iam\r\n threads: 8\r\n host: xxxx.redshift.amazonaws.com\r\n cluster_id: xxx\r\n port: 5439\r\n user: xxx\r\n dbname: xxx\r\n schema: xxx\r\n```\r\n* Run `dbt compile`\r\n\r\n### Expected behavior\r\nThat `dbt compile` succeeds.\r\n\r\n### Screenshots and log output\r\n\r\n```\r\n2020-09-14 11:15:23.743840 (MainThread): Traceback (most recent call last):\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/postgres/connections.py\", line 46, in exception_handler\r\n yield\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/sql/connections.py\", line 76, in add_query\r\n cursor = connection.handle.cursor()\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/dbt/contracts/connection.py\", line 69, in handle\r\n self._handle.resolve(self)\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/dbt/contracts/connection.py\", line 90, in resolve\r\n return self.opener(connection)\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/postgres/connections.py\", line 77, in open\r\n credentials = cls.get_credentials(connection.credentials)\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/redshift/connections.py\", line 152, in get_credentials\r\n return cls.get_tmp_iam_cluster_credentials(credentials)\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/redshift/connections.py\", line 128, in get_tmp_iam_cluster_credentials\r\n credentials.db_groups,\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/dbt/adapters/redshift/connections.py\", line 93, in fetch_cluster_credentials\r\n boto_client = boto3.client('redshift')\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/boto3/__init__.py\", line 91, in client\r\n return _get_default_session().client(*args, **kwargs)\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/boto3/session.py\", line 263, in client\r\n aws_session_token=aws_session_token, config=config)\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/botocore/session.py\", line 828, in create_client\r\n endpoint_resolver = self._get_internal_component('endpoint_resolver')\r\n File 
\"/Users/xxx/venv/lib/python3.7/site-packages/botocore/session.py\", line 695, in _get_internal_component\r\n return self._internal_components.get_component(name)\r\n File \"/Users/xxx/venv/lib/python3.7/site-packages/botocore/session.py\", line 907, in get_component\r\n del self._deferred[name]\r\nKeyError: 'endpoint_resolver'\r\n```\r\n\r\nSometimes the error returned is \r\n```\r\nKeyError: 'credential_provider'\r\n```\r\nbut the stack trace is identical.\r\n\r\n### System information\r\n**Which database are you using dbt with?**\r\n- [ ] postgres\r\n- [x] redshift\r\n- [ ] bigquery\r\n- [ ] snowflake\r\n- [ ] other (specify: ____________)\r\n\r\n\r\n**The output of `dbt --version`:**\r\n```\r\ninstalled version: 0.17.2\r\n latest version: 0.18.0\r\n\r\nYour version of dbt is out of date! You can find instructions for upgrading here:\r\nhttps://docs.getdbt.com/docs/installation\r\n\r\nPlugins:\r\n - bigquery: 0.17.2\r\n - snowflake: 0.17.2\r\n - redshift: 0.17.2\r\n - postgres: 0.17.2\r\n```\r\n\r\n**The operating system you're using:**\r\nmacOS Catalina\r\n\r\n**The output of `python --version`:**\r\nPython 3.7.3\r\n\r\n### Additional context\r\nThe error surfaced after I bumped dbt from version 0.14.2 to 0.17.2\r\n\n", "before_files": [{"content": "from multiprocessing import Lock\nfrom contextlib import contextmanager\nfrom typing import NewType\n\nfrom dbt.adapters.postgres import PostgresConnectionManager\nfrom dbt.adapters.postgres import PostgresCredentials\nfrom dbt.logger import GLOBAL_LOGGER as logger # noqa\nimport dbt.exceptions\nimport dbt.flags\n\nimport boto3\n\nfrom hologram import FieldEncoder, JsonSchemaMixin\nfrom hologram.helpers import StrEnum\n\nfrom dataclasses import dataclass, field\nfrom typing import Optional, List\n\ndrop_lock: Lock = dbt.flags.MP_CONTEXT.Lock()\n\n\nIAMDuration = NewType('IAMDuration', int)\n\n\nclass IAMDurationEncoder(FieldEncoder):\n @property\n def json_schema(self):\n return {'type': 'integer', 'minimum': 0, 'maximum': 65535}\n\n\nJsonSchemaMixin.register_field_encoders({IAMDuration: IAMDurationEncoder()})\n\n\nclass RedshiftConnectionMethod(StrEnum):\n DATABASE = 'database'\n IAM = 'iam'\n\n\n@dataclass\nclass RedshiftCredentials(PostgresCredentials):\n method: RedshiftConnectionMethod = RedshiftConnectionMethod.DATABASE\n password: Optional[str] = None\n cluster_id: Optional[str] = field(\n default=None,\n metadata={'description': 'If using IAM auth, the name of the cluster'},\n )\n iam_profile: Optional[str] = None\n iam_duration_seconds: int = 900\n search_path: Optional[str] = None\n keepalives_idle: int = 240\n autocreate: bool = False\n db_groups: List[str] = field(default_factory=list)\n\n @property\n def type(self):\n return 'redshift'\n\n def _connection_keys(self):\n keys = super()._connection_keys()\n return keys + (\n 'method',\n 'cluster_id',\n 'iam_profile',\n 'iam_duration_seconds'\n )\n\n\nclass RedshiftConnectionManager(PostgresConnectionManager):\n TYPE = 'redshift'\n\n @contextmanager\n def fresh_transaction(self, name=None):\n \"\"\"On entrance to this context manager, hold an exclusive lock and\n create a fresh transaction for redshift, then commit and begin a new\n one before releasing the lock on exit.\n\n See drop_relation in RedshiftAdapter for more information.\n\n :param Optional[str] name: The name of the connection to use, or None\n to use the default.\n \"\"\"\n with drop_lock:\n connection = self.get_thread_connection()\n\n if connection.transaction_open:\n self.commit()\n\n self.begin()\n yield\n\n 
self.commit()\n self.begin()\n\n @classmethod\n def fetch_cluster_credentials(cls, db_user, db_name, cluster_id,\n iam_profile, duration_s, autocreate,\n db_groups):\n \"\"\"Fetches temporary login credentials from AWS. The specified user\n must already exist in the database, or else an error will occur\"\"\"\n\n if iam_profile is None:\n boto_client = boto3.client('redshift')\n else:\n logger.debug(\"Connecting to Redshift using 'IAM'\" +\n f\"with profile {iam_profile}\")\n boto_session = boto3.Session(\n profile_name=iam_profile\n )\n boto_client = boto_session.client('redshift')\n\n try:\n return boto_client.get_cluster_credentials(\n DbUser=db_user,\n DbName=db_name,\n ClusterIdentifier=cluster_id,\n DurationSeconds=duration_s,\n AutoCreate=autocreate,\n DbGroups=db_groups,)\n\n except boto_client.exceptions.ClientError as e:\n raise dbt.exceptions.FailedToConnectException(\n \"Unable to get temporary Redshift cluster credentials: {}\"\n .format(e))\n\n @classmethod\n def get_tmp_iam_cluster_credentials(cls, credentials):\n cluster_id = credentials.cluster_id\n\n # default via:\n # boto3.readthedocs.io/en/latest/reference/services/redshift.html\n iam_duration_s = credentials.iam_duration_seconds\n\n if not cluster_id:\n raise dbt.exceptions.FailedToConnectException(\n \"'cluster_id' must be provided in profile if IAM \"\n \"authentication method selected\")\n\n cluster_creds = cls.fetch_cluster_credentials(\n credentials.user,\n credentials.database,\n credentials.cluster_id,\n credentials.iam_profile,\n iam_duration_s,\n credentials.autocreate,\n credentials.db_groups,\n )\n\n # replace username and password with temporary redshift credentials\n return credentials.replace(user=cluster_creds.get('DbUser'),\n password=cluster_creds.get('DbPassword'))\n\n @classmethod\n def get_credentials(cls, credentials):\n method = credentials.method\n\n # Support missing 'method' for backwards compatibility\n if method == 'database' or method is None:\n logger.debug(\"Connecting to Redshift using 'database' credentials\")\n # this requirement is really annoying to encode into json schema,\n # so validate it here\n if credentials.password is None:\n raise dbt.exceptions.FailedToConnectException(\n \"'password' field is required for 'database' credentials\"\n )\n return credentials\n\n elif method == 'iam':\n logger.debug(\"Connecting to Redshift using 'IAM' credentials\")\n return cls.get_tmp_iam_cluster_credentials(credentials)\n\n else:\n raise dbt.exceptions.FailedToConnectException(\n \"Invalid 'method' in profile: '{}'\".format(method))\n", "path": "plugins/redshift/dbt/adapters/redshift/connections.py"}]} | 3,224 | 151 |
gh_patches_debug_25139 | rasdani/github-patches | git_diff | biolab__orange3-2295 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove Nans from Sparse Data
<!--
This is an issue template. Please fill in the relevant details in the
sections below.
-->
##### Orange version
<!-- From menu _Help→About→Version_ or code `Orange.version.full_version` -->
3.5.dev
##### Expected behavior
Logistic Regression works on sparse data that contains nan values.
##### Actual behavior
It crashes with `Input contains Nans, ...`
##### Steps to reproduce the behavior
Load `Election-2016-Tweets.tab` in Corpus. Use just first 100 rows for speed. Pass corpus through bag of words into test & score and use 10-fold cross validation. Add logistic regression on input and test & score crashes.
##### Additional info (worksheets, data, screenshots, ...)
</issue>
<code>
[start of Orange/preprocess/impute.py]
1 import numpy
2 from scipy.sparse import issparse
3
4 import Orange.data
5 from Orange.statistics import distribution, basic_stats
6 from Orange.util import Reprable
7 from .transformation import Transformation, Lookup
8
9 __all__ = ["ReplaceUnknowns", "Average", "DoNotImpute", "DropInstances",
10 "Model", "AsValue", "Random", "Default"]
11
12
13 class ReplaceUnknowns(Transformation):
14 """
15 A column transformation which replaces unknown values with a fixed `value`.
16
17 Parameters
18 ----------
19 variable : Orange.data.Variable
20 The target variable for imputation.
21 value : int or float
22 The value with which to replace the unknown values
23 """
24 def __init__(self, variable, value=0):
25 super().__init__(variable)
26 self.value = value
27
28 def transform(self, c):
29 if issparse(c): # sparse does not have unknown values
30 return c
31 else:
32 return numpy.where(numpy.isnan(c), self.value, c)
33
34
35 class BaseImputeMethod(Reprable):
36 name = ""
37 short_name = ""
38 description = ""
39 format = "{var.name} -> {self.short_name}"
40 columns_only = False
41
42 def __call__(self, data, variable):
43 """ Imputes table along variable column.
44
45 Args:
46 data (Table): A table to impute.
47 variable (Variable): Variable for completing missing values.
48
49 Returns:
50 A new Variable instance with completed missing values or
51 a array mask of rows to drop out.
52 """
53 raise NotImplementedError
54
55 def format_variable(self, var):
56 return self.format.format(var=var, self=self)
57
58 def __str__(self):
59 return self.name
60
61 def copy(self):
62 return self
63
64 @classmethod
65 def supports_variable(cls, variable):
66 return True
67
68
69 class DoNotImpute(BaseImputeMethod):
70 name = "Don't impute"
71 short_name = "leave"
72 description = ""
73
74 def __call__(self, data, variable):
75 return variable
76
77
78 class DropInstances(BaseImputeMethod):
79 name = "Remove instances with unknown values"
80 short_name = "drop"
81 description = ""
82
83 def __call__(self, data, variable):
84 index = data.domain.index(variable)
85 return numpy.isnan(data[:, index]).reshape(-1)
86
87
88 class Average(BaseImputeMethod):
89 name = "Average/Most frequent"
90 short_name = "average"
91 description = "Replace with average/mode of the column"
92
93 def __call__(self, data, variable, value=None):
94 variable = data.domain[variable]
95 if value is None:
96 if variable.is_continuous:
97 stats = basic_stats.BasicStats(data, variable)
98 value = stats.mean
99 elif variable.is_discrete:
100 dist = distribution.get_distribution(data, variable)
101 value = dist.modus()
102 else:
103 raise TypeError("Variable must be continuous or discrete")
104
105 a = variable.copy(compute_value=ReplaceUnknowns(variable, value))
106 a.to_sql = ImputeSql(variable, value)
107 return a
108
109
110 class ImputeSql(Reprable):
111 def __init__(self, var, default):
112 self.var = var
113 self.default = default
114
115 def __call__(self):
116 return 'coalesce(%s, %s)' % (self.var.to_sql(), str(self.default))
117
118
119 class Default(BaseImputeMethod):
120 name = "Value"
121 short_name = "value"
122 description = ""
123 columns_only = True
124 format = '{var} -> {self.default}'
125
126 def __init__(self, default=0):
127 self.default = default
128
129 def __call__(self, data, variable, *, default=None):
130 variable = data.domain[variable]
131 default = default if default is not None else self.default
132 return variable.copy(compute_value=ReplaceUnknowns(variable, default))
133
134 def copy(self):
135 return Default(self.default)
136
137
138 class ReplaceUnknownsModel(Reprable):
139 """
140 Replace unknown values with predicted values using a `Orange.base.Model`
141
142 Parameters
143 ----------
144 variable : Orange.data.Variable
145 The target variable for the imputation.
146 model : Orange.base.Model
147 A fitted model predicting `variable`.
148 """
149 def __init__(self, variable, model):
150 assert model.domain.class_var == variable
151 self.variable = variable
152 self.model = model
153
154 def __call__(self, data):
155 if isinstance(data, Orange.data.Instance):
156 column = numpy.array([float(data[self.variable])])
157 else:
158 column = numpy.array(data.get_column_view(self.variable)[0],
159 copy=True)
160
161 mask = numpy.isnan(column)
162 if not numpy.any(mask):
163 return column
164
165 if isinstance(data, Orange.data.Instance):
166 predicted = self.model(data)
167 else:
168 predicted = self.model(data[mask])
169 column[mask] = predicted
170 return column
171
172
173 class Model(BaseImputeMethod):
174 _name = "Model-based imputer"
175 short_name = "model"
176 description = ""
177 format = BaseImputeMethod.format + " ({self.learner.name})"
178 @property
179 def name(self):
180 return "{} ({})".format(self._name, getattr(self.learner, 'name', ''))
181
182 def __init__(self, learner):
183 self.learner = learner
184
185 def __call__(self, data, variable):
186 variable = data.domain[variable]
187 domain = domain_with_class_var(data.domain, variable)
188
189 if self.learner.check_learner_adequacy(domain):
190 data = data.transform(domain)
191 model = self.learner(data)
192 assert model.domain.class_var == variable
193 return variable.copy(
194 compute_value=ReplaceUnknownsModel(variable, model))
195 else:
196 raise ValueError("`{}` doesn't support domain type"
197 .format(self.learner.name))
198
199 def copy(self):
200 return Model(self.learner)
201
202 def supports_variable(self, variable):
203 domain = Orange.data.Domain([], class_vars=variable)
204 return self.learner.check_learner_adequacy(domain)
205
206
207 def domain_with_class_var(domain, class_var):
208 """
209 Return a domain with class_var as output domain.class_var.
210
211 If class_var is in the input domain's attributes it is removed from the
212 output's domain.attributes.
213 """
214 if domain.class_var is class_var:
215 return domain
216 elif class_var in domain.attributes:
217 attrs = [var for var in domain.attributes
218 if var is not class_var]
219 else:
220 attrs = domain.attributes
221 return Orange.data.Domain(attrs, class_var)
222
223
224 class IsDefined(Transformation):
225 def transform(self, c):
226 return ~numpy.isnan(c)
227
228
229 class Lookup(Lookup):
230 def __init__(self, variable, lookup_table, unknown=None):
231 super().__init__(variable, lookup_table)
232 self.unknown = unknown
233
234 def transform(self, column):
235 if self.unknown is None:
236 unknown = numpy.nan
237 else:
238 unknown = self.unknown
239
240 mask = numpy.isnan(column)
241 column_valid = numpy.where(mask, 0, column)
242 values = self.lookup_table[numpy.array(column_valid, dtype=int)]
243 return numpy.where(mask, unknown, values)
244
245
246 class AsValue(BaseImputeMethod):
247 name = "As a distinct value"
248 short_name = "new value"
249 description = ""
250
251 def __call__(self, data, variable):
252 variable = data.domain[variable]
253 if variable.is_discrete:
254 fmt = "{var.name}"
255 value = "N/A"
256 var = Orange.data.DiscreteVariable(
257 fmt.format(var=variable),
258 values=variable.values + [value],
259 base_value=variable.base_value,
260 compute_value=Lookup(
261 variable,
262 numpy.arange(len(variable.values), dtype=int),
263 unknown=len(variable.values))
264 )
265 return var
266
267 elif variable.is_continuous:
268 fmt = "{var.name}_def"
269 indicator_var = Orange.data.DiscreteVariable(
270 fmt.format(var=variable),
271 values=("undef", "def"),
272 compute_value=IsDefined(variable))
273 stats = basic_stats.BasicStats(data, variable)
274 return (variable.copy(compute_value=ReplaceUnknowns(variable,
275 stats.mean)),
276 indicator_var)
277 else:
278 raise TypeError(type(variable))
279
280
281 class ReplaceUnknownsRandom(Transformation):
282 """
283 A column transformation replacing unknowns with values drawn randomly from
284 an empirical distribution.
285
286 Parameters
287 ----------
288 variable : Orange.data.Variable
289 The target variable for imputation.
290 distribution : Orange.statistics.distribution.Distribution
291 The corresponding sampling distribution
292 """
293 def __init__(self, variable, distribution):
294 assert distribution.size > 0
295 assert distribution.variable == variable
296 super().__init__(variable)
297 self.distribution = distribution
298
299 if variable.is_discrete:
300 counts = numpy.array(distribution)
301 elif variable.is_continuous:
302 counts = numpy.array(distribution)[1, :]
303 else:
304 raise TypeError("Only discrete and continuous "
305 "variables are supported")
306 csum = numpy.sum(counts)
307 if csum > 0:
308 self.sample_prob = counts / csum
309 else:
310 self.sample_prob = numpy.ones_like(counts) / len(counts)
311
312 def transform(self, c):
313 c = numpy.array(c, copy=True)
314 nanindices = numpy.flatnonzero(numpy.isnan(c))
315
316 if self.variable.is_discrete:
317 sample = numpy.random.choice(
318 len(self.variable.values), size=len(nanindices),
319 replace=True, p=self.sample_prob)
320 else:
321 sample = numpy.random.choice(
322 numpy.asarray(self.distribution)[0, :], size=len(nanindices),
323 replace=True, p=self.sample_prob)
324
325 c[nanindices] = sample
326 return c
327
328
329 class Random(BaseImputeMethod):
330 name = "Random values"
331 short_name = "random"
332 description = "Replace with a random value"
333
334 def __call__(self, data, variable):
335 variable = data.domain[variable]
336 dist = distribution.get_distribution(data, variable)
337 # A distribution is invalid if a continuous variable's column does not
338 # contain any known values or if a discrete variable's .values == []
339 isinvalid = dist.size == 0
340 if isinvalid and variable.is_discrete:
341 assert len(variable.values) == 0
342 raise ValueError("'{}' has no values".format(variable))
343 elif isinvalid and variable.is_continuous:
344 raise ValueError("'{}' has an unknown distribution"
345 .format(variable))
346
347 if variable.is_discrete and numpy.sum(dist) == 0:
348 dist += 1 / len(dist)
349 elif variable.is_continuous and numpy.sum(dist[1, :]) == 0:
350 dist[1, :] += 1 / dist.shape[1]
351 return variable.copy(
352 compute_value=ReplaceUnknownsRandom(variable, dist))
353
[end of Orange/preprocess/impute.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Orange/preprocess/impute.py b/Orange/preprocess/impute.py
--- a/Orange/preprocess/impute.py
+++ b/Orange/preprocess/impute.py
@@ -4,7 +4,7 @@
import Orange.data
from Orange.statistics import distribution, basic_stats
from Orange.util import Reprable
-from .transformation import Transformation, Lookup
+from .transformation import Transformation, Lookup as BaseLookup
__all__ = ["ReplaceUnknowns", "Average", "DoNotImpute", "DropInstances",
"Model", "AsValue", "Random", "Default"]
@@ -26,7 +26,8 @@
self.value = value
def transform(self, c):
- if issparse(c): # sparse does not have unknown values
+ if issparse(c):
+ c.data = numpy.where(numpy.isnan(c.data), self.value, c.data)
return c
else:
return numpy.where(numpy.isnan(c), self.value, c)
@@ -226,7 +227,7 @@
return ~numpy.isnan(c)
-class Lookup(Lookup):
+class Lookup(BaseLookup):
def __init__(self, variable, lookup_table, unknown=None):
super().__init__(variable, lookup_table)
self.unknown = unknown
| {"golden_diff": "diff --git a/Orange/preprocess/impute.py b/Orange/preprocess/impute.py\n--- a/Orange/preprocess/impute.py\n+++ b/Orange/preprocess/impute.py\n@@ -4,7 +4,7 @@\n import Orange.data\n from Orange.statistics import distribution, basic_stats\n from Orange.util import Reprable\n-from .transformation import Transformation, Lookup\n+from .transformation import Transformation, Lookup as BaseLookup\n \n __all__ = [\"ReplaceUnknowns\", \"Average\", \"DoNotImpute\", \"DropInstances\",\n \"Model\", \"AsValue\", \"Random\", \"Default\"]\n@@ -26,7 +26,8 @@\n self.value = value\n \n def transform(self, c):\n- if issparse(c): # sparse does not have unknown values\n+ if issparse(c):\n+ c.data = numpy.where(numpy.isnan(c.data), self.value, c.data)\n return c\n else:\n return numpy.where(numpy.isnan(c), self.value, c)\n@@ -226,7 +227,7 @@\n return ~numpy.isnan(c)\n \n \n-class Lookup(Lookup):\n+class Lookup(BaseLookup):\n def __init__(self, variable, lookup_table, unknown=None):\n super().__init__(variable, lookup_table)\n self.unknown = unknown\n", "issue": "Remove Nans from Sparse Data\n<!--\r\nThis is an issue template. Please fill in the relevant details in the\r\nsections below.\r\n-->\r\n\r\n##### Orange version\r\n<!-- From menu _Help\u2192About\u2192Version_ or code `Orange.version.full_version` -->\r\n3.5.dev\r\n\r\n##### Expected behavior\r\n\r\nLogistic Regression works on sparse data that contains nan values.\r\n\r\n##### Actual behavior\r\nIt crashes with `Input contains Nans, ...`\r\n\r\n\r\n##### Steps to reproduce the behavior\r\nLoad `Election-2016-Tweets.tab` in Corpus. Use just first 100 rows for speed. Pass corpus through bag of words into test & score and use 10-fold cross validation. Add logistic regression on input and test & score crashes.\r\n\r\n\r\n##### Additional info (worksheets, data, screenshots, ...)\r\n\r\n\r\n\n", "before_files": [{"content": "import numpy\nfrom scipy.sparse import issparse\n\nimport Orange.data\nfrom Orange.statistics import distribution, basic_stats\nfrom Orange.util import Reprable\nfrom .transformation import Transformation, Lookup\n\n__all__ = [\"ReplaceUnknowns\", \"Average\", \"DoNotImpute\", \"DropInstances\",\n \"Model\", \"AsValue\", \"Random\", \"Default\"]\n\n\nclass ReplaceUnknowns(Transformation):\n \"\"\"\n A column transformation which replaces unknown values with a fixed `value`.\n\n Parameters\n ----------\n variable : Orange.data.Variable\n The target variable for imputation.\n value : int or float\n The value with which to replace the unknown values\n \"\"\"\n def __init__(self, variable, value=0):\n super().__init__(variable)\n self.value = value\n\n def transform(self, c):\n if issparse(c): # sparse does not have unknown values\n return c\n else:\n return numpy.where(numpy.isnan(c), self.value, c)\n\n\nclass BaseImputeMethod(Reprable):\n name = \"\"\n short_name = \"\"\n description = \"\"\n format = \"{var.name} -> {self.short_name}\"\n columns_only = False\n\n def __call__(self, data, variable):\n \"\"\" Imputes table along variable column.\n\n Args:\n data (Table): A table to impute.\n variable (Variable): Variable for completing missing values.\n\n Returns:\n A new Variable instance with completed missing values or\n a array mask of rows to drop out.\n \"\"\"\n raise NotImplementedError\n\n def format_variable(self, var):\n return self.format.format(var=var, self=self)\n\n def __str__(self):\n return self.name\n\n def copy(self):\n return self\n\n @classmethod\n def supports_variable(cls, variable):\n return 
True\n\n\nclass DoNotImpute(BaseImputeMethod):\n name = \"Don't impute\"\n short_name = \"leave\"\n description = \"\"\n\n def __call__(self, data, variable):\n return variable\n\n\nclass DropInstances(BaseImputeMethod):\n name = \"Remove instances with unknown values\"\n short_name = \"drop\"\n description = \"\"\n\n def __call__(self, data, variable):\n index = data.domain.index(variable)\n return numpy.isnan(data[:, index]).reshape(-1)\n\n\nclass Average(BaseImputeMethod):\n name = \"Average/Most frequent\"\n short_name = \"average\"\n description = \"Replace with average/mode of the column\"\n\n def __call__(self, data, variable, value=None):\n variable = data.domain[variable]\n if value is None:\n if variable.is_continuous:\n stats = basic_stats.BasicStats(data, variable)\n value = stats.mean\n elif variable.is_discrete:\n dist = distribution.get_distribution(data, variable)\n value = dist.modus()\n else:\n raise TypeError(\"Variable must be continuous or discrete\")\n\n a = variable.copy(compute_value=ReplaceUnknowns(variable, value))\n a.to_sql = ImputeSql(variable, value)\n return a\n\n\nclass ImputeSql(Reprable):\n def __init__(self, var, default):\n self.var = var\n self.default = default\n\n def __call__(self):\n return 'coalesce(%s, %s)' % (self.var.to_sql(), str(self.default))\n\n\nclass Default(BaseImputeMethod):\n name = \"Value\"\n short_name = \"value\"\n description = \"\"\n columns_only = True\n format = '{var} -> {self.default}'\n\n def __init__(self, default=0):\n self.default = default\n\n def __call__(self, data, variable, *, default=None):\n variable = data.domain[variable]\n default = default if default is not None else self.default\n return variable.copy(compute_value=ReplaceUnknowns(variable, default))\n\n def copy(self):\n return Default(self.default)\n\n\nclass ReplaceUnknownsModel(Reprable):\n \"\"\"\n Replace unknown values with predicted values using a `Orange.base.Model`\n\n Parameters\n ----------\n variable : Orange.data.Variable\n The target variable for the imputation.\n model : Orange.base.Model\n A fitted model predicting `variable`.\n \"\"\"\n def __init__(self, variable, model):\n assert model.domain.class_var == variable\n self.variable = variable\n self.model = model\n\n def __call__(self, data):\n if isinstance(data, Orange.data.Instance):\n column = numpy.array([float(data[self.variable])])\n else:\n column = numpy.array(data.get_column_view(self.variable)[0],\n copy=True)\n\n mask = numpy.isnan(column)\n if not numpy.any(mask):\n return column\n\n if isinstance(data, Orange.data.Instance):\n predicted = self.model(data)\n else:\n predicted = self.model(data[mask])\n column[mask] = predicted\n return column\n\n\nclass Model(BaseImputeMethod):\n _name = \"Model-based imputer\"\n short_name = \"model\"\n description = \"\"\n format = BaseImputeMethod.format + \" ({self.learner.name})\"\n @property\n def name(self):\n return \"{} ({})\".format(self._name, getattr(self.learner, 'name', ''))\n\n def __init__(self, learner):\n self.learner = learner\n\n def __call__(self, data, variable):\n variable = data.domain[variable]\n domain = domain_with_class_var(data.domain, variable)\n\n if self.learner.check_learner_adequacy(domain):\n data = data.transform(domain)\n model = self.learner(data)\n assert model.domain.class_var == variable\n return variable.copy(\n compute_value=ReplaceUnknownsModel(variable, model))\n else:\n raise ValueError(\"`{}` doesn't support domain type\"\n .format(self.learner.name))\n\n def copy(self):\n return 
Model(self.learner)\n\n def supports_variable(self, variable):\n domain = Orange.data.Domain([], class_vars=variable)\n return self.learner.check_learner_adequacy(domain)\n\n\ndef domain_with_class_var(domain, class_var):\n \"\"\"\n Return a domain with class_var as output domain.class_var.\n\n If class_var is in the input domain's attributes it is removed from the\n output's domain.attributes.\n \"\"\"\n if domain.class_var is class_var:\n return domain\n elif class_var in domain.attributes:\n attrs = [var for var in domain.attributes\n if var is not class_var]\n else:\n attrs = domain.attributes\n return Orange.data.Domain(attrs, class_var)\n\n\nclass IsDefined(Transformation):\n def transform(self, c):\n return ~numpy.isnan(c)\n\n\nclass Lookup(Lookup):\n def __init__(self, variable, lookup_table, unknown=None):\n super().__init__(variable, lookup_table)\n self.unknown = unknown\n\n def transform(self, column):\n if self.unknown is None:\n unknown = numpy.nan\n else:\n unknown = self.unknown\n\n mask = numpy.isnan(column)\n column_valid = numpy.where(mask, 0, column)\n values = self.lookup_table[numpy.array(column_valid, dtype=int)]\n return numpy.where(mask, unknown, values)\n\n\nclass AsValue(BaseImputeMethod):\n name = \"As a distinct value\"\n short_name = \"new value\"\n description = \"\"\n\n def __call__(self, data, variable):\n variable = data.domain[variable]\n if variable.is_discrete:\n fmt = \"{var.name}\"\n value = \"N/A\"\n var = Orange.data.DiscreteVariable(\n fmt.format(var=variable),\n values=variable.values + [value],\n base_value=variable.base_value,\n compute_value=Lookup(\n variable,\n numpy.arange(len(variable.values), dtype=int),\n unknown=len(variable.values))\n )\n return var\n\n elif variable.is_continuous:\n fmt = \"{var.name}_def\"\n indicator_var = Orange.data.DiscreteVariable(\n fmt.format(var=variable),\n values=(\"undef\", \"def\"),\n compute_value=IsDefined(variable))\n stats = basic_stats.BasicStats(data, variable)\n return (variable.copy(compute_value=ReplaceUnknowns(variable,\n stats.mean)),\n indicator_var)\n else:\n raise TypeError(type(variable))\n\n\nclass ReplaceUnknownsRandom(Transformation):\n \"\"\"\n A column transformation replacing unknowns with values drawn randomly from\n an empirical distribution.\n\n Parameters\n ----------\n variable : Orange.data.Variable\n The target variable for imputation.\n distribution : Orange.statistics.distribution.Distribution\n The corresponding sampling distribution\n \"\"\"\n def __init__(self, variable, distribution):\n assert distribution.size > 0\n assert distribution.variable == variable\n super().__init__(variable)\n self.distribution = distribution\n\n if variable.is_discrete:\n counts = numpy.array(distribution)\n elif variable.is_continuous:\n counts = numpy.array(distribution)[1, :]\n else:\n raise TypeError(\"Only discrete and continuous \"\n \"variables are supported\")\n csum = numpy.sum(counts)\n if csum > 0:\n self.sample_prob = counts / csum\n else:\n self.sample_prob = numpy.ones_like(counts) / len(counts)\n\n def transform(self, c):\n c = numpy.array(c, copy=True)\n nanindices = numpy.flatnonzero(numpy.isnan(c))\n\n if self.variable.is_discrete:\n sample = numpy.random.choice(\n len(self.variable.values), size=len(nanindices),\n replace=True, p=self.sample_prob)\n else:\n sample = numpy.random.choice(\n numpy.asarray(self.distribution)[0, :], size=len(nanindices),\n replace=True, p=self.sample_prob)\n\n c[nanindices] = sample\n return c\n\n\nclass Random(BaseImputeMethod):\n name = \"Random 
values\"\n short_name = \"random\"\n description = \"Replace with a random value\"\n\n def __call__(self, data, variable):\n variable = data.domain[variable]\n dist = distribution.get_distribution(data, variable)\n # A distribution is invalid if a continuous variable's column does not\n # contain any known values or if a discrete variable's .values == []\n isinvalid = dist.size == 0\n if isinvalid and variable.is_discrete:\n assert len(variable.values) == 0\n raise ValueError(\"'{}' has no values\".format(variable))\n elif isinvalid and variable.is_continuous:\n raise ValueError(\"'{}' has an unknown distribution\"\n .format(variable))\n\n if variable.is_discrete and numpy.sum(dist) == 0:\n dist += 1 / len(dist)\n elif variable.is_continuous and numpy.sum(dist[1, :]) == 0:\n dist[1, :] += 1 / dist.shape[1]\n return variable.copy(\n compute_value=ReplaceUnknownsRandom(variable, dist))\n", "path": "Orange/preprocess/impute.py"}]} | 4,011 | 280 |
gh_patches_debug_4661 | rasdani/github-patches | git_diff | SeldonIO__MLServer-478 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Activating custom conda env in mlserver build image
Hello,
I’ve encountered an issue when using `mlserver build ...` with `1.1.0dev` where the custom conda environment is not activated. The image builds and all packages are present in the image. However, when starting the image it crashes on `mlserver start` calling what seems to be native Python 3.8 rather than the conda installed python.
```
--> Sourcing new environment at ./envs/base/environment...
--> Calling conda-unpack...
--> Disabling user-installed packages...
Traceback (most recent call last):
File "/opt/mlserver/envs/base/environment/bin/mlserver", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py", line 76, in main
root()
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1128, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1053, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1659, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 1395, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.8/site-packages/click/core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py", line 19, in wrapper
return asyncio.run(f(*args, **kwargs))
File "/usr/local/lib/python3.8/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/usr/local/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete
return future.result()
File "/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py", line 40, in start
settings, models = await load_settings(folder)
File "/usr/local/lib/python3.8/site-packages/mlserver/cli/serve.py", line 37, in load_settings
available_models = await repository.list()
File "/usr/local/lib/python3.8/site-packages/mlserver/repository.py", line 37, in list
model_settings = ModelSettings()
File "pydantic/env_settings.py", line 36, in pydantic.env_settings.BaseSettings.__init__
File "pydantic/main.py", line 406, in pydantic.main.BaseModel.__init__
pydantic.error_wrappers.ValidationError: 1 validation error for ModelSettings
implementation
ensure this value contains valid import path or valid callable: No module named 'mlserver_mlflow' (type=type_error.pyobject; error_message=No module named 'mlserver_mlflow')
```
- [x] manually removing final CMD line in Dockerfile and starting interactive container. Running `./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && mlserver start $MLSERVER_MODELS_DIR` successfully launches the service
conda.yaml file:
```
channels:
- defaults
- conda-forge
- anaconda
dependencies:
- python=3.7.10
- pip
- gcc_linux-aarch64
- gxx_linux-aarch64
- pip:
- mlflow
- mlserver==0.4.0
- mlserver-mlflow==0.4.0
name: conda
```
</issue>
<code>
[start of mlserver/cli/constants.py]
1 DockerfileName = "Dockerfile"
2 DockerfileTemplate = """
3 FROM continuumio/miniconda3:4.10.3 AS env-builder
4 SHELL ["/bin/bash", "-c"]
5
6 ARG MLSERVER_ENV_NAME="mlserver-custom-env" \\
7 MLSERVER_ENV_TARBALL="./envs/base.tar.gz"
8
9 RUN conda config --add channels conda-forge && \\
10 conda install conda-pack
11
12 # The `[]` character range will ensure that Docker doesn't complain if the
13 # files don't exist:
14 # https://stackoverflow.com/a/65138098/5015573
15 COPY \\
16 ./environment.ym[l] \\
17 ./environment.yam[l] \\
18 ./conda.ym[l] \\
19 ./conda.yam[l] \\
20 .
21 RUN mkdir $(dirname $MLSERVER_ENV_TARBALL); \\
22 for envFile in environment.yml environment.yaml conda.yml conda.yaml; do \\
23 if [[ -f $envFile ]]; then \\
24 conda env create \
25 --name $MLSERVER_ENV_NAME \\
26 --file $envFile; \\
27 conda-pack \
28 -n $MLSERVER_ENV_NAME \\
29 -o $MLSERVER_ENV_TARBALL; \\
30 fi \\
31 done; \\
32 chmod -R 776 $(dirname $MLSERVER_ENV_TARBALL)
33
34 FROM seldonio/mlserver:{version}-slim
35 SHELL ["/bin/bash", "-c"]
36
37 # Copy all potential sources for custom environments
38 COPY \\
39 --chown=1000 \\
40 --from=env-builder \\
41 /envs/base.tar.g[z] \\
42 ./envs/base.tar.gz
43 COPY \\
44 ./settings.jso[n] \\
45 ./model-settings.jso[n] \\
46 ./requirements.tx[t] \\
47 .
48
49 USER root
50 # Install dependencies system-wide, to ensure that they are available for every
51 # user
52 RUN ./hack/build-env.sh . ./envs/base && \
53 chown -R 1000:0 ./envs/base && \\
54 chmod -R 776 ./envs/base
55 USER 1000
56
57 # Copy everything else
58 COPY . .
59
60 # Override MLServer's own `CMD` to activate the embedded environment
61 # (optionally activating the hot-loaded one as well).
62 CMD source ./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && \\
63 mlserver start $MLSERVER_MODELS_DIR
64 """
65
66 DockerignoreName = ".dockerignore"
67 Dockerignore = """
68 # Binaries for programs and plugins
69 *.exe
70 *.exe~
71 *.dll
72 *.so
73 *.dylib
74 *.pyc
75 *.pyo
76 *.pyd
77 bin
78
79 # Mac file system
80 **/.DS_Store
81
82 # Python dev
83 __pycache__
84 .Python
85 env
86 pip-log.txt
87 pip-delete-this-directory.txt
88 .mypy_cache
89 eggs/
90 .eggs/
91 *.egg-info/
92 ./pytest_cache
93 .tox
94 build/
95 dist/
96
97 # Notebook Checkpoints
98 .ipynb_checkpoints
99
100 .coverage
101 .coverage.*
102 .cache
103 nosetests.xml
104 coverage.xml
105 *,cover
106 *.log
107 .git
108 """
109
[end of mlserver/cli/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlserver/cli/constants.py b/mlserver/cli/constants.py
--- a/mlserver/cli/constants.py
+++ b/mlserver/cli/constants.py
@@ -24,7 +24,7 @@
conda env create \
--name $MLSERVER_ENV_NAME \\
--file $envFile; \\
- conda-pack \
+ conda-pack --ignore-missing-files \
-n $MLSERVER_ENV_NAME \\
-o $MLSERVER_ENV_TARBALL; \\
fi \\
| {"golden_diff": "diff --git a/mlserver/cli/constants.py b/mlserver/cli/constants.py\n--- a/mlserver/cli/constants.py\n+++ b/mlserver/cli/constants.py\n@@ -24,7 +24,7 @@\n conda env create \\\n --name $MLSERVER_ENV_NAME \\\\\n --file $envFile; \\\\\n- conda-pack \\\n+ conda-pack --ignore-missing-files \\\n -n $MLSERVER_ENV_NAME \\\\\n -o $MLSERVER_ENV_TARBALL; \\\\\n fi \\\\\n", "issue": "Activating custom conda env in mlserver build image\nHello,\r\nI\u2019ve encountered an issue when using `mlserver build ...` with `1.1.0dev` where the custom conda environment is not activated. The image builds and all packages are present in the image. However, when starting the image it crashes on `mlserver start` calling what seems to be native Python 3.8 rather than the conda installed python.\r\n\r\n```\r\n--> Sourcing new environment at ./envs/base/environment...\r\n\r\n--> Calling conda-unpack...\r\n\r\n--> Disabling user-installed packages...\r\n\r\nTraceback (most recent call last):\r\n\r\n File \"/opt/mlserver/envs/base/environment/bin/mlserver\", line 8, in <module>\r\n\r\n sys.exit(main())\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py\", line 76, in main\r\n\r\n root()\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/click/core.py\", line 1128, in __call__\r\n\r\n return self.main(*args, **kwargs)\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/click/core.py\", line 1053, in main\r\n\r\n rv = self.invoke(ctx)\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/click/core.py\", line 1659, in invoke\r\n\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/click/core.py\", line 1395, in invoke\r\n\r\n return ctx.invoke(self.callback, **ctx.params)\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/click/core.py\", line 754, in invoke\r\n\r\n return __callback(*args, **kwargs)\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py\", line 19, in wrapper\r\n\r\n return asyncio.run(f(*args, **kwargs))\r\n\r\n File \"/usr/local/lib/python3.8/asyncio/runners.py\", line 44, in run\r\n\r\n return loop.run_until_complete(main)\r\n\r\n File \"/usr/local/lib/python3.8/asyncio/base_events.py\", line 616, in run_until_complete\r\n\r\n return future.result()\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/mlserver/cli/main.py\", line 40, in start\r\n\r\n settings, models = await load_settings(folder)\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/mlserver/cli/serve.py\", line 37, in load_settings\r\n\r\n available_models = await repository.list()\r\n\r\n File \"/usr/local/lib/python3.8/site-packages/mlserver/repository.py\", line 37, in list\r\n\r\n model_settings = ModelSettings()\r\n\r\n File \"pydantic/env_settings.py\", line 36, in pydantic.env_settings.BaseSettings.__init__\r\n\r\n File \"pydantic/main.py\", line 406, in pydantic.main.BaseModel.__init__\r\n\r\npydantic.error_wrappers.ValidationError: 1 validation error for ModelSettings\r\n\r\nimplementation\r\n\r\n ensure this value contains valid import path or valid callable: No module named 'mlserver_mlflow' (type=type_error.pyobject; error_message=No module named 'mlserver_mlflow')\r\n\r\n```\r\n\r\n- [x] manually removing final CMD line in Dockerfile and starting interactive container. 
Running `./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && mlserver start $MLSERVER_MODELS_DIR` successfully launches the service\r\n\r\nconda.yaml file:\r\n```\r\nchannels:\r\n- defaults\r\n- conda-forge\r\n- anaconda\r\ndependencies:\r\n- python=3.7.10\r\n- pip\r\n- gcc_linux-aarch64\r\n- gxx_linux-aarch64\r\n- pip:\r\n - mlflow\r\n - mlserver==0.4.0\r\n - mlserver-mlflow==0.4.0\r\nname: conda\r\n```\n", "before_files": [{"content": "DockerfileName = \"Dockerfile\"\nDockerfileTemplate = \"\"\"\nFROM continuumio/miniconda3:4.10.3 AS env-builder\nSHELL [\"/bin/bash\", \"-c\"]\n\nARG MLSERVER_ENV_NAME=\"mlserver-custom-env\" \\\\\n MLSERVER_ENV_TARBALL=\"./envs/base.tar.gz\"\n\nRUN conda config --add channels conda-forge && \\\\\n conda install conda-pack\n\n# The `[]` character range will ensure that Docker doesn't complain if the\n# files don't exist:\n# https://stackoverflow.com/a/65138098/5015573\nCOPY \\\\\n ./environment.ym[l] \\\\\n ./environment.yam[l] \\\\\n ./conda.ym[l] \\\\\n ./conda.yam[l] \\\\\n .\nRUN mkdir $(dirname $MLSERVER_ENV_TARBALL); \\\\\n for envFile in environment.yml environment.yaml conda.yml conda.yaml; do \\\\\n if [[ -f $envFile ]]; then \\\\\n conda env create \\\n --name $MLSERVER_ENV_NAME \\\\\n --file $envFile; \\\\\n conda-pack \\\n -n $MLSERVER_ENV_NAME \\\\\n -o $MLSERVER_ENV_TARBALL; \\\\\n fi \\\\\n done; \\\\\n chmod -R 776 $(dirname $MLSERVER_ENV_TARBALL)\n\nFROM seldonio/mlserver:{version}-slim\nSHELL [\"/bin/bash\", \"-c\"]\n\n# Copy all potential sources for custom environments\nCOPY \\\\\n --chown=1000 \\\\\n --from=env-builder \\\\\n /envs/base.tar.g[z] \\\\\n ./envs/base.tar.gz\nCOPY \\\\\n ./settings.jso[n] \\\\\n ./model-settings.jso[n] \\\\\n ./requirements.tx[t] \\\\\n .\n\nUSER root\n# Install dependencies system-wide, to ensure that they are available for every\n# user\nRUN ./hack/build-env.sh . ./envs/base && \\\n chown -R 1000:0 ./envs/base && \\\\\n chmod -R 776 ./envs/base\nUSER 1000\n\n# Copy everything else\nCOPY . .\n\n# Override MLServer's own `CMD` to activate the embedded environment\n# (optionally activating the hot-loaded one as well).\nCMD source ./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && \\\\\n mlserver start $MLSERVER_MODELS_DIR\n\"\"\"\n\nDockerignoreName = \".dockerignore\"\nDockerignore = \"\"\"\n# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n*.pyc\n*.pyo\n*.pyd\nbin\n\n# Mac file system\n**/.DS_Store\n\n# Python dev\n__pycache__\n.Python\nenv\npip-log.txt\npip-delete-this-directory.txt\n.mypy_cache\neggs/\n.eggs/\n*.egg-info/\n./pytest_cache\n.tox\nbuild/\ndist/\n\n# Notebook Checkpoints\n.ipynb_checkpoints\n\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*,cover\n*.log\n.git\n\"\"\"\n", "path": "mlserver/cli/constants.py"}]} | 2,276 | 108 |
gh_patches_debug_32002 | rasdani/github-patches | git_diff | Mailu__Mailu-2069 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Setup utility] Cannot generate files when database flavors have been switched
## Before you open your issue
- [x] Check if no issue or pull-request for this already exists.
- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [x] You understand `Mailu` is made by volunteers in their **free time** — be conscise, civil and accept that delays can occur.
- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [x] docker-compose
- [ ] kubernetes
- [x] docker swarm
### Versions
All versions
## Description
In the setup utility when you select a different database flavor and then switch back to SQLite, then you can not generate the files. The reason is that it still expects the fields to be filled of the alternative database flavor you previously selected.
When you select an alternative database flavor, the fields (e.g. hostname, database name) are mandatory. These fields are still mandatory when you switch back to SQLlite as database flavor.
As a workaround you can fill in mandatory fields and then switch back to SQLite again. Or you could refresh the page and do not switch from SQLite.
The problem is in https://github.com/Mailu/Mailu/blob/master/setup/templates/steps/database.html .
I didn't check yet how to resolve this.
## Replication Steps
1) Go to https://setup.mailu.io/master/ and click next.
2). Enter a value for main domain server and public hostname.
3). Select roundcube as webmail.
4). At the bottom switch to postgresql or mysql as database
5). Switch back to SQLite as database.
6). Click Setup mailu. Note that the button does not work.
## Expected behaviour
The Setup Mailu button works after following about steps.
[Setup utility] Cannot generate files when database flavors have been switched
## Before you open your issue
- [x] Check if no issue or pull-request for this already exists.
- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)
- [x] You understand `Mailu` is made by volunteers in their **free time** — be conscise, civil and accept that delays can occur.
- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.
## Environment & Versions
### Environment
- [x] docker-compose
- [ ] kubernetes
- [x] docker swarm
### Versions
All versions
## Description
In the setup utility when you select a different database flavor and then switch back to SQLite, then you can not generate the files. The reason is that it still expects the fields to be filled of the alternative database flavor you previously selected.
When you select an alternative database flavor, the fields (e.g. hostname, database name) are mandatory. These fields are still mandatory when you switch back to SQLlite as database flavor.
As a workaround you can fill in mandatory fields and then switch back to SQLite again. Or you could refresh the page and do not switch from SQLite.
The problem is in https://github.com/Mailu/Mailu/blob/master/setup/templates/steps/database.html .
I didn't check yet how to resolve this.
## Replication Steps
1) Go to https://setup.mailu.io/master/ and click next.
2). Enter a value for main domain server and public hostname.
3). Select roundcube as webmail.
4). At the bottom switch to postgresql or mysql as database
5). Switch back to SQLite as database.
6). Click Setup mailu. Note that the button does not work.
## Expected behaviour
The Setup Mailu button works after following about steps.
</issue>
<code>
[start of optional/postgresql/start.py]
1 #!/usr/bin/python3
2
3 import anosql
4 import psycopg2
5 import glob
6 import os
7 import subprocess
8 from socrate import conf
9
10 def setup():
11 conn = psycopg2.connect(user='postgres')
12 queries = anosql.load_queries('postgres', '/conf/queries.sql')
13 # Mailu user
14 queries.create_mailu_user(conn)
15 queries.update_pw(conn, pw=os.environ.get("DB_PW"))
16 # Healthcheck user
17 queries.create_health_user(conn)
18 queries.grant_health(conn)
19 conn.commit()
20 # create db cannot be atomic. But this script is the only active connection, this is kinda safe.
21 if not queries.check_db(conn):
22 conn.set_isolation_level(0)
23 queries.create_db(conn)
24 conn.set_isolation_level(1)
25 conn.close()
26
27 # Check if /data is empty
28 if not os.listdir("/data"):
29 os.system("chown -R postgres:postgres /data")
30 os.system("chmod 0700 /data")
31 base_backups=sorted(glob.glob("/backup/base-*"))
32 if base_backups:
33 # Restore the latest backup
34 subprocess.call(["tar", "--same-owner", "-zpxf", base_backups[-1] + "/base.tar.gz" , "-C", "/data"])
35 if os.listdir("/backup/wal_archive"):
36 with open("/data/recovery.conf", "w") as rec:
37 rec.write("restore_command = 'gunzip < /backup/wal_archive/%f > %p'\n")
38 rec.write("standby_mode = off\n")
39 os.system("chown postgres:postgres /data/recovery.conf")
40 else:
41 # Bootstrap the database
42 os.system("sudo -u postgres initdb -D /data")
43
44 # Create backup directory structure, if it does not yet exist
45 os.system("mkdir -p /backup/wal_archive")
46 os.system("chown -R postgres:postgres /backup")
47
48 # Render config files
49 for pg_file in glob.glob("/conf/*.conf"):
50 conf.jinja(pg_file, os.environ, os.path.join("/data", os.path.basename(pg_file)))
51
52 # (Re)start postgresql locally for DB and user creation
53 os.system("sudo -u postgres pg_ctl start -D /data -o '-h \"''\" '")
54 while os.path.isfile("recovery.conf"):
55 pass
56 os.system("sudo -u postgres pg_ctl -D /data promote")
57 setup()
58 os.system("sudo -u postgres pg_ctl stop -m smart -w -D /data")
59
60 out=open("/proc/1/fd/1", "w")
61 err=open("/proc/1/fd/2", "w")
62 # Run the cron deamon
63 subprocess.Popen(["crond", "-f"], stdout=out, stderr=err)
64 # Run postgresql service
65 os.system("sudo -u postgres postgres -D /data -h \*")
66
[end of optional/postgresql/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/optional/postgresql/start.py b/optional/postgresql/start.py
deleted file mode 100755
--- a/optional/postgresql/start.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/python3
-
-import anosql
-import psycopg2
-import glob
-import os
-import subprocess
-from socrate import conf
-
-def setup():
- conn = psycopg2.connect(user='postgres')
- queries = anosql.load_queries('postgres', '/conf/queries.sql')
- # Mailu user
- queries.create_mailu_user(conn)
- queries.update_pw(conn, pw=os.environ.get("DB_PW"))
- # Healthcheck user
- queries.create_health_user(conn)
- queries.grant_health(conn)
- conn.commit()
- # create db cannot be atomic. But this script is the only active connection, this is kinda safe.
- if not queries.check_db(conn):
- conn.set_isolation_level(0)
- queries.create_db(conn)
- conn.set_isolation_level(1)
- conn.close()
-
-# Check if /data is empty
-if not os.listdir("/data"):
- os.system("chown -R postgres:postgres /data")
- os.system("chmod 0700 /data")
- base_backups=sorted(glob.glob("/backup/base-*"))
- if base_backups:
- # Restore the latest backup
- subprocess.call(["tar", "--same-owner", "-zpxf", base_backups[-1] + "/base.tar.gz" , "-C", "/data"])
- if os.listdir("/backup/wal_archive"):
- with open("/data/recovery.conf", "w") as rec:
- rec.write("restore_command = 'gunzip < /backup/wal_archive/%f > %p'\n")
- rec.write("standby_mode = off\n")
- os.system("chown postgres:postgres /data/recovery.conf")
- else:
- # Bootstrap the database
- os.system("sudo -u postgres initdb -D /data")
-
-# Create backup directory structure, if it does not yet exist
-os.system("mkdir -p /backup/wal_archive")
-os.system("chown -R postgres:postgres /backup")
-
-# Render config files
-for pg_file in glob.glob("/conf/*.conf"):
- conf.jinja(pg_file, os.environ, os.path.join("/data", os.path.basename(pg_file)))
-
-# (Re)start postgresql locally for DB and user creation
-os.system("sudo -u postgres pg_ctl start -D /data -o '-h \"''\" '")
-while os.path.isfile("recovery.conf"):
- pass
-os.system("sudo -u postgres pg_ctl -D /data promote")
-setup()
-os.system("sudo -u postgres pg_ctl stop -m smart -w -D /data")
-
-out=open("/proc/1/fd/1", "w")
-err=open("/proc/1/fd/2", "w")
-# Run the cron deamon
-subprocess.Popen(["crond", "-f"], stdout=out, stderr=err)
-# Run postgresql service
-os.system("sudo -u postgres postgres -D /data -h \*")
| {"golden_diff": "diff --git a/optional/postgresql/start.py b/optional/postgresql/start.py\ndeleted file mode 100755\n--- a/optional/postgresql/start.py\n+++ /dev/null\n@@ -1,65 +0,0 @@\n-#!/usr/bin/python3\n-\n-import anosql\n-import psycopg2\n-import glob\n-import os\n-import subprocess\n-from socrate import conf\n-\n-def setup():\n- conn = psycopg2.connect(user='postgres')\n- queries = anosql.load_queries('postgres', '/conf/queries.sql')\n- # Mailu user\n- queries.create_mailu_user(conn)\n- queries.update_pw(conn, pw=os.environ.get(\"DB_PW\"))\n- # Healthcheck user\n- queries.create_health_user(conn)\n- queries.grant_health(conn)\n- conn.commit()\n- # create db cannot be atomic. But this script is the only active connection, this is kinda safe.\n- if not queries.check_db(conn):\n- conn.set_isolation_level(0)\n- queries.create_db(conn)\n- conn.set_isolation_level(1)\n- conn.close()\n-\n-# Check if /data is empty\n-if not os.listdir(\"/data\"):\n- os.system(\"chown -R postgres:postgres /data\")\n- os.system(\"chmod 0700 /data\")\n- base_backups=sorted(glob.glob(\"/backup/base-*\"))\n- if base_backups:\n- # Restore the latest backup\n- subprocess.call([\"tar\", \"--same-owner\", \"-zpxf\", base_backups[-1] + \"/base.tar.gz\" , \"-C\", \"/data\"])\n- if os.listdir(\"/backup/wal_archive\"):\n- with open(\"/data/recovery.conf\", \"w\") as rec:\n- rec.write(\"restore_command = 'gunzip < /backup/wal_archive/%f > %p'\\n\")\n- rec.write(\"standby_mode = off\\n\")\n- os.system(\"chown postgres:postgres /data/recovery.conf\")\n- else:\n- # Bootstrap the database\n- os.system(\"sudo -u postgres initdb -D /data\")\n-\n-# Create backup directory structure, if it does not yet exist\n-os.system(\"mkdir -p /backup/wal_archive\")\n-os.system(\"chown -R postgres:postgres /backup\")\n-\n-# Render config files\n-for pg_file in glob.glob(\"/conf/*.conf\"):\n- conf.jinja(pg_file, os.environ, os.path.join(\"/data\", os.path.basename(pg_file)))\n-\n-# (Re)start postgresql locally for DB and user creation\n-os.system(\"sudo -u postgres pg_ctl start -D /data -o '-h \\\"''\\\" '\")\n-while os.path.isfile(\"recovery.conf\"):\n- pass\n-os.system(\"sudo -u postgres pg_ctl -D /data promote\")\n-setup()\n-os.system(\"sudo -u postgres pg_ctl stop -m smart -w -D /data\")\n-\n-out=open(\"/proc/1/fd/1\", \"w\")\n-err=open(\"/proc/1/fd/2\", \"w\")\n-# Run the cron deamon\n-subprocess.Popen([\"crond\", \"-f\"], stdout=out, stderr=err)\n-# Run postgresql service\n-os.system(\"sudo -u postgres postgres -D /data -h \\*\")\n", "issue": "[Setup utility] Cannot generate files when database flavors have been switched\n## Before you open your issue\r\n- [x] Check if no issue or pull-request for this already exists.\r\n- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- [x] You understand `Mailu` is made by volunteers in their **free time** \u2014 be conscise, civil and accept that delays can occur.\r\n- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\n## Environment & Versions\r\n### Environment\r\n - [x] docker-compose\r\n - [ ] kubernetes\r\n - [x] docker swarm\r\n\r\n### Versions\r\nAll versions\r\n\r\n## Description\r\nIn the setup utility when you select a different database flavor and then switch back to SQLite, then you can not generate the files. 
The reason is that it still expects the fields to be filled of the alternative database flavor you previously selected. \r\nWhen you select an alternative database flavor, the fields (e.g. hostname, database name) are mandatory. These fields are still mandatory when you switch back to SQLlite as database flavor.\r\n\r\nAs a workaround you can fill in mandatory fields and then switch back to SQLite again. Or you could refresh the page and do not switch from SQLite. \r\n\r\nThe problem is in https://github.com/Mailu/Mailu/blob/master/setup/templates/steps/database.html . \r\nI didn't check yet how to resolve this.\r\n\r\n## Replication Steps\r\n1) Go to https://setup.mailu.io/master/ and click next.\r\n2). Enter a value for main domain server and public hostname.\r\n3). Select roundcube as webmail.\r\n4). At the bottom switch to postgresql or mysql as database\r\n5). Switch back to SQLite as database.\r\n6). Click Setup mailu. Note that the button does not work.\r\n\r\n## Expected behaviour\r\nThe Setup Mailu button works after following about steps.\n[Setup utility] Cannot generate files when database flavors have been switched\n## Before you open your issue\r\n- [x] Check if no issue or pull-request for this already exists.\r\n- [x] Check [documentation](https://mailu.io/master/) and [FAQ](https://mailu.io/master/faq.html). (Tip, use the search function on the documentation page)\r\n- [x] You understand `Mailu` is made by volunteers in their **free time** \u2014 be conscise, civil and accept that delays can occur.\r\n- [x] The title of the issue should be short and simple. It should contain specific terms related to the actual issue. Be specific while writing the title.\r\n\r\n## Environment & Versions\r\n### Environment\r\n - [x] docker-compose\r\n - [ ] kubernetes\r\n - [x] docker swarm\r\n\r\n### Versions\r\nAll versions\r\n\r\n## Description\r\nIn the setup utility when you select a different database flavor and then switch back to SQLite, then you can not generate the files. The reason is that it still expects the fields to be filled of the alternative database flavor you previously selected. \r\nWhen you select an alternative database flavor, the fields (e.g. hostname, database name) are mandatory. These fields are still mandatory when you switch back to SQLlite as database flavor.\r\n\r\nAs a workaround you can fill in mandatory fields and then switch back to SQLite again. Or you could refresh the page and do not switch from SQLite. \r\n\r\nThe problem is in https://github.com/Mailu/Mailu/blob/master/setup/templates/steps/database.html . \r\nI didn't check yet how to resolve this.\r\n\r\n## Replication Steps\r\n1) Go to https://setup.mailu.io/master/ and click next.\r\n2). Enter a value for main domain server and public hostname.\r\n3). Select roundcube as webmail.\r\n4). At the bottom switch to postgresql or mysql as database\r\n5). Switch back to SQLite as database.\r\n6). Click Setup mailu. 
Note that the button does not work.\r\n\r\n## Expected behaviour\r\nThe Setup Mailu button works after following about steps.\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport anosql\nimport psycopg2\nimport glob\nimport os\nimport subprocess\nfrom socrate import conf\n\ndef setup():\n conn = psycopg2.connect(user='postgres')\n queries = anosql.load_queries('postgres', '/conf/queries.sql')\n # Mailu user\n queries.create_mailu_user(conn)\n queries.update_pw(conn, pw=os.environ.get(\"DB_PW\"))\n # Healthcheck user\n queries.create_health_user(conn)\n queries.grant_health(conn)\n conn.commit()\n # create db cannot be atomic. But this script is the only active connection, this is kinda safe.\n if not queries.check_db(conn):\n conn.set_isolation_level(0)\n queries.create_db(conn)\n conn.set_isolation_level(1)\n conn.close()\n\n# Check if /data is empty\nif not os.listdir(\"/data\"):\n os.system(\"chown -R postgres:postgres /data\")\n os.system(\"chmod 0700 /data\")\n base_backups=sorted(glob.glob(\"/backup/base-*\"))\n if base_backups:\n # Restore the latest backup\n subprocess.call([\"tar\", \"--same-owner\", \"-zpxf\", base_backups[-1] + \"/base.tar.gz\" , \"-C\", \"/data\"])\n if os.listdir(\"/backup/wal_archive\"):\n with open(\"/data/recovery.conf\", \"w\") as rec:\n rec.write(\"restore_command = 'gunzip < /backup/wal_archive/%f > %p'\\n\")\n rec.write(\"standby_mode = off\\n\")\n os.system(\"chown postgres:postgres /data/recovery.conf\")\n else:\n # Bootstrap the database\n os.system(\"sudo -u postgres initdb -D /data\")\n\n# Create backup directory structure, if it does not yet exist\nos.system(\"mkdir -p /backup/wal_archive\")\nos.system(\"chown -R postgres:postgres /backup\")\n\n# Render config files\nfor pg_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(pg_file, os.environ, os.path.join(\"/data\", os.path.basename(pg_file)))\n\n# (Re)start postgresql locally for DB and user creation\nos.system(\"sudo -u postgres pg_ctl start -D /data -o '-h \\\"''\\\" '\")\nwhile os.path.isfile(\"recovery.conf\"):\n pass\nos.system(\"sudo -u postgres pg_ctl -D /data promote\")\nsetup()\nos.system(\"sudo -u postgres pg_ctl stop -m smart -w -D /data\")\n\nout=open(\"/proc/1/fd/1\", \"w\")\nerr=open(\"/proc/1/fd/2\", \"w\")\n# Run the cron deamon\nsubprocess.Popen([\"crond\", \"-f\"], stdout=out, stderr=err)\n# Run postgresql service\nos.system(\"sudo -u postgres postgres -D /data -h \\*\")\n", "path": "optional/postgresql/start.py"}]} | 2,130 | 710 |
gh_patches_debug_11268 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-1022 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Django middleware not properly supported if left empty
In Django 1.10 and above, `dd-trace-py` will attempt to check first if the user is using the `MIDDLEWARE` Django setting and will fallback to `MIDDLEWARE_CLASSES` if unset. In my application, I deliberately declare an empty list for the `MIDDLEWARE` setting to tell Django that I am not personally putting any middleware into the application. Unfortunately, `dd-trace-py` simply checks if the `settings.MIDDLEWARE` attribute is falsy and since this is true it then falls back to `MIDDLEWARE_CLASSES`.
If an empty list/tuple is declared in Django settings for `MIDDLEWARE`, it should still be properly injected.
</issue>
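For context, the core of the report is the difference between a setting that is missing and one that is deliberately empty. The snippet below reproduces that distinction without Django; `FakeSettings` and the variable names are hypothetical stand-ins for illustration only.

```python
# Minimal sketch: why a truthiness check cannot distinguish an unset
# MIDDLEWARE setting from one that is deliberately declared empty.
class FakeSettings:
    MIDDLEWARE = []                       # user explicitly wants no middleware
    MIDDLEWARE_CLASSES = ['legacy.Middleware']

settings = FakeSettings()
middleware = getattr(settings, 'MIDDLEWARE', None)

# Truthiness test: an empty list is falsy, so this wrongly falls back.
falsy_choice = 'MIDDLEWARE' if middleware else 'MIDDLEWARE_CLASSES'

# Identity test: only a genuinely missing setting (None) triggers the fallback.
none_choice = 'MIDDLEWARE' if middleware is not None else 'MIDDLEWARE_CLASSES'

print(falsy_choice)   # MIDDLEWARE_CLASSES  <- the reported behaviour
print(none_choice)    # MIDDLEWARE          <- the expected behaviour
```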
<code>
[start of ddtrace/contrib/django/middleware.py]
1 # project
2 from .conf import settings
3 from .compat import user_is_authenticated, get_resolver
4 from .utils import get_request_uri
5
6 from ...constants import ANALYTICS_SAMPLE_RATE_KEY
7 from ...contrib import func_name
8 from ...ext import http
9 from ...internal.logger import get_logger
10 from ...propagation.http import HTTPPropagator
11 from ...settings import config
12
13 # 3p
14 from django.core.exceptions import MiddlewareNotUsed
15 from django.conf import settings as django_settings
16 import django
17
18 try:
19 from django.utils.deprecation import MiddlewareMixin
20 MiddlewareClass = MiddlewareMixin
21 except ImportError:
22 MiddlewareClass = object
23
24 log = get_logger(__name__)
25
26 EXCEPTION_MIDDLEWARE = 'ddtrace.contrib.django.TraceExceptionMiddleware'
27 TRACE_MIDDLEWARE = 'ddtrace.contrib.django.TraceMiddleware'
28 MIDDLEWARE = 'MIDDLEWARE'
29 MIDDLEWARE_CLASSES = 'MIDDLEWARE_CLASSES'
30
31 # Default views list available from:
32 # https://github.com/django/django/blob/38e2fdadfd9952e751deed662edf4c496d238f28/django/views/defaults.py
33 # DEV: Django doesn't call `process_view` when falling back to one of these internal error handling views
34 # DEV: We only use these names when `span.resource == 'unknown'` and we have one of these status codes
35 _django_default_views = {
36 400: 'django.views.defaults.bad_request',
37 403: 'django.views.defaults.permission_denied',
38 404: 'django.views.defaults.page_not_found',
39 500: 'django.views.defaults.server_error',
40 }
41
42
43 def _analytics_enabled():
44 return (
45 (config.analytics_enabled and settings.ANALYTICS_ENABLED is not False)
46 or settings.ANALYTICS_ENABLED is True
47 ) and settings.ANALYTICS_SAMPLE_RATE is not None
48
49
50 def get_middleware_insertion_point():
51 """Returns the attribute name and collection object for the Django middleware.
52 If middleware cannot be found, returns None for the middleware collection."""
53 middleware = getattr(django_settings, MIDDLEWARE, None)
54 # Prioritise MIDDLEWARE over ..._CLASSES, but only in 1.10 and later.
55 if middleware and django.VERSION >= (1, 10):
56 return MIDDLEWARE, middleware
57 return MIDDLEWARE_CLASSES, getattr(django_settings, MIDDLEWARE_CLASSES, None)
58
59
60 def insert_trace_middleware():
61 middleware_attribute, middleware = get_middleware_insertion_point()
62 if middleware is not None and TRACE_MIDDLEWARE not in set(middleware):
63 setattr(django_settings, middleware_attribute, type(middleware)((TRACE_MIDDLEWARE,)) + middleware)
64
65
66 def remove_trace_middleware():
67 _, middleware = get_middleware_insertion_point()
68 if middleware and TRACE_MIDDLEWARE in set(middleware):
69 middleware.remove(TRACE_MIDDLEWARE)
70
71
72 def insert_exception_middleware():
73 middleware_attribute, middleware = get_middleware_insertion_point()
74 if middleware is not None and EXCEPTION_MIDDLEWARE not in set(middleware):
75 setattr(django_settings, middleware_attribute, middleware + type(middleware)((EXCEPTION_MIDDLEWARE,)))
76
77
78 def remove_exception_middleware():
79 _, middleware = get_middleware_insertion_point()
80 if middleware and EXCEPTION_MIDDLEWARE in set(middleware):
81 middleware.remove(EXCEPTION_MIDDLEWARE)
82
83
84 class InstrumentationMixin(MiddlewareClass):
85 """
86 Useful mixin base class for tracing middlewares
87 """
88 def __init__(self, get_response=None):
89 # disable the middleware if the tracer is not enabled
90 # or if the auto instrumentation is disabled
91 self.get_response = get_response
92 if not settings.AUTO_INSTRUMENT:
93 raise MiddlewareNotUsed
94
95
96 class TraceExceptionMiddleware(InstrumentationMixin):
97 """
98 Middleware that traces exceptions raised
99 """
100 def process_exception(self, request, exception):
101 try:
102 span = _get_req_span(request)
103 if span:
104 span.set_tag(http.STATUS_CODE, '500')
105 span.set_traceback() # will set the exception info
106 except Exception:
107 log.debug('error processing exception', exc_info=True)
108
109
110 class TraceMiddleware(InstrumentationMixin):
111 """
112 Middleware that traces Django requests
113 """
114 def process_request(self, request):
115 tracer = settings.TRACER
116 if settings.DISTRIBUTED_TRACING:
117 propagator = HTTPPropagator()
118 context = propagator.extract(request.META)
119 # Only need to active the new context if something was propagated
120 if context.trace_id:
121 tracer.context_provider.activate(context)
122 try:
123 span = tracer.trace(
124 'django.request',
125 service=settings.DEFAULT_SERVICE,
126 resource='unknown', # will be filled by process view
127 span_type=http.TYPE,
128 )
129
130 # set analytics sample rate
131 # DEV: django is special case maintains separate configuration from config api
132 if _analytics_enabled():
133 span.set_tag(
134 ANALYTICS_SAMPLE_RATE_KEY,
135 settings.ANALYTICS_SAMPLE_RATE,
136 )
137
138 # Set HTTP Request tags
139 span.set_tag(http.METHOD, request.method)
140 span.set_tag(http.URL, get_request_uri(request))
141 _set_req_span(request, span)
142 except Exception as e:
143 log.debug('error tracing request: %s', e)
144
145 def process_view(self, request, view_func, *args, **kwargs):
146 span = _get_req_span(request)
147 if span:
148 span.resource = func_name(view_func)
149
150 def process_response(self, request, response):
151 try:
152 span = _get_req_span(request)
153 if span:
154 if response.status_code < 500 and span.error:
155 # remove any existing stack trace since it must have been
156 # handled appropriately
157 span._remove_exc_info()
158
159 # If `process_view` was not called, try to determine the correct `span.resource` to set
160 # DEV: `process_view` won't get called if a middle `process_request` returns an HttpResponse
161 # DEV: `process_view` won't get called when internal error handlers are used (e.g. for 404 responses)
162 if span.resource == 'unknown':
163 try:
164 # Attempt to lookup the view function from the url resolver
165 # https://github.com/django/django/blob/38e2fdadfd9952e751deed662edf4c496d238f28/django/core/handlers/base.py#L104-L113 # noqa
166 urlconf = None
167 if hasattr(request, 'urlconf'):
168 urlconf = request.urlconf
169 resolver = get_resolver(urlconf)
170
171 # Try to resolve the Django view for handling this request
172 if getattr(request, 'request_match', None):
173 request_match = request.request_match
174 else:
175 # This may raise a `django.urls.exceptions.Resolver404` exception
176 request_match = resolver.resolve(request.path_info)
177 span.resource = func_name(request_match.func)
178 except Exception:
179 log.debug('error determining request view function', exc_info=True)
180
181 # If the view could not be found, try to set from a static list of
182 # known internal error handler views
183 span.resource = _django_default_views.get(response.status_code, 'unknown')
184
185 span.set_tag(http.STATUS_CODE, response.status_code)
186 span = _set_auth_tags(span, request)
187 span.finish()
188 except Exception as e:
189 log.debug('error tracing request: %s', e)
190 finally:
191 return response
192
193
194 def _get_req_span(request):
195 """ Return the datadog span from the given request. """
196 return getattr(request, '_datadog_request_span', None)
197
198
199 def _set_req_span(request, span):
200 """ Set the datadog span on the given request. """
201 return setattr(request, '_datadog_request_span', span)
202
203
204 def _set_auth_tags(span, request):
205 """ Patch any available auth tags from the request onto the span. """
206 user = getattr(request, 'user', None)
207 if not user:
208 return span
209
210 if hasattr(user, 'is_authenticated'):
211 span.set_tag('django.user.is_authenticated', user_is_authenticated(user))
212
213 uid = getattr(user, 'pk', None)
214 if uid:
215 span.set_tag('django.user.id', uid)
216
217 uname = getattr(user, 'username', None)
218 if uname:
219 span.set_tag('django.user.name', uname)
220
221 return span
222
[end of ddtrace/contrib/django/middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/contrib/django/middleware.py b/ddtrace/contrib/django/middleware.py
--- a/ddtrace/contrib/django/middleware.py
+++ b/ddtrace/contrib/django/middleware.py
@@ -52,7 +52,7 @@
If middleware cannot be found, returns None for the middleware collection."""
middleware = getattr(django_settings, MIDDLEWARE, None)
# Prioritise MIDDLEWARE over ..._CLASSES, but only in 1.10 and later.
- if middleware and django.VERSION >= (1, 10):
+ if middleware is not None and django.VERSION >= (1, 10):
return MIDDLEWARE, middleware
return MIDDLEWARE_CLASSES, getattr(django_settings, MIDDLEWARE_CLASSES, None)
| {"golden_diff": "diff --git a/ddtrace/contrib/django/middleware.py b/ddtrace/contrib/django/middleware.py\n--- a/ddtrace/contrib/django/middleware.py\n+++ b/ddtrace/contrib/django/middleware.py\n@@ -52,7 +52,7 @@\n If middleware cannot be found, returns None for the middleware collection.\"\"\"\n middleware = getattr(django_settings, MIDDLEWARE, None)\n # Prioritise MIDDLEWARE over ..._CLASSES, but only in 1.10 and later.\n- if middleware and django.VERSION >= (1, 10):\n+ if middleware is not None and django.VERSION >= (1, 10):\n return MIDDLEWARE, middleware\n return MIDDLEWARE_CLASSES, getattr(django_settings, MIDDLEWARE_CLASSES, None)\n", "issue": "Django middleware not properly supported if left empty\nIn Django 1.10 and above, `dd-trace-py` will attempt to check first if the user is using the `MIDDLEWARE` Django setting and will fallback to `MIDDLEWARE_CLASSES` if unset. In my application, I deliberately declare an empty list for the `MIDDLEWARE` setting to tell Django that I am not personally putting any middleware into the application. Unfortunately, `dd-trace-py` simply checks if the `settings.MIDDLEWARE` attribute is falsy and since this is true it then falls back to `MIDDLEWARE_CLASSES`.\r\n\r\nIf an empty list/tuple is declared in Django settings for `MIDDLEWARE`, it should still be properly injected.\n", "before_files": [{"content": "# project\nfrom .conf import settings\nfrom .compat import user_is_authenticated, get_resolver\nfrom .utils import get_request_uri\n\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...contrib import func_name\nfrom ...ext import http\nfrom ...internal.logger import get_logger\nfrom ...propagation.http import HTTPPropagator\nfrom ...settings import config\n\n# 3p\nfrom django.core.exceptions import MiddlewareNotUsed\nfrom django.conf import settings as django_settings\nimport django\n\ntry:\n from django.utils.deprecation import MiddlewareMixin\n MiddlewareClass = MiddlewareMixin\nexcept ImportError:\n MiddlewareClass = object\n\nlog = get_logger(__name__)\n\nEXCEPTION_MIDDLEWARE = 'ddtrace.contrib.django.TraceExceptionMiddleware'\nTRACE_MIDDLEWARE = 'ddtrace.contrib.django.TraceMiddleware'\nMIDDLEWARE = 'MIDDLEWARE'\nMIDDLEWARE_CLASSES = 'MIDDLEWARE_CLASSES'\n\n# Default views list available from:\n# https://github.com/django/django/blob/38e2fdadfd9952e751deed662edf4c496d238f28/django/views/defaults.py\n# DEV: Django doesn't call `process_view` when falling back to one of these internal error handling views\n# DEV: We only use these names when `span.resource == 'unknown'` and we have one of these status codes\n_django_default_views = {\n 400: 'django.views.defaults.bad_request',\n 403: 'django.views.defaults.permission_denied',\n 404: 'django.views.defaults.page_not_found',\n 500: 'django.views.defaults.server_error',\n}\n\n\ndef _analytics_enabled():\n return (\n (config.analytics_enabled and settings.ANALYTICS_ENABLED is not False)\n or settings.ANALYTICS_ENABLED is True\n ) and settings.ANALYTICS_SAMPLE_RATE is not None\n\n\ndef get_middleware_insertion_point():\n \"\"\"Returns the attribute name and collection object for the Django middleware.\n If middleware cannot be found, returns None for the middleware collection.\"\"\"\n middleware = getattr(django_settings, MIDDLEWARE, None)\n # Prioritise MIDDLEWARE over ..._CLASSES, but only in 1.10 and later.\n if middleware and django.VERSION >= (1, 10):\n return MIDDLEWARE, middleware\n return MIDDLEWARE_CLASSES, getattr(django_settings, MIDDLEWARE_CLASSES, None)\n\n\ndef 
insert_trace_middleware():\n middleware_attribute, middleware = get_middleware_insertion_point()\n if middleware is not None and TRACE_MIDDLEWARE not in set(middleware):\n setattr(django_settings, middleware_attribute, type(middleware)((TRACE_MIDDLEWARE,)) + middleware)\n\n\ndef remove_trace_middleware():\n _, middleware = get_middleware_insertion_point()\n if middleware and TRACE_MIDDLEWARE in set(middleware):\n middleware.remove(TRACE_MIDDLEWARE)\n\n\ndef insert_exception_middleware():\n middleware_attribute, middleware = get_middleware_insertion_point()\n if middleware is not None and EXCEPTION_MIDDLEWARE not in set(middleware):\n setattr(django_settings, middleware_attribute, middleware + type(middleware)((EXCEPTION_MIDDLEWARE,)))\n\n\ndef remove_exception_middleware():\n _, middleware = get_middleware_insertion_point()\n if middleware and EXCEPTION_MIDDLEWARE in set(middleware):\n middleware.remove(EXCEPTION_MIDDLEWARE)\n\n\nclass InstrumentationMixin(MiddlewareClass):\n \"\"\"\n Useful mixin base class for tracing middlewares\n \"\"\"\n def __init__(self, get_response=None):\n # disable the middleware if the tracer is not enabled\n # or if the auto instrumentation is disabled\n self.get_response = get_response\n if not settings.AUTO_INSTRUMENT:\n raise MiddlewareNotUsed\n\n\nclass TraceExceptionMiddleware(InstrumentationMixin):\n \"\"\"\n Middleware that traces exceptions raised\n \"\"\"\n def process_exception(self, request, exception):\n try:\n span = _get_req_span(request)\n if span:\n span.set_tag(http.STATUS_CODE, '500')\n span.set_traceback() # will set the exception info\n except Exception:\n log.debug('error processing exception', exc_info=True)\n\n\nclass TraceMiddleware(InstrumentationMixin):\n \"\"\"\n Middleware that traces Django requests\n \"\"\"\n def process_request(self, request):\n tracer = settings.TRACER\n if settings.DISTRIBUTED_TRACING:\n propagator = HTTPPropagator()\n context = propagator.extract(request.META)\n # Only need to active the new context if something was propagated\n if context.trace_id:\n tracer.context_provider.activate(context)\n try:\n span = tracer.trace(\n 'django.request',\n service=settings.DEFAULT_SERVICE,\n resource='unknown', # will be filled by process view\n span_type=http.TYPE,\n )\n\n # set analytics sample rate\n # DEV: django is special case maintains separate configuration from config api\n if _analytics_enabled():\n span.set_tag(\n ANALYTICS_SAMPLE_RATE_KEY,\n settings.ANALYTICS_SAMPLE_RATE,\n )\n\n # Set HTTP Request tags\n span.set_tag(http.METHOD, request.method)\n span.set_tag(http.URL, get_request_uri(request))\n _set_req_span(request, span)\n except Exception as e:\n log.debug('error tracing request: %s', e)\n\n def process_view(self, request, view_func, *args, **kwargs):\n span = _get_req_span(request)\n if span:\n span.resource = func_name(view_func)\n\n def process_response(self, request, response):\n try:\n span = _get_req_span(request)\n if span:\n if response.status_code < 500 and span.error:\n # remove any existing stack trace since it must have been\n # handled appropriately\n span._remove_exc_info()\n\n # If `process_view` was not called, try to determine the correct `span.resource` to set\n # DEV: `process_view` won't get called if a middle `process_request` returns an HttpResponse\n # DEV: `process_view` won't get called when internal error handlers are used (e.g. 
for 404 responses)\n if span.resource == 'unknown':\n try:\n # Attempt to lookup the view function from the url resolver\n # https://github.com/django/django/blob/38e2fdadfd9952e751deed662edf4c496d238f28/django/core/handlers/base.py#L104-L113 # noqa\n urlconf = None\n if hasattr(request, 'urlconf'):\n urlconf = request.urlconf\n resolver = get_resolver(urlconf)\n\n # Try to resolve the Django view for handling this request\n if getattr(request, 'request_match', None):\n request_match = request.request_match\n else:\n # This may raise a `django.urls.exceptions.Resolver404` exception\n request_match = resolver.resolve(request.path_info)\n span.resource = func_name(request_match.func)\n except Exception:\n log.debug('error determining request view function', exc_info=True)\n\n # If the view could not be found, try to set from a static list of\n # known internal error handler views\n span.resource = _django_default_views.get(response.status_code, 'unknown')\n\n span.set_tag(http.STATUS_CODE, response.status_code)\n span = _set_auth_tags(span, request)\n span.finish()\n except Exception as e:\n log.debug('error tracing request: %s', e)\n finally:\n return response\n\n\ndef _get_req_span(request):\n \"\"\" Return the datadog span from the given request. \"\"\"\n return getattr(request, '_datadog_request_span', None)\n\n\ndef _set_req_span(request, span):\n \"\"\" Set the datadog span on the given request. \"\"\"\n return setattr(request, '_datadog_request_span', span)\n\n\ndef _set_auth_tags(span, request):\n \"\"\" Patch any available auth tags from the request onto the span. \"\"\"\n user = getattr(request, 'user', None)\n if not user:\n return span\n\n if hasattr(user, 'is_authenticated'):\n span.set_tag('django.user.is_authenticated', user_is_authenticated(user))\n\n uid = getattr(user, 'pk', None)\n if uid:\n span.set_tag('django.user.id', uid)\n\n uname = getattr(user, 'username', None)\n if uname:\n span.set_tag('django.user.name', uname)\n\n return span\n", "path": "ddtrace/contrib/django/middleware.py"}]} | 3,098 | 173 |
gh_patches_debug_41778 | rasdani/github-patches | git_diff | google__flax-2064 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Document default stride for pooling functions
### Discussed in https://github.com/google/flax/discussions/2023
<div type='discussions-op-text'>
<sup>Originally posted by **dogeplusplus** April 3, 2022</sup>
A bit of a nitpick but I was wondering why the default behavior of pooling functions is to have stride 1 instead of the `window_shape`? I feel that for most use cases the stride would be the dimension of the kernel size as in other frameworks.</div>
</issue>
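As a concrete reference point for the question above, the snippet below compares the documented default (`strides=None`, treated as all ones) with an explicit `strides=window_shape` call. It assumes JAX is installed and that the function is importable as `flax.linen.avg_pool`; it is an illustrative sketch, not part of the library's documentation.

```python
import jax.numpy as jnp
import flax.linen as nn

x = jnp.zeros((1, 8, 8, 3))   # (batch, height, width, features)

# Default: strides=None is interpreted as (1, 1), so "VALID" trimming is the
# only reduction in spatial size.
print(nn.avg_pool(x, window_shape=(2, 2)).shape)                  # (1, 7, 7, 3)

# The "downsample by the kernel size" behaviour common in other frameworks
# has to be requested explicitly.
print(nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2)).shape)  # (1, 4, 4, 3)
```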
<code>
[start of flax/linen/pooling.py]
1 # Copyright 2022 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Pooling modules."""
16
17 from jax import lax
18 import jax.numpy as jnp
19
20 import numpy as np
21
22
23 def pool(inputs, init, reduce_fn, window_shape, strides, padding):
24 """Helper function to define pooling functions.
25
26 Pooling functions are implemented using the ReduceWindow XLA op.
27 NOTE: Be aware that pooling is not generally differentiable.
28 That means providing a reduce_fn that is differentiable does not imply
29 that pool is differentiable.
30
31 Args:
32 inputs: input data with dimensions (batch, window dims..., features).
33 init: the initial value for the reduction
34 reduce_fn: a reduce function of the form `(T, T) -> T`.
35 window_shape: a shape tuple defining the window to reduce over.
36 strides: a sequence of `n` integers, representing the inter-window
37 strides.
38 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
39 of `n` `(low, high)` integer pairs that give the padding to apply before
40 and after each spatial dimension.
41 Returns:
42 The output of the reduction for each window slice.
43 """
44 strides = strides or (1,) * len(window_shape)
45 assert len(window_shape) == len(strides), (
46 f"len({window_shape}) must equal len({strides})")
47 strides = (1,) + strides + (1,)
48 dims = (1,) + window_shape + (1,)
49
50 is_single_input = False
51 if inputs.ndim == len(dims) - 1:
52 # add singleton batch dimension because lax.reduce_window always
53 # needs a batch dimension.
54 inputs = inputs[None]
55 is_single_input = True
56
57 assert inputs.ndim == len(dims), f"len({inputs.shape}) != len({dims})"
58 if not isinstance(padding, str):
59 padding = tuple(map(tuple, padding))
60 assert len(padding) == len(window_shape), (
61 f"padding {padding} must specify pads for same number of dims as "
62 f"window_shape {window_shape}")
63 assert all([len(x) == 2 for x in padding]), (
64 f"each entry in padding {padding} must be length 2")
65 padding = ((0, 0),) + padding + ((0, 0),)
66 y = lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)
67 if is_single_input:
68 y = jnp.squeeze(y, axis=0)
69 return y
70
71
72 def avg_pool(inputs, window_shape, strides=None, padding="VALID"):
73 """Pools the input by taking the average over a window.
74
75 Args:
76 inputs: input data with dimensions (batch, window dims..., features).
77 window_shape: a shape tuple defining the window to reduce over.
78 strides: a sequence of `n` integers, representing the inter-window
79 strides (default: `(1, ..., 1)`).
80 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
81 of `n` `(low, high)` integer pairs that give the padding to apply before
82 and after each spatial dimension (default: `'VALID'`).
83 Returns:
84 The average for each window slice.
85 """
86 y = pool(inputs, 0., lax.add, window_shape, strides, padding)
87 y = y / np.prod(window_shape)
88 return y
89
90
91 def max_pool(inputs, window_shape, strides=None, padding="VALID"):
92 """Pools the input by taking the maximum of a window slice.
93
94 Args:
95 inputs: input data with dimensions (batch, window dims..., features).
96 window_shape: a shape tuple defining the window to reduce over.
97 strides: a sequence of `n` integers, representing the inter-window
98 strides (default: `(1, ..., 1)`).
99 padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
100 of `n` `(low, high)` integer pairs that give the padding to apply before
101 and after each spatial dimension (default: `'VALID'`).
102 Returns:
103 The maximum for each window slice.
104 """
105 y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)
106 return y
107
108
109 def min_pool(inputs, window_shape, strides=None, padding="VALID"):
110 """Pools the input by taking the minimum of a window slice.
111
112 Args:
113 inputs: Input data with dimensions (batch, window dims..., features).
114 window_shape: A shape tuple defining the window to reduce over.
115 strides: A sequence of `n` integers, representing the inter-window strides
116 (default: `(1, ..., 1)`).
117 padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence of
118 `n` `(low, high)` integer pairs that give the padding to apply before and
119 after each spatial dimension (default: `'VALID'`).
120
121 Returns:
122 The minimum for each window slice.
123 """
124 return pool(inputs, jnp.inf, lax.min, window_shape, strides, padding)
125
[end of flax/linen/pooling.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flax/linen/pooling.py b/flax/linen/pooling.py
--- a/flax/linen/pooling.py
+++ b/flax/linen/pooling.py
@@ -25,8 +25,8 @@
Pooling functions are implemented using the ReduceWindow XLA op.
NOTE: Be aware that pooling is not generally differentiable.
- That means providing a reduce_fn that is differentiable does not imply
- that pool is differentiable.
+ That means providing a reduce_fn that is differentiable does not imply that
+ pool is differentiable.
Args:
inputs: input data with dimensions (batch, window dims..., features).
@@ -34,7 +34,7 @@
reduce_fn: a reduce function of the form `(T, T) -> T`.
window_shape: a shape tuple defining the window to reduce over.
strides: a sequence of `n` integers, representing the inter-window
- strides.
+ strides (default: `(1, ..., 1)`).
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
of `n` `(low, high)` integer pairs that give the padding to apply before
and after each spatial dimension.
@@ -76,7 +76,7 @@
inputs: input data with dimensions (batch, window dims..., features).
window_shape: a shape tuple defining the window to reduce over.
strides: a sequence of `n` integers, representing the inter-window
- strides (default: `(1, ..., 1)`).
+ strides (default: `(1, ..., 1)`).
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
of `n` `(low, high)` integer pairs that give the padding to apply before
and after each spatial dimension (default: `'VALID'`).
@@ -95,7 +95,7 @@
inputs: input data with dimensions (batch, window dims..., features).
window_shape: a shape tuple defining the window to reduce over.
strides: a sequence of `n` integers, representing the inter-window
- strides (default: `(1, ..., 1)`).
+ strides (default: `(1, ..., 1)`).
padding: either the string `'SAME'`, the string `'VALID'`, or a sequence
of `n` `(low, high)` integer pairs that give the padding to apply before
and after each spatial dimension (default: `'VALID'`).
@@ -113,7 +113,7 @@
inputs: Input data with dimensions (batch, window dims..., features).
window_shape: A shape tuple defining the window to reduce over.
strides: A sequence of `n` integers, representing the inter-window strides
- (default: `(1, ..., 1)`).
+ (default: `(1, ..., 1)`).
padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence of
`n` `(low, high)` integer pairs that give the padding to apply before and
after each spatial dimension (default: `'VALID'`).
| {"golden_diff": "diff --git a/flax/linen/pooling.py b/flax/linen/pooling.py\n--- a/flax/linen/pooling.py\n+++ b/flax/linen/pooling.py\n@@ -25,8 +25,8 @@\n \n Pooling functions are implemented using the ReduceWindow XLA op.\n NOTE: Be aware that pooling is not generally differentiable.\n- That means providing a reduce_fn that is differentiable does not imply\n- that pool is differentiable.\n+ That means providing a reduce_fn that is differentiable does not imply that\n+ pool is differentiable.\n \n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n@@ -34,7 +34,7 @@\n reduce_fn: a reduce function of the form `(T, T) -> T`.\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n- strides.\n+ strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension.\n@@ -76,7 +76,7 @@\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n- strides (default: `(1, ..., 1)`).\n+ strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n@@ -95,7 +95,7 @@\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n- strides (default: `(1, ..., 1)`).\n+ strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n@@ -113,7 +113,7 @@\n inputs: Input data with dimensions (batch, window dims..., features).\n window_shape: A shape tuple defining the window to reduce over.\n strides: A sequence of `n` integers, representing the inter-window strides\n- (default: `(1, ..., 1)`).\n+ (default: `(1, ..., 1)`).\n padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence of\n `n` `(low, high)` integer pairs that give the padding to apply before and\n after each spatial dimension (default: `'VALID'`).\n", "issue": "Document default stride for pooling functions\n### Discussed in https://github.com/google/flax/discussions/2023\r\n\r\n<div type='discussions-op-text'>\r\n\r\n<sup>Originally posted by **dogeplusplus** April 3, 2022</sup>\r\nA bit of a nitpick but I was wondering why the default behavior of pooling functions is to have stride 1 instead of the `window_shape`? 
I feel that for most use cases the stride would be the dimension of the kernel size as in other frameworks.</div>\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Pooling modules.\"\"\"\n\nfrom jax import lax\nimport jax.numpy as jnp\n\nimport numpy as np\n\n\ndef pool(inputs, init, reduce_fn, window_shape, strides, padding):\n \"\"\"Helper function to define pooling functions.\n\n Pooling functions are implemented using the ReduceWindow XLA op.\n NOTE: Be aware that pooling is not generally differentiable.\n That means providing a reduce_fn that is differentiable does not imply\n that pool is differentiable.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n init: the initial value for the reduction\n reduce_fn: a reduce function of the form `(T, T) -> T`.\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides.\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension.\n Returns:\n The output of the reduction for each window slice.\n \"\"\"\n strides = strides or (1,) * len(window_shape)\n assert len(window_shape) == len(strides), (\n f\"len({window_shape}) must equal len({strides})\")\n strides = (1,) + strides + (1,)\n dims = (1,) + window_shape + (1,)\n\n is_single_input = False\n if inputs.ndim == len(dims) - 1:\n # add singleton batch dimension because lax.reduce_window always\n # needs a batch dimension.\n inputs = inputs[None]\n is_single_input = True\n\n assert inputs.ndim == len(dims), f\"len({inputs.shape}) != len({dims})\"\n if not isinstance(padding, str):\n padding = tuple(map(tuple, padding))\n assert len(padding) == len(window_shape), (\n f\"padding {padding} must specify pads for same number of dims as \"\n f\"window_shape {window_shape}\")\n assert all([len(x) == 2 for x in padding]), (\n f\"each entry in padding {padding} must be length 2\")\n padding = ((0, 0),) + padding + ((0, 0),)\n y = lax.reduce_window(inputs, init, reduce_fn, dims, strides, padding)\n if is_single_input:\n y = jnp.squeeze(y, axis=0)\n return y\n\n\ndef avg_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the average over a window.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The average for each window slice.\n \"\"\"\n y = pool(inputs, 0., lax.add, window_shape, strides, padding)\n y = y / 
np.prod(window_shape)\n return y\n\n\ndef max_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the maximum of a window slice.\n\n Args:\n inputs: input data with dimensions (batch, window dims..., features).\n window_shape: a shape tuple defining the window to reduce over.\n strides: a sequence of `n` integers, representing the inter-window\n strides (default: `(1, ..., 1)`).\n padding: either the string `'SAME'`, the string `'VALID'`, or a sequence\n of `n` `(low, high)` integer pairs that give the padding to apply before\n and after each spatial dimension (default: `'VALID'`).\n Returns:\n The maximum for each window slice.\n \"\"\"\n y = pool(inputs, -jnp.inf, lax.max, window_shape, strides, padding)\n return y\n\n\ndef min_pool(inputs, window_shape, strides=None, padding=\"VALID\"):\n \"\"\"Pools the input by taking the minimum of a window slice.\n\n Args:\n inputs: Input data with dimensions (batch, window dims..., features).\n window_shape: A shape tuple defining the window to reduce over.\n strides: A sequence of `n` integers, representing the inter-window strides\n (default: `(1, ..., 1)`).\n padding: Either the string `'SAME'`, the string `'VALID'`, or a sequence of\n `n` `(low, high)` integer pairs that give the padding to apply before and\n after each spatial dimension (default: `'VALID'`).\n\n Returns:\n The minimum for each window slice.\n \"\"\"\n return pool(inputs, jnp.inf, lax.min, window_shape, strides, padding)\n", "path": "flax/linen/pooling.py"}]} | 2,151 | 698 |
gh_patches_debug_23929 | rasdani/github-patches | git_diff | sunpy__sunpy-5089 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remote data manager does not recover well from its database being deleted while a session is running
<!-- These comments are hidden when you submit the issue so you do not need to remove them!
Please be sure to check out our contributing guidelines: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst
Please be sure to check out our code of conduct:
https://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst -->
<!-- Please have a search on our GitHub repository to see if a similar issue has already been posted.
If a similar issue is closed, have a quick look to see if you are satisfied by the resolution.
If not please go ahead and open an issue! -->
### Description
You can permanently corrupt the data manager state. So nothing works.
### Expected behavior
It recovers from my abuse and fixes itself.
### Steps to Reproduce
<!-- Ideally a code example could be provided so we can run it ourselves. -->
<!-- If you are pasting code, use triple backticks (```) around your code snippet. -->
1. Delete all your data_manager stuff so you have a clean slate.
2. Run the following code inside an interactive prompt:
```python
from sunpy.data import manager
@manager.require('test_file',
['http://data.sunpy.org/sample-data/predicted-sunspot-radio-flux.txt'],
'4c85b04a5528aa97eb84a087450eda0421c71833820576330bba148564089b11')
def test_function():
pass
test_function()
```
3. Observe how everything works.
4. do a rm ~/sunpy/data_manager/*
5. **Without** starting a new python session run: `test_function()`, observe the following error:
```
---------------------------------------------------------------------------
OperationalError Traceback (most recent call last)
<ipython-input-2-7e96d146b416> in <module>
----> 1 test_function()
~/.virtualenvs/sunpy-release/lib/python3.8/site-packages/sunpy/data/data_manager/manager.py in wrapper(*args, **kwargs)
64 file_path = self._cache.download(urls, redownload=True)
65 else:
---> 66 details = self._cache.get_by_hash(sha_hash)
67 if not details:
68 # In case we are matching by hash and file does not exist
~/.virtualenvs/sunpy-release/lib/python3.8/site-packages/sunpy/data/data_manager/cache.py in get_by_hash(self, sha_hash)
127 SHA-1 hash of the file.
128 """
--> 129 details = self._storage.find_by_key('file_hash', sha_hash)
130 return details
131
~/.virtualenvs/sunpy-release/lib/python3.8/site-packages/sunpy/data/data_manager/storage.py in find_by_key(self, key, value)
161 with self.connection() as conn:
162 cursor = conn.cursor()
--> 163 cursor.execute(f'''SELECT * FROM {self._table_name}
164 WHERE {key}="{value}"''')
165 row = cursor.fetchone()
OperationalError: no such table: cache_storage
```
6. Restart your Python session and re-run:
```python
from sunpy.data import manager
@manager.require('test_file',
['http://data.sunpy.org/sample-data/predicted-sunspot-radio-flux.txt'],
'4c85b04a5528aa97eb84a087450eda0421c71833820576330bba148564089b11')
def test_function():
pass
test_function()
```
7. Observe how it's still broken.
Note, that if you delete the contents of the `data_manager` dir and then *restart* your Python session before calling the data manager again it recovers fine.
### System Details
<!-- We at least need to know the SunPy version you are using. -->
<!-- We provide a short function in SunPy that will provide some of the below information. -->
<!-- It is sunpy.util.system_info(), this is optional but strongly recommended. -->
- SunPy Version: 1.1.1
- Python Version: 3.8
</issue>
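To make the expected "recovers and fixes itself" behaviour concrete, here is a standalone `sqlite3` sketch of a store that re-creates its schema on every connection (the same general idea as the patch shown further down); the temporary path, table name and columns are simplified placeholders.

```python
import sqlite3
import tempfile
from pathlib import Path

db_path = Path(tempfile.mkdtemp()) / "data_manager" / "sqlhub.db"
db_path.parent.mkdir(parents=True, exist_ok=True)

def connect(path):
    # Re-creating the schema on every connection (not only when the file is
    # first created) lets the store survive deletion of the .db file.
    conn = sqlite3.connect(str(path))
    conn.execute("CREATE TABLE IF NOT EXISTS cache_storage "
                 "(file_hash text, file_path text, url text, time text)")
    return conn

conn = connect(db_path)
with conn:                                     # commits on success
    conn.execute("INSERT INTO cache_storage VALUES (?, ?, ?, ?)",
                 ("abc", "/tmp/f.txt", "http://example.com/f.txt", "now"))
conn.close()

db_path.unlink()                               # simulate `rm ~/sunpy/data_manager/*`

conn = connect(db_path)                        # no "no such table" error this time
print(conn.execute("SELECT * FROM cache_storage").fetchall())  # [] -- empty but usable
conn.close()
```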
<code>
[start of sunpy/data/data_manager/storage.py]
1 """
2 Storage module contains the abstract implementation of storage
3 for `sunpy.data.data_manager.Cache` and a concrete implementation
4 using sqlite.
5 """
6 import sqlite3
7 from abc import ABCMeta, abstractmethod
8 from pathlib import Path
9 from contextlib import contextmanager
10
11 __all__ = [
12 'StorageProviderBase',
13 'SqliteStorage',
14 'InMemStorage',
15 ]
16
17
18 class StorageProviderBase(metaclass=ABCMeta):
19 """
20 Base class for remote data manager storage providers.
21 """
22 @abstractmethod
23 def find_by_key(self, key, value):
24 """
25 Returns the file details if value coresponding to the key
26 found in storage. Returns `None` if hash not found.
27
28 Parameters
29 ----------
30 key: `str`
31 The key/column name of the field.
32 value: `str`
33 The value associated with the key of the entry.
34
35 Returns
36 -------
37 `dict` or `None`
38 `dict` contains the details of the file. `None` if hash not found.
39
40 Raises
41 ------
42 ``KeyError``
43 KeyError is raised if key does not exist.
44 """
45
46 @abstractmethod
47 def delete_by_key(self, key, value):
48 """
49 Deletes the matching entry from the store.
50
51 Parameters
52 ----------
53 key: `str`
54 The key/column name of the field.
55 value: `str`
56 The value associated with the key of the entry.
57
58 Raises
59 ------
60 ``KeyError``
61 KeyError is raised if key does not exist.
62 """
63
64 @abstractmethod
65 def store(self, details):
66 """
67 Stores the details in the storage.
68
69 Parameters
70 ----------
71 details: `dict`
72 Details to be stored.
73 """
74
75
76 class InMemStorage(StorageProviderBase):
77 """
78 This provides a storage stored in memory.
79 """
80
81 def __init__(self):
82 self._store = []
83
84 def store(self, details):
85 self._store += [details]
86
87 def delete_by_key(self, key, value):
88 for i in self._store:
89 if i[key] == value:
90 self._store.remove(i)
91
92 def find_by_key(self, key, value):
93 for i in self._store:
94 if i[key] == value:
95 return i
96 return None
97
98
99 class SqliteStorage(StorageProviderBase):
100 """
101 This provides a sqlite backend for storage.
102
103 Parameters
104 ----------
105 path: `str`
106 Path to the database file.
107 """
108 COLUMN_NAMES = [
109 'file_hash',
110 'file_path',
111 'url',
112 'time',
113 ]
114
115 def __init__(self, path):
116 self._db_path = Path(path)
117 self._table_name = 'cache_storage'
118
119 self._db_path.parent.mkdir(parents=True, exist_ok=True)
120 if not self._db_path.exists():
121 # setup database
122 self._setup()
123
124 def _setup(self):
125 schema = ' text, '.join(self.COLUMN_NAMES) + ' text'
126 with self.connection(commit=True) as conn:
127 # Do this in a try...except to prevent race conditions in the tests
128 try:
129 conn.execute(f'''CREATE TABLE {self._table_name}
130 ({schema})''')
131 except sqlite3.OperationalError as exc:
132 if "cache_storage already exists" in str(exc):
133 return
134 raise exc
135
136 @contextmanager
137 def connection(self, commit=False):
138 """
139 A context manager which provides an easy way to handle db connections.
140
141 Parameters
142 ----------
143 commit: `bool`
144 Whether to commit after succesful execution of db command.
145 """
146 conn = sqlite3.connect(str(self._db_path))
147 try:
148 yield conn
149 if commit:
150 conn.commit()
151 finally:
152 conn.close()
153
154 def find_by_key(self, key, value):
155 if key not in self.COLUMN_NAMES:
156 raise KeyError
157 with self.connection() as conn:
158 cursor = conn.cursor()
159 cursor.execute(f'''SELECT * FROM {self._table_name}
160 WHERE {key}="{value}"''')
161 row = cursor.fetchone()
162 if row:
163 return dict(zip(self.COLUMN_NAMES, row))
164 return None
165
166 def delete_by_key(self, key, value):
167 if key not in self.COLUMN_NAMES:
168 raise KeyError
169 with self.connection(commit=True) as conn:
170 cursor = conn.cursor()
171 cursor.execute(f'''DELETE FROM {self._table_name}
172 WHERE {key}="{value}"''')
173
174 def store(self, details):
175 values = [details[k] for k in self.COLUMN_NAMES]
176 placeholder = '?,' * len(values)
177 placeholder = placeholder[:-1]
178 with self.connection(commit=True) as conn:
179 conn.execute(f'''INSERT INTO {self._table_name}
180 VALUES ({placeholder})''', list(values))
181
[end of sunpy/data/data_manager/storage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sunpy/data/data_manager/storage.py b/sunpy/data/data_manager/storage.py
--- a/sunpy/data/data_manager/storage.py
+++ b/sunpy/data/data_manager/storage.py
@@ -122,16 +122,12 @@
self._setup()
def _setup(self):
- schema = ' text, '.join(self.COLUMN_NAMES) + ' text'
with self.connection(commit=True) as conn:
- # Do this in a try...except to prevent race conditions in the tests
- try:
- conn.execute(f'''CREATE TABLE {self._table_name}
- ({schema})''')
- except sqlite3.OperationalError as exc:
- if "cache_storage already exists" in str(exc):
- return
- raise exc
+ self._create_table(conn)
+
+ def _create_table(self, conn):
+ schema = ' text, '.join(self.COLUMN_NAMES) + ' text'
+ conn.execute(f'''CREATE TABLE IF NOT EXISTS {self._table_name} ({schema})''')
@contextmanager
def connection(self, commit=False):
@@ -144,6 +140,7 @@
Whether to commit after succesful execution of db command.
"""
conn = sqlite3.connect(str(self._db_path))
+ self._create_table(conn)
try:
yield conn
if commit:
| {"golden_diff": "diff --git a/sunpy/data/data_manager/storage.py b/sunpy/data/data_manager/storage.py\n--- a/sunpy/data/data_manager/storage.py\n+++ b/sunpy/data/data_manager/storage.py\n@@ -122,16 +122,12 @@\n self._setup()\n \n def _setup(self):\n- schema = ' text, '.join(self.COLUMN_NAMES) + ' text'\n with self.connection(commit=True) as conn:\n- # Do this in a try...except to prevent race conditions in the tests\n- try:\n- conn.execute(f'''CREATE TABLE {self._table_name}\n- ({schema})''')\n- except sqlite3.OperationalError as exc:\n- if \"cache_storage already exists\" in str(exc):\n- return\n- raise exc\n+ self._create_table(conn)\n+\n+ def _create_table(self, conn):\n+ schema = ' text, '.join(self.COLUMN_NAMES) + ' text'\n+ conn.execute(f'''CREATE TABLE IF NOT EXISTS {self._table_name} ({schema})''')\n \n @contextmanager\n def connection(self, commit=False):\n@@ -144,6 +140,7 @@\n Whether to commit after succesful execution of db command.\n \"\"\"\n conn = sqlite3.connect(str(self._db_path))\n+ self._create_table(conn)\n try:\n yield conn\n if commit:\n", "issue": "Remote data manager does not recover well from its database being deleted while a session is running\n<!-- This comments are hidden when you submit the issue so you do not need to remove them!\r\nPlease be sure to check out our contributing guidelines: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst\r\nPlease be sure to check out our code of conduct:\r\nhttps://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst -->\r\n\r\n<!-- Please have a search on our GitHub repository to see if a similar issue has already been posted.\r\nIf a similar issue is closed, have a quick look to see if you are satisfied by the resolution.\r\nIf not please go ahead and open an issue! -->\r\n\r\n### Description\r\nYou can permanently corrupt the data manager state. So nothing works.\r\n\r\n### Expected behavior\r\nIt recovers from my abuse and fixes itself.\r\n\r\n\r\n### Steps to Reproduce\r\n<!-- Ideally a code example could be provided so we can run it ourselves. -->\r\n<!-- If you are pasting code, use tripe backticks (```) around your code snippet. -->\r\n\r\n1. Delete all your data_manager stuff so you have a clean slate.\r\n2. Run the following code inside an interactive prompt:\r\n```python\r\nfrom sunpy.data import manager\r\[email protected]('test_file',\r\n ['http://data.sunpy.org/sample-data/predicted-sunspot-radio-flux.txt'],\r\n '4c85b04a5528aa97eb84a087450eda0421c71833820576330bba148564089b11')\r\ndef test_function():\r\n pass\r\n\r\ntest_function()\r\n```\r\n3. Observe how everything works.\r\n4. do a rm ~/sunpy/data_manager/*\r\n5. 
**Without** starting a new python session run: `test_function()`, observe the following error:\r\n```\r\n---------------------------------------------------------------------------\r\nOperationalError Traceback (most recent call last)\r\n<ipython-input-2-7e96d146b416> in <module>\r\n----> 1 test_function()\r\n\r\n~/.virtualenvs/sunpy-release/lib/python3.8/site-packages/sunpy/data/data_manager/manager.py in wrapper(*args, **kwargs)\r\n 64 file_path = self._cache.download(urls, redownload=True)\r\n 65 else:\r\n---> 66 details = self._cache.get_by_hash(sha_hash)\r\n 67 if not details:\r\n 68 # In case we are matching by hash and file does not exist\r\n\r\n~/.virtualenvs/sunpy-release/lib/python3.8/site-packages/sunpy/data/data_manager/cache.py in get_by_hash(self, sha_hash)\r\n 127 SHA-1 hash of the file.\r\n 128 \"\"\"\r\n--> 129 details = self._storage.find_by_key('file_hash', sha_hash)\r\n 130 return details\r\n 131 \r\n\r\n~/.virtualenvs/sunpy-release/lib/python3.8/site-packages/sunpy/data/data_manager/storage.py in find_by_key(self, key, value)\r\n 161 with self.connection() as conn:\r\n 162 cursor = conn.cursor()\r\n--> 163 cursor.execute(f'''SELECT * FROM {self._table_name}\r\n 164 WHERE {key}=\"{value}\"''')\r\n 165 row = cursor.fetchone()\r\n\r\nOperationalError: no such table: cache_storage\r\n\r\n```\r\n6. Restart your Python session and re-run:\r\n```python\r\nfrom sunpy.data import manager\r\[email protected]('test_file',\r\n ['http://data.sunpy.org/sample-data/predicted-sunspot-radio-flux.txt'],\r\n '4c85b04a5528aa97eb84a087450eda0421c71833820576330bba148564089b11')\r\ndef test_function():\r\n pass\r\n\r\ntest_function()\r\n```\r\n7. Observe how it's still broken.\r\n\r\nNote, that if you delete the contents of the `data_manager` dir and then *restart* your Python session before calling the data manager again it recovers fine.\r\n\r\n### System Details\r\n<!-- We at least need to know the SunPy version you are using. -->\r\n<!-- We provide a short function in SunPy that will provide some of the below information. -->\r\n<!-- It is sunpy.util.system_info(), this is optional but strongly recommended. -->\r\n\r\n - SunPy Version: 1.1.1\r\n - Python Version: 3.8\r\n\n", "before_files": [{"content": "\"\"\"\nStorage module contains the abstract implementation of storage\nfor `sunpy.data.data_manager.Cache` and a concrete implementation\nusing sqlite.\n\"\"\"\nimport sqlite3\nfrom abc import ABCMeta, abstractmethod\nfrom pathlib import Path\nfrom contextlib import contextmanager\n\n__all__ = [\n 'StorageProviderBase',\n 'SqliteStorage',\n 'InMemStorage',\n]\n\n\nclass StorageProviderBase(metaclass=ABCMeta):\n \"\"\"\n Base class for remote data manager storage providers.\n \"\"\"\n @abstractmethod\n def find_by_key(self, key, value):\n \"\"\"\n Returns the file details if value coresponding to the key\n found in storage. Returns `None` if hash not found.\n\n Parameters\n ----------\n key: `str`\n The key/column name of the field.\n value: `str`\n The value associated with the key of the entry.\n\n Returns\n -------\n `dict` or `None`\n `dict` contains the details of the file. 
`None` if hash not found.\n\n Raises\n ------\n ``KeyError``\n KeyError is raised if key does not exist.\n \"\"\"\n\n @abstractmethod\n def delete_by_key(self, key, value):\n \"\"\"\n Deletes the matching entry from the store.\n\n Parameters\n ----------\n key: `str`\n The key/column name of the field.\n value: `str`\n The value associated with the key of the entry.\n\n Raises\n ------\n ``KeyError``\n KeyError is raised if key does not exist.\n \"\"\"\n\n @abstractmethod\n def store(self, details):\n \"\"\"\n Stores the details in the storage.\n\n Parameters\n ----------\n details: `dict`\n Details to be stored.\n \"\"\"\n\n\nclass InMemStorage(StorageProviderBase):\n \"\"\"\n This provides a storage stored in memory.\n \"\"\"\n\n def __init__(self):\n self._store = []\n\n def store(self, details):\n self._store += [details]\n\n def delete_by_key(self, key, value):\n for i in self._store:\n if i[key] == value:\n self._store.remove(i)\n\n def find_by_key(self, key, value):\n for i in self._store:\n if i[key] == value:\n return i\n return None\n\n\nclass SqliteStorage(StorageProviderBase):\n \"\"\"\n This provides a sqlite backend for storage.\n\n Parameters\n ----------\n path: `str`\n Path to the database file.\n \"\"\"\n COLUMN_NAMES = [\n 'file_hash',\n 'file_path',\n 'url',\n 'time',\n ]\n\n def __init__(self, path):\n self._db_path = Path(path)\n self._table_name = 'cache_storage'\n\n self._db_path.parent.mkdir(parents=True, exist_ok=True)\n if not self._db_path.exists():\n # setup database\n self._setup()\n\n def _setup(self):\n schema = ' text, '.join(self.COLUMN_NAMES) + ' text'\n with self.connection(commit=True) as conn:\n # Do this in a try...except to prevent race conditions in the tests\n try:\n conn.execute(f'''CREATE TABLE {self._table_name}\n ({schema})''')\n except sqlite3.OperationalError as exc:\n if \"cache_storage already exists\" in str(exc):\n return\n raise exc\n\n @contextmanager\n def connection(self, commit=False):\n \"\"\"\n A context manager which provides an easy way to handle db connections.\n\n Parameters\n ----------\n commit: `bool`\n Whether to commit after succesful execution of db command.\n \"\"\"\n conn = sqlite3.connect(str(self._db_path))\n try:\n yield conn\n if commit:\n conn.commit()\n finally:\n conn.close()\n\n def find_by_key(self, key, value):\n if key not in self.COLUMN_NAMES:\n raise KeyError\n with self.connection() as conn:\n cursor = conn.cursor()\n cursor.execute(f'''SELECT * FROM {self._table_name}\n WHERE {key}=\"{value}\"''')\n row = cursor.fetchone()\n if row:\n return dict(zip(self.COLUMN_NAMES, row))\n return None\n\n def delete_by_key(self, key, value):\n if key not in self.COLUMN_NAMES:\n raise KeyError\n with self.connection(commit=True) as conn:\n cursor = conn.cursor()\n cursor.execute(f'''DELETE FROM {self._table_name}\n WHERE {key}=\"{value}\"''')\n\n def store(self, details):\n values = [details[k] for k in self.COLUMN_NAMES]\n placeholder = '?,' * len(values)\n placeholder = placeholder[:-1]\n with self.connection(commit=True) as conn:\n conn.execute(f'''INSERT INTO {self._table_name}\n VALUES ({placeholder})''', list(values))\n", "path": "sunpy/data/data_manager/storage.py"}]} | 3,029 | 307 |
gh_patches_debug_30539 | rasdani/github-patches | git_diff | chainer__chainer-1266 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Concat reverse indexing not working correctly
The following works correctly:
```
a_data = np.zeros((10, 5))
b_data = np.zeros((10, 3))
a = Variable(a_data)
b = Variable(b_data)
F.concat((a, b), axis=1)
```
However, if I change the last line into:
```
F.concat((a, b), axis=-1)
```
it gives the error:
```
Invalid operation is performed in: Concat (Forward)
Expect: in_types[0].shape[1] == in_types[1].shape[1]
Actual: 5 != 3
```
</issue>
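For illustration only, a plain NumPy sketch of the axis normalisation the shape check needs before comparing dimensions; this is a standalone example with made-up helper names, not necessarily the fix the project adopted.

```python
import numpy as np

a = np.zeros((10, 5))
b = np.zeros((10, 3))

def check_concat_shapes(arrays, axis):
    ndim = arrays[0].ndim
    axis = axis % ndim                 # e.g. -1 -> 1 for two-dimensional inputs
    for x in arrays[1:]:
        for d in range(ndim):
            if d == axis:
                continue               # the concat axis may differ in length
            assert arrays[0].shape[d] == x.shape[d], (d, arrays[0].shape, x.shape)

check_concat_shapes((a, b), axis=-1)            # passes once the axis is normalised
print(np.concatenate((a, b), axis=-1).shape)    # (10, 8)
```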
<code>
[start of chainer/functions/array/concat.py]
1 import numpy
2
3 from chainer import cuda
4 from chainer import function
5 from chainer.utils import type_check
6
7
8 class Concat(function.Function):
9
10 """Concatenate multiple tensors towards specified axis."""
11
12 # concat along the channel dimension by default
13 def __init__(self, axis=1):
14 self.axis = axis
15
16 def check_type_forward(self, in_types):
17 type_check.expect(in_types.size() > 0)
18 type_check.expect(in_types[0].ndim >
19 type_check.Variable(self.axis, 'axis'))
20
21 ndim = in_types[0].ndim.eval()
22 for i in range(1, in_types.size().eval()):
23 type_check.expect(
24 in_types[0].dtype == in_types[i].dtype,
25 in_types[0].ndim == in_types[i].ndim,
26 )
27 for d in range(0, ndim):
28 if d == self.axis:
29 continue
30 type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])
31
32 def forward(self, xs):
33 xp = cuda.get_array_module(*xs)
34 return xp.concatenate(xs, axis=self.axis),
35
36 def backward(self, xs, gy):
37 if not xs[:-1]:
38 return gy
39
40 xp = cuda.get_array_module(*xs)
41 sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()
42 return xp.split(gy[0], sizes, axis=self.axis)
43
44
45 def concat(xs, axis=1):
46 """Concatenates given variables along an axis.
47
48 Args:
49 xs (tuple of Variables): Variables to be concatenated.
50 axis (int): Axis that the input arrays are concatenated along.
51
52 Returns:
53 ~chainer.Variable: Output variable.
54
55 """
56 return Concat(axis=axis)(*xs)
57
[end of chainer/functions/array/concat.py]
[start of cupy/manipulation/split.py]
1 import numpy
2 import six
3
4
5 def array_split(ary, indices_or_sections, axis=0):
6 """Splits an array into multiple sub arrays along a given axis.
7
8 This function is almost equivalent to :func:`cupy.split`. The only
9 difference is that this function allows an integer sections that does not
10 evenly divide the axis.
11
12 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`
13
14 """
15 if ary.ndim <= axis:
16 raise IndexError('Axis exceeds ndim')
17 size = ary.shape[axis]
18
19 if numpy.isscalar(indices_or_sections):
20 each_size = (size - 1) // indices_or_sections + 1
21 indices = [i * each_size
22 for i in six.moves.range(1, indices_or_sections)]
23 else:
24 indices = indices_or_sections
25
26 if len(indices) == 0:
27 return [ary]
28
29 skip = (slice(None),) * axis
30 ret = []
31 i = 0
32 for index in indices:
33 ret.append(ary[skip + (slice(i, index),)])
34 i = index
35 ret.append(ary[skip + (slice(i, size),)])
36
37 return ret
38
39
40 def dsplit(ary, indices_or_sections):
41 """Splits an array into multiple sub arrays along the third axis.
42
43 This is equivalent to ``split`` with ``axis=2``.
44
45 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`
46
47 """
48 if ary.ndim <= 2:
49 raise ValueError('Cannot dsplit an array with less than 3 dimensions')
50 return split(ary, indices_or_sections, 2)
51
52
53 def hsplit(ary, indices_or_sections):
54 """Splits an array into multiple sub arrays horizontally.
55
56 This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
57 dimension, and otherwise that with ``axis=1``.
58
59 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`
60
61 """
62 if ary.ndim == 0:
63 raise ValueError('Cannot hsplit a zero-dimensional array')
64 if ary.ndim == 1:
65 return split(ary, indices_or_sections, 0)
66 else:
67 return split(ary, indices_or_sections, 1)
68
69
70 def split(ary, indices_or_sections, axis=0):
71 """Splits an array into multiple sub arrays along a given axis.
72
73 Args:
74 ary (cupy.ndarray): Array to split.
75 indices_or_sections (int or sequence of ints): A value indicating how
76 to divide the axis. If it is an integer, then is treated as the
77 number of sections, and the axis is evenly divided. Otherwise,
78 the integers indicate indices to split at. Note that the sequence
79 on the device memory is not allowed.
80 axis (int): Axis along which the array is split.
81
82 Returns:
83 A list of sub arrays. Each array is a view of the corresponding input
84 array.
85
86 .. seealso:: :func:`numpy.split`
87
88 """
89 if ary.ndim <= axis:
90 raise IndexError('Axis exceeds ndim')
91 size = ary.shape[axis]
92
93 if numpy.isscalar(indices_or_sections):
94 if size % indices_or_sections != 0:
95 raise ValueError(
96 'indices_or_sections must divide the size along the axes.\n'
97 'If you want to split the array into non-equally-sized '
98 'arrays, use array_split instead.')
99 return array_split(ary, indices_or_sections, axis)
100
101
102 def vsplit(ary, indices_or_sections):
103 """Splits an array into multiple sub arrays along the first axis.
104
105 This is equivalent to ``split`` with ``axis=0``.
106
107 .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`
108
109 """
110 if ary.ndim <= 1:
111 raise ValueError('Cannot vsplit an array with less than 2 dimensions')
112 return split(ary, indices_or_sections, 0)
113
[end of cupy/manipulation/split.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/functions/array/concat.py b/chainer/functions/array/concat.py
--- a/chainer/functions/array/concat.py
+++ b/chainer/functions/array/concat.py
@@ -18,14 +18,19 @@
type_check.expect(in_types[0].ndim >
type_check.Variable(self.axis, 'axis'))
+ type_check.expect(
+ -in_types[0].ndim <= self.axis,
+ self.axis < in_types[0].ndim
+ )
ndim = in_types[0].ndim.eval()
+ axis = self.axis % ndim
for i in range(1, in_types.size().eval()):
type_check.expect(
in_types[0].dtype == in_types[i].dtype,
in_types[0].ndim == in_types[i].ndim,
)
for d in range(0, ndim):
- if d == self.axis:
+ if d == axis:
continue
type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])
diff --git a/cupy/manipulation/split.py b/cupy/manipulation/split.py
--- a/cupy/manipulation/split.py
+++ b/cupy/manipulation/split.py
@@ -12,8 +12,10 @@
.. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`
"""
- if ary.ndim <= axis:
+ ndim = ary.ndim
+ if -ndim > axis or ndim <= axis:
raise IndexError('Axis exceeds ndim')
+ axis %= ndim
size = ary.shape[axis]
if numpy.isscalar(indices_or_sections):
@@ -33,7 +35,6 @@
ret.append(ary[skip + (slice(i, index),)])
i = index
ret.append(ary[skip + (slice(i, size),)])
-
return ret
| {"golden_diff": "diff --git a/chainer/functions/array/concat.py b/chainer/functions/array/concat.py\n--- a/chainer/functions/array/concat.py\n+++ b/chainer/functions/array/concat.py\n@@ -18,14 +18,19 @@\n type_check.expect(in_types[0].ndim >\n type_check.Variable(self.axis, 'axis'))\n \n+ type_check.expect(\n+ -in_types[0].ndim <= self.axis,\n+ self.axis < in_types[0].ndim\n+ )\n ndim = in_types[0].ndim.eval()\n+ axis = self.axis % ndim\n for i in range(1, in_types.size().eval()):\n type_check.expect(\n in_types[0].dtype == in_types[i].dtype,\n in_types[0].ndim == in_types[i].ndim,\n )\n for d in range(0, ndim):\n- if d == self.axis:\n+ if d == axis:\n continue\n type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])\n \ndiff --git a/cupy/manipulation/split.py b/cupy/manipulation/split.py\n--- a/cupy/manipulation/split.py\n+++ b/cupy/manipulation/split.py\n@@ -12,8 +12,10 @@\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`\n \n \"\"\"\n- if ary.ndim <= axis:\n+ ndim = ary.ndim\n+ if -ndim > axis or ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n+ axis %= ndim\n size = ary.shape[axis]\n \n if numpy.isscalar(indices_or_sections):\n@@ -33,7 +35,6 @@\n ret.append(ary[skip + (slice(i, index),)])\n i = index\n ret.append(ary[skip + (slice(i, size),)])\n-\n return ret\n", "issue": "Concat reverse indexing not working correctly\nThe following works correctly:\n\n```\na_data = np.zeros((10, 5))\nb_data = np.zeros((10, 3))\na = Variable(a_data)\nb = Variable(b_data)\nF.concat((a, b), axis=1)\n```\n\nHowever, if I change the last line into:\n\n```\nF.concat((a, b), axis=-1)\n```\n\nit gives the error:\n\n```\nInvalid operation is performed in: Concat (Forward)\n\nExpect: in_types[0].shape[1] == in_types[1].shape[1]\nActual: 5 != 3\n```\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Concat(function.Function):\n\n \"\"\"Concatenate multiple tensors towards specified axis.\"\"\"\n\n # concat along the channel dimension by default\n def __init__(self, axis=1):\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() > 0)\n type_check.expect(in_types[0].ndim >\n type_check.Variable(self.axis, 'axis'))\n\n ndim = in_types[0].ndim.eval()\n for i in range(1, in_types.size().eval()):\n type_check.expect(\n in_types[0].dtype == in_types[i].dtype,\n in_types[0].ndim == in_types[i].ndim,\n )\n for d in range(0, ndim):\n if d == self.axis:\n continue\n type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])\n\n def forward(self, xs):\n xp = cuda.get_array_module(*xs)\n return xp.concatenate(xs, axis=self.axis),\n\n def backward(self, xs, gy):\n if not xs[:-1]:\n return gy\n\n xp = cuda.get_array_module(*xs)\n sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()\n return xp.split(gy[0], sizes, axis=self.axis)\n\n\ndef concat(xs, axis=1):\n \"\"\"Concatenates given variables along an axis.\n\n Args:\n xs (tuple of Variables): Variables to be concatenated.\n axis (int): Axis that the input arrays are concatenated along.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Concat(axis=axis)(*xs)\n", "path": "chainer/functions/array/concat.py"}, {"content": "import numpy\nimport six\n\n\ndef array_split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n This function is almost equivalent to :func:`cupy.split`. 
The only\n difference is that this function allows an integer sections that does not\n evenly divide the axis.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.array_split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n each_size = (size - 1) // indices_or_sections + 1\n indices = [i * each_size\n for i in six.moves.range(1, indices_or_sections)]\n else:\n indices = indices_or_sections\n\n if len(indices) == 0:\n return [ary]\n\n skip = (slice(None),) * axis\n ret = []\n i = 0\n for index in indices:\n ret.append(ary[skip + (slice(i, index),)])\n i = index\n ret.append(ary[skip + (slice(i, size),)])\n\n return ret\n\n\ndef dsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the third axis.\n\n This is equivalent to ``split`` with ``axis=2``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 2:\n raise ValueError('Cannot dsplit an array with less than 3 dimensions')\n return split(ary, indices_or_sections, 2)\n\n\ndef hsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays horizontally.\n\n This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one\n dimension, and otherwise that with ``axis=1``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`\n\n \"\"\"\n if ary.ndim == 0:\n raise ValueError('Cannot hsplit a zero-dimensional array')\n if ary.ndim == 1:\n return split(ary, indices_or_sections, 0)\n else:\n return split(ary, indices_or_sections, 1)\n\n\ndef split(ary, indices_or_sections, axis=0):\n \"\"\"Splits an array into multiple sub arrays along a given axis.\n\n Args:\n ary (cupy.ndarray): Array to split.\n indices_or_sections (int or sequence of ints): A value indicating how\n to divide the axis. If it is an integer, then is treated as the\n number of sections, and the axis is evenly divided. Otherwise,\n the integers indicate indices to split at. Note that the sequence\n on the device memory is not allowed.\n axis (int): Axis along which the array is split.\n\n Returns:\n A list of sub arrays. Each array is a view of the corresponding input\n array.\n\n .. seealso:: :func:`numpy.split`\n\n \"\"\"\n if ary.ndim <= axis:\n raise IndexError('Axis exceeds ndim')\n size = ary.shape[axis]\n\n if numpy.isscalar(indices_or_sections):\n if size % indices_or_sections != 0:\n raise ValueError(\n 'indices_or_sections must divide the size along the axes.\\n'\n 'If you want to split the array into non-equally-sized '\n 'arrays, use array_split instead.')\n return array_split(ary, indices_or_sections, axis)\n\n\ndef vsplit(ary, indices_or_sections):\n \"\"\"Splits an array into multiple sub arrays along the first axis.\n\n This is equivalent to ``split`` with ``axis=0``.\n\n .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.dsplit`\n\n \"\"\"\n if ary.ndim <= 1:\n raise ValueError('Cannot vsplit an array with less than 2 dimensions')\n return split(ary, indices_or_sections, 0)\n", "path": "cupy/manipulation/split.py"}]} | 2,324 | 436 |
gh_patches_debug_389 | rasdani/github-patches | git_diff | mlflow__mlflow-4368 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make mlflow compatible with protobuf 3.6.1
## What changes are proposed in this pull request?
Make mlflow compatible with protobuf 3.6.1:
for protobuf ==3.6.1
Add EnumTypeWrapper.__getattr__ to access values
## How is this patch tested?
N/A
## Release Notes
### Is this a user-facing change?
- [x] No. You can skip the rest of this section.
- [ ] Yes. Give a description of this change to be included in the release notes for MLflow users.
(Details in 1-2 sentences. You can just refer to another PR with a description if this PR is part of a larger change.)
### What component(s), interfaces, languages, and integrations does this PR affect?
Components
- [ ] `area/artifacts`: Artifact stores and artifact logging
- [ ] `area/build`: Build and test infrastructure for MLflow
- [ ] `area/docs`: MLflow documentation pages
- [ ] `area/examples`: Example code
- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry
- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors
- [ ] `area/projects`: MLproject format, project running backends
- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs
- [ ] `area/server-infra`: MLflow server, JavaScript dev server
- [x] `area/tracking`: Tracking Service, tracking client APIs, autologging
Interface
- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting
- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models
- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry
- [ ] `area/windows`: Windows support
Language
- [ ] `language/r`: R APIs and clients
- [ ] `language/java`: Java APIs and clients
- [ ] `language/new`: Proposals for new client languages
Integrations
- [ ] `integrations/azure`: Azure and Azure ML integrations
- [ ] `integrations/sagemaker`: SageMaker integrations
- [ ] `integrations/databricks`: Databricks integrations
<!--
Insert an empty named anchor here to allow jumping to this section with a fragment URL
(e.g. https://github.com/mlflow/mlflow/pull/123#user-content-release-note-category).
Note that GitHub prefixes anchor names in markdown with "user-content-".
-->
<a name="release-note-category"></a>
### How should the PR be classified in the release notes? Choose one:
- [ ] `rn/breaking-change` - The PR will be mentioned in the "Breaking Changes" section
- [x] `rn/none` - No description will be included. The PR will be mentioned only by the PR number in the "Small Bugfixes and Documentation Updates" section
- [ ] `rn/feature` - A new user-facing feature worth mentioning in the release notes
- [ ] `rn/bug-fix` - A user-facing bug fix worth mentioning in the release notes
- [ ] `rn/documentation` - A user-facing documentation change worth mentioning in the release notes
</issue>
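For context: rather than special-casing protobuf 3.6.1, the accepted change below simply raises the minimum protobuf version in the skinny requirements. A stripped-down, illustrative sketch of how such a floor is declared with setuptools (placeholder metadata, not mlflow's real setup.py):
```python
# Illustrative only: declaring a protobuf version floor with setuptools.
from setuptools import setup, find_packages

setup(
    name="example-package",   # placeholder, not mlflow's actual metadata
    version="0.0.1",
    packages=find_packages(),
    install_requires=[
        "protobuf>=3.7.0",    # floor raised from 3.6.0, as in the patch below
    ],
)
```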
<code>
[start of setup.py]
1 import os
2 import logging
3
4 from importlib.machinery import SourceFileLoader
5 from setuptools import setup, find_packages
6
7 _MLFLOW_SKINNY_ENV_VAR = "MLFLOW_SKINNY"
8
9 version = (
10 SourceFileLoader("mlflow.version", os.path.join("mlflow", "version.py")).load_module().VERSION
11 )
12
13
14 # Get a list of all files in the JS directory to include in our module
15 def package_files(directory):
16 paths = []
17 for (path, _, filenames) in os.walk(directory):
18 for filename in filenames:
19 paths.append(os.path.join("..", path, filename))
20 return paths
21
22
23 # Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build
24 # to include in the wheel, e.g. "../mlflow/server/js/build/index.html"
25 js_files = package_files("mlflow/server/js/build")
26 models_container_server_files = package_files("mlflow/models/container")
27 alembic_files = [
28 "../mlflow/store/db_migrations/alembic.ini",
29 "../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini",
30 ]
31 extra_files = ["ml-package-versions.yml", "pyspark/ml/log_model_allowlist.txt"]
32
33 """
34 Minimal requirements for the skinny MLflow client which provides a limited
35 subset of functionality such as: RESTful client functionality for Tracking and
36 Model Registry, as well as support for Project execution against local backends
37 and Databricks.
38 """
39 SKINNY_REQUIREMENTS = [
40 "click>=7.0",
41 "cloudpickle",
42 "databricks-cli>=0.8.7",
43 "entrypoints",
44 "gitpython>=2.1.0",
45 "pyyaml",
46 "protobuf>=3.6.0",
47 "pytz",
48 "requests>=2.17.3",
49 "packaging",
50 ]
51
52 """
53 These are the core requirements for the complete MLflow platform, which augments
54 the skinny client functionality with support for running the MLflow Tracking
55 Server & UI. It also adds project backends such as Docker and Kubernetes among
56 other capabilities.
57 """
58 CORE_REQUIREMENTS = SKINNY_REQUIREMENTS + [
59 "alembic<=1.4.1",
60 # Required
61 "docker>=4.0.0",
62 "Flask",
63 "gunicorn; platform_system != 'Windows'",
64 "numpy",
65 "pandas",
66 "prometheus-flask-exporter",
67 "querystring_parser",
68 # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433
69 "sqlparse>=0.3.1",
70 # Required to run the MLflow server against SQL-backed storage
71 "sqlalchemy",
72 "waitress; platform_system == 'Windows'",
73 ]
74
75 _is_mlflow_skinny = bool(os.environ.get(_MLFLOW_SKINNY_ENV_VAR))
76 logging.debug("{} env var is set: {}".format(_MLFLOW_SKINNY_ENV_VAR, _is_mlflow_skinny))
77
78 setup(
79 name="mlflow" if not _is_mlflow_skinny else "mlflow-skinny",
80 version=version,
81 packages=find_packages(exclude=["tests", "tests.*"]),
82 package_data={"mlflow": js_files + models_container_server_files + alembic_files + extra_files}
83 if not _is_mlflow_skinny
84 # include alembic files to enable usage of the skinny client with SQL databases
85 # if users install sqlalchemy, alembic, and sqlparse independently
86 else {"mlflow": alembic_files + extra_files},
87 install_requires=CORE_REQUIREMENTS if not _is_mlflow_skinny else SKINNY_REQUIREMENTS,
88 extras_require={
89 "extras": [
90 "scikit-learn",
91 # Required to log artifacts and models to HDFS artifact locations
92 "pyarrow",
93 # Required to log artifacts and models to AWS S3 artifact locations
94 "boto3",
95 "mleap",
96 # Required to log artifacts and models to GCS artifact locations
97 "google-cloud-storage",
98 "azureml-core>=1.2.0",
99 # Required to log artifacts to SFTP artifact locations
100 "pysftp",
101 # Required by the mlflow.projects module, when running projects against
102 # a remote Kubernetes cluster
103 "kubernetes",
104 ],
105 "sqlserver": ["mlflow-dbstore"],
106 "aliyun-oss": ["aliyunstoreplugin"],
107 },
108 entry_points="""
109 [console_scripts]
110 mlflow=mlflow.cli:cli
111 """,
112 zip_safe=False,
113 author="Databricks",
114 description="MLflow: A Platform for ML Development and Productionization",
115 long_description=open("README.rst").read()
116 if not _is_mlflow_skinny
117 else open("README_SKINNY.rst").read() + open("README.rst").read(),
118 long_description_content_type="text/x-rst",
119 license="Apache License 2.0",
120 classifiers=["Intended Audience :: Developers", "Programming Language :: Python :: 3.6"],
121 keywords="ml ai databricks",
122 url="https://mlflow.org/",
123 python_requires=">=3.6",
124 project_urls={
125 "Bug Tracker": "https://github.com/mlflow/mlflow/issues",
126 "Documentation": "https://mlflow.org/docs/latest/index.html",
127 "Source Code": "https://github.com/mlflow/mlflow",
128 },
129 )
130
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -43,7 +43,7 @@
"entrypoints",
"gitpython>=2.1.0",
"pyyaml",
- "protobuf>=3.6.0",
+ "protobuf>=3.7.0",
"pytz",
"requests>=2.17.3",
"packaging",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -43,7 +43,7 @@\n \"entrypoints\",\n \"gitpython>=2.1.0\",\n \"pyyaml\",\n- \"protobuf>=3.6.0\",\n+ \"protobuf>=3.7.0\",\n \"pytz\",\n \"requests>=2.17.3\",\n \"packaging\",\n", "issue": "Make mlflow compatible with protobuf 3.6.1\n## What changes are proposed in this pull request?\r\n\r\nMake mlflow compatible with protobuf 3.6.1:\r\nfor protobuf ==3.6.1\r\nAdd EnumTypeWrapper.__getattr__ to access values\r\n\r\n## How is this patch tested?\r\n\r\nN/A\r\n\r\n## Release Notes\r\n\r\n### Is this a user-facing change?\r\n\r\n- [x] No. You can skip the rest of this section.\r\n- [ ] Yes. Give a description of this change to be included in the release notes for MLflow users.\r\n\r\n(Details in 1-2 sentences. You can just refer to another PR with a description if this PR is part of a larger change.)\r\n\r\n### What component(s), interfaces, languages, and integrations does this PR affect?\r\nComponents \r\n- [ ] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: Local serving, model deployment tools, spark UDFs\r\n- [ ] `area/server-infra`: MLflow server, JavaScript dev server\r\n- [x] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\nInterface \r\n- [ ] `area/uiux`: Front-end, user experience, JavaScript, plotting\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\nLanguage \r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\nIntegrations\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\r\n\r\n<!--\r\nInsert an empty named anchor here to allow jumping to this section with a fragment URL\r\n(e.g. https://github.com/mlflow/mlflow/pull/123#user-content-release-note-category).\r\nNote that GitHub prefixes anchor names in markdown with \"user-content-\".\r\n-->\r\n<a name=\"release-note-category\"></a>\r\n### How should the PR be classified in the release notes? Choose one:\r\n\r\n- [ ] `rn/breaking-change` - The PR will be mentioned in the \"Breaking Changes\" section\r\n- [x] `rn/none` - No description will be included. 
The PR will be mentioned only by the PR number in the \"Small Bugfixes and Documentation Updates\" section\r\n- [ ] `rn/feature` - A new user-facing feature worth mentioning in the release notes\r\n- [ ] `rn/bug-fix` - A user-facing bug fix worth mentioning in the release notes\r\n- [ ] `rn/documentation` - A user-facing documentation change worth mentioning in the release notes\r\n\n", "before_files": [{"content": "import os\nimport logging\n\nfrom importlib.machinery import SourceFileLoader\nfrom setuptools import setup, find_packages\n\n_MLFLOW_SKINNY_ENV_VAR = \"MLFLOW_SKINNY\"\n\nversion = (\n SourceFileLoader(\"mlflow.version\", os.path.join(\"mlflow\", \"version.py\")).load_module().VERSION\n)\n\n\n# Get a list of all files in the JS directory to include in our module\ndef package_files(directory):\n paths = []\n for (path, _, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join(\"..\", path, filename))\n return paths\n\n\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\n# to include in the wheel, e.g. \"../mlflow/server/js/build/index.html\"\njs_files = package_files(\"mlflow/server/js/build\")\nmodels_container_server_files = package_files(\"mlflow/models/container\")\nalembic_files = [\n \"../mlflow/store/db_migrations/alembic.ini\",\n \"../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini\",\n]\nextra_files = [\"ml-package-versions.yml\", \"pyspark/ml/log_model_allowlist.txt\"]\n\n\"\"\"\nMinimal requirements for the skinny MLflow client which provides a limited\nsubset of functionality such as: RESTful client functionality for Tracking and\nModel Registry, as well as support for Project execution against local backends\nand Databricks.\n\"\"\"\nSKINNY_REQUIREMENTS = [\n \"click>=7.0\",\n \"cloudpickle\",\n \"databricks-cli>=0.8.7\",\n \"entrypoints\",\n \"gitpython>=2.1.0\",\n \"pyyaml\",\n \"protobuf>=3.6.0\",\n \"pytz\",\n \"requests>=2.17.3\",\n \"packaging\",\n]\n\n\"\"\"\nThese are the core requirements for the complete MLflow platform, which augments\nthe skinny client functionality with support for running the MLflow Tracking\nServer & UI. 
It also adds project backends such as Docker and Kubernetes among\nother capabilities.\n\"\"\"\nCORE_REQUIREMENTS = SKINNY_REQUIREMENTS + [\n \"alembic<=1.4.1\",\n # Required\n \"docker>=4.0.0\",\n \"Flask\",\n \"gunicorn; platform_system != 'Windows'\",\n \"numpy\",\n \"pandas\",\n \"prometheus-flask-exporter\",\n \"querystring_parser\",\n # Pin sqlparse for: https://github.com/mlflow/mlflow/issues/3433\n \"sqlparse>=0.3.1\",\n # Required to run the MLflow server against SQL-backed storage\n \"sqlalchemy\",\n \"waitress; platform_system == 'Windows'\",\n]\n\n_is_mlflow_skinny = bool(os.environ.get(_MLFLOW_SKINNY_ENV_VAR))\nlogging.debug(\"{} env var is set: {}\".format(_MLFLOW_SKINNY_ENV_VAR, _is_mlflow_skinny))\n\nsetup(\n name=\"mlflow\" if not _is_mlflow_skinny else \"mlflow-skinny\",\n version=version,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\"mlflow\": js_files + models_container_server_files + alembic_files + extra_files}\n if not _is_mlflow_skinny\n # include alembic files to enable usage of the skinny client with SQL databases\n # if users install sqlalchemy, alembic, and sqlparse independently\n else {\"mlflow\": alembic_files + extra_files},\n install_requires=CORE_REQUIREMENTS if not _is_mlflow_skinny else SKINNY_REQUIREMENTS,\n extras_require={\n \"extras\": [\n \"scikit-learn\",\n # Required to log artifacts and models to HDFS artifact locations\n \"pyarrow\",\n # Required to log artifacts and models to AWS S3 artifact locations\n \"boto3\",\n \"mleap\",\n # Required to log artifacts and models to GCS artifact locations\n \"google-cloud-storage\",\n \"azureml-core>=1.2.0\",\n # Required to log artifacts to SFTP artifact locations\n \"pysftp\",\n # Required by the mlflow.projects module, when running projects against\n # a remote Kubernetes cluster\n \"kubernetes\",\n ],\n \"sqlserver\": [\"mlflow-dbstore\"],\n \"aliyun-oss\": [\"aliyunstoreplugin\"],\n },\n entry_points=\"\"\"\n [console_scripts]\n mlflow=mlflow.cli:cli\n \"\"\",\n zip_safe=False,\n author=\"Databricks\",\n description=\"MLflow: A Platform for ML Development and Productionization\",\n long_description=open(\"README.rst\").read()\n if not _is_mlflow_skinny\n else open(\"README_SKINNY.rst\").read() + open(\"README.rst\").read(),\n long_description_content_type=\"text/x-rst\",\n license=\"Apache License 2.0\",\n classifiers=[\"Intended Audience :: Developers\", \"Programming Language :: Python :: 3.6\"],\n keywords=\"ml ai databricks\",\n url=\"https://mlflow.org/\",\n python_requires=\">=3.6\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/mlflow/mlflow/issues\",\n \"Documentation\": \"https://mlflow.org/docs/latest/index.html\",\n \"Source Code\": \"https://github.com/mlflow/mlflow\",\n },\n)\n", "path": "setup.py"}]} | 2,690 | 96 |
gh_patches_debug_11688 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-272 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Core plugins zips to published artifacts.
The plugins built with the core repo should be bundled as zips and hosted at artifacts.opensearch.org.
This task involves updating Opensearch's build.sh script to build and include these artifacts with the component's artifact list.
Right now we also have brittle logic that assumes any component with "plugins" in their artifacts list is a plugin repository. This should be updated to identify the min bundle component in another way. Perhaps with a separate artifact folder for "min-bundle"
</issue>
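As the `Builder.export_artifacts` method below shows, artifacts are exported by walking a fixed list of per-type folders; the accepted patch adds a separate `core-plugins` folder to that list instead of inferring plugin repositories from the presence of `plugins`. A rough standalone sketch of that folder-walking idea (illustrative, not the actual builder):
```python
# Illustrative sketch: collect artifacts from one sub-folder per artifact type,
# including a dedicated "core-plugins" folder.
import os

ARTIFACT_TYPES = ["maven", "bundle", "plugins", "libs", "core-plugins"]

def list_artifacts(artifacts_dir):
    found = []
    for artifact_type in ARTIFACT_TYPES:
        for root, _dirs, files in os.walk(os.path.join(artifacts_dir, artifact_type)):
            for file_name in files:
                absolute_path = os.path.join(root, file_name)
                found.append((artifact_type, os.path.relpath(absolute_path, artifacts_dir)))
    return found
```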
<code>
[start of bundle-workflow/python/build_workflow/builder.py]
1 # Copyright OpenSearch Contributors.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import os
5
6 '''
7 This class is responsible for executing the build for a component and passing the results to a build recorder.
8 It will notify the build recorder of build information such as repository and git ref, and any artifacts generated by the build.
9 Artifacts found in "<build root>/artifacts/<maven|plugins|libs|bundle>" will be recognized and recorded.
10 '''
11 class Builder:
12 def __init__(self, component_name, git_repo, script_finder, build_recorder):
13 '''
14 Construct a new Builder instance.
15 :param component_name: The name of the component to build.
16 :param git_repo: A GitRepository instance containing the checked-out code.
17 :param script_finder: The ScriptFinder to use for finding build.sh scripts.
18 :param build_recorder: The build recorder that will capture build information and artifacts.
19 '''
20
21 self.component_name = component_name
22 self.git_repo = git_repo
23 self.script_finder = script_finder
24 self.build_recorder = build_recorder
25 self.output_path = 'artifacts'
26
27 def build(self, version, arch, snapshot):
28 build_script = self.script_finder.find_build_script(self.component_name, self.git_repo.dir)
29 build_command = f'{build_script} -v {version} -a {arch} -s {str(snapshot).lower()} -o {self.output_path}'
30 self.git_repo.execute(build_command)
31 self.build_recorder.record_component(self.component_name, self.git_repo)
32
33 def export_artifacts(self):
34 artifacts_dir = os.path.realpath(os.path.join(self.git_repo.dir, self.output_path))
35 for artifact_type in ["maven", "bundle", "plugins", "libs"]:
36 for dir, dirs, files in os.walk(os.path.join(artifacts_dir, artifact_type)):
37 for file_name in files:
38 absolute_path = os.path.join(dir, file_name)
39 relative_path = os.path.relpath(absolute_path, artifacts_dir)
40 self.build_recorder.record_artifact(self.component_name, artifact_type, relative_path, absolute_path)
41
[end of bundle-workflow/python/build_workflow/builder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bundle-workflow/python/build_workflow/builder.py b/bundle-workflow/python/build_workflow/builder.py
--- a/bundle-workflow/python/build_workflow/builder.py
+++ b/bundle-workflow/python/build_workflow/builder.py
@@ -32,7 +32,7 @@
def export_artifacts(self):
artifacts_dir = os.path.realpath(os.path.join(self.git_repo.dir, self.output_path))
- for artifact_type in ["maven", "bundle", "plugins", "libs"]:
+ for artifact_type in ["maven", "bundle", "plugins", "libs", "core-plugins"]:
for dir, dirs, files in os.walk(os.path.join(artifacts_dir, artifact_type)):
for file_name in files:
absolute_path = os.path.join(dir, file_name)
| {"golden_diff": "diff --git a/bundle-workflow/python/build_workflow/builder.py b/bundle-workflow/python/build_workflow/builder.py\n--- a/bundle-workflow/python/build_workflow/builder.py\n+++ b/bundle-workflow/python/build_workflow/builder.py\n@@ -32,7 +32,7 @@\n \n def export_artifacts(self):\n artifacts_dir = os.path.realpath(os.path.join(self.git_repo.dir, self.output_path))\n- for artifact_type in [\"maven\", \"bundle\", \"plugins\", \"libs\"]:\n+ for artifact_type in [\"maven\", \"bundle\", \"plugins\", \"libs\", \"core-plugins\"]:\n for dir, dirs, files in os.walk(os.path.join(artifacts_dir, artifact_type)):\n for file_name in files:\n absolute_path = os.path.join(dir, file_name)\n", "issue": "Add Core plugins zips to published artifacts.\nThe plugins built with the core repo should be bundled as zips and hosted at artifacts.opensearch.org.\r\n\r\nThis task involves updating Opensearch's build.sh script to build and include these artifacts with the component's artifact list.\r\n\r\nRight now we also have brittle logic that assumes any component with \"plugins\" in their artifacts list is a plugin repository. This should be updated to identify the min bundle component in another way. Perhaps with a separate artifact folder for \"min-bundle\"\n", "before_files": [{"content": "# Copyright OpenSearch Contributors.\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\n\n'''\nThis class is responsible for executing the build for a component and passing the results to a build recorder.\nIt will notify the build recorder of build information such as repository and git ref, and any artifacts generated by the build.\nArtifacts found in \"<build root>/artifacts/<maven|plugins|libs|bundle>\" will be recognized and recorded.\n'''\nclass Builder:\n def __init__(self, component_name, git_repo, script_finder, build_recorder):\n '''\n Construct a new Builder instance.\n :param component_name: The name of the component to build.\n :param git_repo: A GitRepository instance containing the checked-out code.\n :param script_finder: The ScriptFinder to use for finding build.sh scripts.\n :param build_recorder: The build recorder that will capture build information and artifacts.\n '''\n\n self.component_name = component_name\n self.git_repo = git_repo\n self.script_finder = script_finder\n self.build_recorder = build_recorder\n self.output_path = 'artifacts'\n\n def build(self, version, arch, snapshot):\n build_script = self.script_finder.find_build_script(self.component_name, self.git_repo.dir)\n build_command = f'{build_script} -v {version} -a {arch} -s {str(snapshot).lower()} -o {self.output_path}'\n self.git_repo.execute(build_command)\n self.build_recorder.record_component(self.component_name, self.git_repo)\n\n def export_artifacts(self):\n artifacts_dir = os.path.realpath(os.path.join(self.git_repo.dir, self.output_path))\n for artifact_type in [\"maven\", \"bundle\", \"plugins\", \"libs\"]:\n for dir, dirs, files in os.walk(os.path.join(artifacts_dir, artifact_type)):\n for file_name in files:\n absolute_path = os.path.join(dir, file_name)\n relative_path = os.path.relpath(absolute_path, artifacts_dir)\n self.build_recorder.record_artifact(self.component_name, artifact_type, relative_path, absolute_path)\n", "path": "bundle-workflow/python/build_workflow/builder.py"}]} | 1,169 | 173 |
gh_patches_debug_390 | rasdani/github-patches | git_diff | google__turbinia-616 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add retries to tox
Tox fails when trying to check links within our docs if the link is temporarily down/unresponsive. Adding retries to sphinx config should take care of that.
</issue>
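The accepted fix below is a one-line Sphinx setting rather than a tox change: `linkcheck_retries` makes the linkcheck builder retry an unresponsive URL before reporting it as broken. Minimal excerpt (illustrative) as it would appear in `docs/conf.py`:
```python
# docs/conf.py (excerpt): retry flaky links before declaring them broken.
linkcheck_retries = 3
```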
<code>
[start of docs/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 # -- Path setup --------------------------------------------------------------
8
9 # If extensions (or modules to document with autodoc) are in another directory,
10 # add these directories to sys.path here. If the directory is relative to the
11 # documentation root, use os.path.abspath to make it absolute, like shown here.
12 #
13 # import os
14 # import sys
15 # sys.path.insert(0, os.path.abspath('.'))
16
17 from __future__ import unicode_literals
18 import re
19
20 from recommonmark.parser import CommonMarkParser
21 from recommonmark.transform import AutoStructify
22 from docutils import nodes, transforms
23
24 # -- Project information -----------------------------------------------------
25
26 project = 'Turbinia'
27 copyright = '2020, Google Inc'
28 author = 'Turbinia maintainers'
29
30 # -- General configuration ---------------------------------------------------
31
32 # Add any Sphinx extension module names here, as strings. They can be
33 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
34 # ones.
35 extensions = [
36 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',
37 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',
38 'recommonmark'
39 ]
40
41 # Add any paths that contain templates here, relative to this directory.
42 templates_path = ['_templates']
43
44 # List of patterns, relative to source directory, that match files and
45 # directories to ignore when looking for source files.
46 # This pattern also affects html_static_path and html_extra_path.
47 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']
48
49 # -- Options for HTML output -------------------------------------------------
50
51 # The theme to use for HTML and HTML Help pages. See the documentation for
52 # a list of builtin themes.
53 #
54 html_theme = 'sphinx_rtd_theme'
55
56 # The master toctree document.
57 master_doc = 'index'
58
59 # The name of the Pygments (syntax highlighting) style to use.
60 pygments_style = 'sphinx'
61
62 # Add any paths that contain custom static files (such as style sheets) here,
63 # relative to this directory. They are copied after the builtin static files,
64 # so a file named "default.css" will overwrite the builtin "default.css".
65 html_static_path = ['_static']
66
67 # The default sidebars (for documents that don't match any pattern) are
68 # defined by theme itself. Builtin themes are using these templates by
69 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
70 # 'searchbox.html']``.
71 #
72 html_sidebars = {
73 '**': [
74 'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',
75 'searchbox.html'
76 ]
77 }
78
79
80 # Output file base name for HTML help builder.
81 htmlhelp_basename = 'turbiniadoc'
82
83 html_logo = "images/turbinia-logo.jpg"
84
85
86 class ProcessLink(transforms.Transform):
87 """Transform definition to parse .md references to internal pages."""
88
89 default_priority = 1000
90
91 def find_replace(self, node):
92 """Parses URIs containing .md and replaces them with their HTML page."""
93 if isinstance(node, nodes.reference) and 'refuri' in node:
94 r = node['refuri']
95 if r.endswith('.md'):
96 r = r[:-3] + '.html'
97 node['refuri'] = r
98
99 return node
100
101 def traverse(self, node):
102 """Traverse the document tree rooted at node.
103 node : docutil node
104 current root node to traverse
105 """
106 self.find_replace(node)
107
108 for c in node.children:
109 self.traverse(c)
110
111 # pylint: disable=arguments-differ,attribute-defined-outside-init
112 # this was taken from GRR's config file for documentation
113 def apply(self):
114 self.current_level = 0
115 self.traverse(self.document)
116
117
118 def setup(app):
119 """Add custom parsers to Sphinx generation."""
120 app.add_config_value(
121 'recommonmark_config', {
122 'enable_auto_doc_ref': False,
123 }, True)
124 app.add_transform(AutoStructify)
125 app.add_transform(ProcessLink)
126
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -76,6 +76,8 @@
]
}
+# Adding retries to linkchecks before declaring a link broken
+linkcheck_retries = 3
# Output file base name for HTML help builder.
htmlhelp_basename = 'turbiniadoc'
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -76,6 +76,8 @@\n ]\n }\n \n+# Adding retries to linkchecks before declaring a link broken\n+linkcheck_retries = 3\n \n # Output file base name for HTML help builder.\n htmlhelp_basename = 'turbiniadoc'\n", "issue": "Add retries to tox\nTox fails when trying to check links within our docs if the link is temporarily down/unresponsive. Adding retries to sphinx config should take care of that.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nfrom __future__ import unicode_literals\nimport re\n\nfrom recommonmark.parser import CommonMarkParser\nfrom recommonmark.transform import AutoStructify\nfrom docutils import nodes, transforms\n\n# -- Project information -----------------------------------------------------\n\nproject = 'Turbinia'\ncopyright = '2020, Google Inc'\nauthor = 'Turbinia maintainers'\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx_markdown_tables',\n 'recommonmark'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'design/*']\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. 
Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\nhtml_sidebars = {\n '**': [\n 'sidebar.html', 'localtoc.html', 'relations.html', 'sourcelink.html',\n 'searchbox.html'\n ]\n}\n\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'turbiniadoc'\n\nhtml_logo = \"images/turbinia-logo.jpg\"\n\n\nclass ProcessLink(transforms.Transform):\n \"\"\"Transform definition to parse .md references to internal pages.\"\"\"\n\n default_priority = 1000\n\n def find_replace(self, node):\n \"\"\"Parses URIs containing .md and replaces them with their HTML page.\"\"\"\n if isinstance(node, nodes.reference) and 'refuri' in node:\n r = node['refuri']\n if r.endswith('.md'):\n r = r[:-3] + '.html'\n node['refuri'] = r\n\n return node\n\n def traverse(self, node):\n \"\"\"Traverse the document tree rooted at node.\n node : docutil node\n current root node to traverse\n \"\"\"\n self.find_replace(node)\n\n for c in node.children:\n self.traverse(c)\n\n # pylint: disable=arguments-differ,attribute-defined-outside-init\n # this was taken from GRR's config file for documentation\n def apply(self):\n self.current_level = 0\n self.traverse(self.document)\n\n\ndef setup(app):\n \"\"\"Add custom parsers to Sphinx generation.\"\"\"\n app.add_config_value(\n 'recommonmark_config', {\n 'enable_auto_doc_ref': False,\n }, True)\n app.add_transform(AutoStructify)\n app.add_transform(ProcessLink)\n", "path": "docs/conf.py"}]} | 1,785 | 82 |
gh_patches_debug_34066 | rasdani/github-patches | git_diff | freedomofpress__securedrop-1309 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NetworkManager hook notifications broken on Tails 2.x
The invocation of `notify-send` in `securedrop_init.py` does not show a notification in Tails 2.x like it did in Tails 1.x. This is due to dbus-related changes in Debian Jessie, and is a known issue as a quick [search](https://labs.riseup.net/code/projects/tails/search?utf8=%E2%9C%93&changesets=1&q=notify-send) of the Tails issue tracker demonstrates.
Furthermore, it looks like Tails has a special wrapper script, `tails-notify-user`, specifically meant for the use case of displaying notifications to the user from background scripts running as different users, so we should just use that instead.
</issue>
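Since Tails ships `tails-notify-user` precisely for notifications sent from background scripts running as other users, the fix below swaps the sudo + notify-send invocation for that wrapper. A minimal sketch of the replacement call (illustrative; the message text in the actual patch is slightly longer):
```python
# Illustrative sketch: notify the desktop user via Tails' wrapper script
# instead of sudo + notify-send.
import subprocess

subprocess.call([
    'tails-notify-user',
    'SecureDrop successfully auto-configured!',
    'You can now access the Document Interface.',
])
```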
<code>
[start of tails_files/securedrop_init.py]
1 #!/usr/bin/env python
2
3 import os
4 import sys
5 import subprocess
6
7
8 if __name__ == '__main__':
9 # check for root
10 if os.geteuid() != 0:
11 sys.exit('You need to run this as root')
12
13 # paths
14 path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'
15 path_torrc_backup = '/etc/tor/torrc.bak'
16 path_torrc = '/etc/tor/torrc'
17
18 # load torrc_additions
19 if os.path.isfile(path_torrc_additions):
20 torrc_additions = open(path_torrc_additions).read()
21 else:
22 sys.exit('Error opening {0} for reading'.format(path_torrc_additions))
23
24 # load torrc
25 if os.path.isfile(path_torrc_backup):
26 torrc = open(path_torrc_backup).read()
27 else:
28 if os.path.isfile(path_torrc):
29 torrc = open(path_torrc).read()
30 else:
31 sys.exit('Error opening {0} for reading'.format(path_torrc))
32
33 # save a backup
34 open(path_torrc_backup, 'w').write(torrc)
35
36 # append the additions
37 open(path_torrc, 'w').write(torrc + torrc_additions)
38
39 # reload tor
40 subprocess.call(['/usr/sbin/service', 'tor', 'reload'])
41
42 # success
43 subprocess.call(['/usr/bin/sudo', '-u', 'amnesia', '/usr/bin/notify-send', '-i', '/home/amnesia/Persistent/.securedrop/securedrop_icon.png',
44 'Updated torrc!', 'You can now connect to your SecureDrop\ndocument interface.'])
45
[end of tails_files/securedrop_init.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tails_files/securedrop_init.py b/tails_files/securedrop_init.py
--- a/tails_files/securedrop_init.py
+++ b/tails_files/securedrop_init.py
@@ -1,44 +1,47 @@
-#!/usr/bin/env python
+#!/usr/bin/python
import os
import sys
import subprocess
-if __name__ == '__main__':
- # check for root
- if os.geteuid() != 0:
- sys.exit('You need to run this as root')
+# check for root
+if os.geteuid() != 0:
+ sys.exit('You need to run this as root')
- # paths
- path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'
- path_torrc_backup = '/etc/tor/torrc.bak'
- path_torrc = '/etc/tor/torrc'
+# paths
+path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'
+path_torrc_backup = '/etc/tor/torrc.bak'
+path_torrc = '/etc/tor/torrc'
- # load torrc_additions
- if os.path.isfile(path_torrc_additions):
- torrc_additions = open(path_torrc_additions).read()
- else:
- sys.exit('Error opening {0} for reading'.format(path_torrc_additions))
+# load torrc_additions
+if os.path.isfile(path_torrc_additions):
+ torrc_additions = open(path_torrc_additions).read()
+else:
+ sys.exit('Error opening {0} for reading'.format(path_torrc_additions))
- # load torrc
- if os.path.isfile(path_torrc_backup):
- torrc = open(path_torrc_backup).read()
+# load torrc
+if os.path.isfile(path_torrc_backup):
+ torrc = open(path_torrc_backup).read()
+else:
+ if os.path.isfile(path_torrc):
+ torrc = open(path_torrc).read()
else:
- if os.path.isfile(path_torrc):
- torrc = open(path_torrc).read()
- else:
- sys.exit('Error opening {0} for reading'.format(path_torrc))
+ sys.exit('Error opening {0} for reading'.format(path_torrc))
- # save a backup
- open(path_torrc_backup, 'w').write(torrc)
+ # save a backup
+ open(path_torrc_backup, 'w').write(torrc)
- # append the additions
- open(path_torrc, 'w').write(torrc + torrc_additions)
+# append the additions
+open(path_torrc, 'w').write(torrc + torrc_additions)
- # reload tor
- subprocess.call(['/usr/sbin/service', 'tor', 'reload'])
+# reload tor
+try:
+ subprocess.check_call(['systemctl', 'reload', '[email protected]'])
+except subprocess.CalledProcessError:
+ sys.exit('Error reloading Tor')
- # success
- subprocess.call(['/usr/bin/sudo', '-u', 'amnesia', '/usr/bin/notify-send', '-i', '/home/amnesia/Persistent/.securedrop/securedrop_icon.png',
- 'Updated torrc!', 'You can now connect to your SecureDrop\ndocument interface.'])
+# notify the user
+subprocess.call(['tails-notify-user',
+ 'SecureDrop successfully auto-configured!',
+ 'You can now access the Document Interface.\nIf you are an admin, you can now SSH to the servers.'])
| {"golden_diff": "diff --git a/tails_files/securedrop_init.py b/tails_files/securedrop_init.py\n--- a/tails_files/securedrop_init.py\n+++ b/tails_files/securedrop_init.py\n@@ -1,44 +1,47 @@\n-#!/usr/bin/env python\n+#!/usr/bin/python\n \n import os\n import sys\n import subprocess\n \n \n-if __name__ == '__main__':\n- # check for root\n- if os.geteuid() != 0:\n- sys.exit('You need to run this as root')\n+# check for root\n+if os.geteuid() != 0:\n+ sys.exit('You need to run this as root')\n \n- # paths\n- path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'\n- path_torrc_backup = '/etc/tor/torrc.bak'\n- path_torrc = '/etc/tor/torrc'\n+# paths\n+path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'\n+path_torrc_backup = '/etc/tor/torrc.bak'\n+path_torrc = '/etc/tor/torrc'\n \n- # load torrc_additions\n- if os.path.isfile(path_torrc_additions):\n- torrc_additions = open(path_torrc_additions).read()\n- else:\n- sys.exit('Error opening {0} for reading'.format(path_torrc_additions))\n+# load torrc_additions\n+if os.path.isfile(path_torrc_additions):\n+ torrc_additions = open(path_torrc_additions).read()\n+else:\n+ sys.exit('Error opening {0} for reading'.format(path_torrc_additions))\n \n- # load torrc\n- if os.path.isfile(path_torrc_backup):\n- torrc = open(path_torrc_backup).read()\n+# load torrc\n+if os.path.isfile(path_torrc_backup):\n+ torrc = open(path_torrc_backup).read()\n+else:\n+ if os.path.isfile(path_torrc):\n+ torrc = open(path_torrc).read()\n else:\n- if os.path.isfile(path_torrc):\n- torrc = open(path_torrc).read()\n- else:\n- sys.exit('Error opening {0} for reading'.format(path_torrc))\n+ sys.exit('Error opening {0} for reading'.format(path_torrc))\n \n- # save a backup\n- open(path_torrc_backup, 'w').write(torrc)\n+ # save a backup\n+ open(path_torrc_backup, 'w').write(torrc)\n \n- # append the additions\n- open(path_torrc, 'w').write(torrc + torrc_additions)\n+# append the additions\n+open(path_torrc, 'w').write(torrc + torrc_additions)\n \n- # reload tor\n- subprocess.call(['/usr/sbin/service', 'tor', 'reload'])\n+# reload tor\n+try:\n+ subprocess.check_call(['systemctl', 'reload', '[email protected]'])\n+except subprocess.CalledProcessError:\n+ sys.exit('Error reloading Tor')\n \n- # success\n- subprocess.call(['/usr/bin/sudo', '-u', 'amnesia', '/usr/bin/notify-send', '-i', '/home/amnesia/Persistent/.securedrop/securedrop_icon.png',\n- 'Updated torrc!', 'You can now connect to your SecureDrop\\ndocument interface.'])\n+# notify the user\n+subprocess.call(['tails-notify-user',\n+ 'SecureDrop successfully auto-configured!',\n+ 'You can now access the Document Interface.\\nIf you are an admin, you can now SSH to the servers.'])\n", "issue": "NetworkManager hook notifications broken on Tails 2.x\nThe invocation of `notify-send` in `securedrop_init.py` does not show a notification in Tails 2.x like it did in Tails 1.x. 
This is due to dbus-related changes in Debian Jessie, and is a known issue as a quick [search](https://labs.riseup.net/code/projects/tails/search?utf8=%E2%9C%93&changesets=1&q=notify-send) of the Tails issue tracker demonstrates.\n\nFurthermore, it looks like Tails has a special wrapper script, `tails-notify-user`, specifically meant for the use case of displaying notifications to the user from background scripts running as different users, so we should just use that instead.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport subprocess\n\n\nif __name__ == '__main__':\n # check for root\n if os.geteuid() != 0:\n sys.exit('You need to run this as root')\n\n # paths\n path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'\n path_torrc_backup = '/etc/tor/torrc.bak'\n path_torrc = '/etc/tor/torrc'\n\n # load torrc_additions\n if os.path.isfile(path_torrc_additions):\n torrc_additions = open(path_torrc_additions).read()\n else:\n sys.exit('Error opening {0} for reading'.format(path_torrc_additions))\n\n # load torrc\n if os.path.isfile(path_torrc_backup):\n torrc = open(path_torrc_backup).read()\n else:\n if os.path.isfile(path_torrc):\n torrc = open(path_torrc).read()\n else:\n sys.exit('Error opening {0} for reading'.format(path_torrc))\n\n # save a backup\n open(path_torrc_backup, 'w').write(torrc)\n\n # append the additions\n open(path_torrc, 'w').write(torrc + torrc_additions)\n\n # reload tor\n subprocess.call(['/usr/sbin/service', 'tor', 'reload'])\n\n # success\n subprocess.call(['/usr/bin/sudo', '-u', 'amnesia', '/usr/bin/notify-send', '-i', '/home/amnesia/Persistent/.securedrop/securedrop_icon.png',\n 'Updated torrc!', 'You can now connect to your SecureDrop\\ndocument interface.'])\n", "path": "tails_files/securedrop_init.py"}]} | 1,167 | 842 |
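The golden diff for this record reduces to two independent changes: reload tor through systemd and fail loudly if that does not work, and send the desktop notification through Tails' own `tails-notify-user` wrapper instead of calling `notify-send` across user boundaries. A minimal, self-contained sketch of that pattern (the commands and messages are taken from the diff itself; everything else is illustrative):

```python
import subprocess
import sys

# Reload tor via systemd; check_call raises CalledProcessError on a non-zero
# exit status, which we turn into a clean error message instead of failing silently.
try:
    subprocess.check_call(['systemctl', 'reload', '[email protected]'])
except subprocess.CalledProcessError:
    sys.exit('Error reloading Tor')

# tails-notify-user is Tails' wrapper for showing a desktop notification to the
# logged-in user from a root-owned background script (title, then body).
subprocess.call(['tails-notify-user',
                 'SecureDrop successfully auto-configured!',
                 'You can now access the Document Interface.'])
```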
gh_patches_debug_38860 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-1745 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] some PDFs raise a NotImplementedError
### Description
Since 1.9.2 (I skipped 1.9 and 1.9.1), account statements from the German DKB bank (including ones that previously worked fine) get stuck in processing.
There are some messages about barcode separation, so maybe something is broken there, as there is no barcode visible in the PDF.
Unfortunately, "reprinting" the PDF with a tool that usually fixes files which cannot be processed in paperless does not work here either.
Unfortunately I cannot share samples, as it's bank data....
Have barcode separation enabled:
PAPERLESS_CONSUMER_ENABLE_BARCODES=true
PAPERLESS_CONSUMER_BARCODE_STRING=PATCHT
### Steps to reproduce
1. get the document consumed
2. process gets stuck in frontend

3. shows up as queued for quite some time

4. ends up as failed task
### Webserver logs
```bash
Kontoauszug_1023735317_Nr_2022_003_per_2022_08_01.pdf
HISTORY
Id:
79423d05e71d4a10b2457bf23f91cd60
Name:
Kontoauszug_1023735317_Nr_2022_003_per_2022_08_01.pdf
Func:
documents.tasks.consume_file
Hook:
-
Args:
/tmp/paperless/paperless-upload-0n2e8jty
Kwargs:
{'override_filename': 'Kontoauszug_1023735317_Nr_2022_003_per_2022_08_01.pdf', 'override_title': None, 'override_correspondent_id': None, 'override_document_type_id': None, 'override_tag_ids': None, 'task_id': 'dc8d5b5b-5eec-4395-b29a-22f4ee8df01a', 'override_created': None}
Result:
Not sure how to handle PDF image of this type : Traceback (most recent call last):
File "/usr/src/paperless/src/src/django-q/django_q/cluster.py", line 454, in worker
res = f(*task["args"], **task["kwargs"])
File "/usr/src/paperless/src/documents/tasks.py", line 99, in consume_file
pdf_filepath, separators = barcodes.scan_file_for_separating_barcodes(path)
File "/usr/src/paperless/src/documents/barcodes.py", line 126, in scan_file_for_separating_barcodes
pillow_img = pdfimage.as_pil_image()
File "/usr/local/lib/python3.9/site-packages/pikepdf/models/image.py", line 742, in as_pil_image
im = self._extract_transcoded()
File "/usr/local/lib/python3.9/site-packages/pikepdf/models/image.py", line 606, in _extract_transcoded
if self.mode in {'DeviceN', 'Separation'}:
File "/usr/local/lib/python3.9/site-packages/pikepdf/models/image.py", line 301, in mode
raise NotImplementedError(
NotImplementedError: Not sure how to handle PDF image of this type
Group:
-
Started:
Oct. 4, 2022, 5:52 p.m.
Stopped:
Oct. 4, 2022, 5:56 p.m.
```
### Paperless-ngx version
1.9.2
### Host OS
Synology with Docker
### Installation method
Docker - ghcr.io image
webserver:
image: ghcr.io/paperless-ngx/paperless-ngx:latest
### Browser
Chrome
### Configuration changes
barcodes for page separation was enabled, problem is resolved after commenting it out in compose file:
#PAPERLESS_CONSUMER_ENABLE_BARCODES=true
#PAPERLESS_CONSUMER_BARCODE_STRING=PATCHT
### Other
_No response_
</issue>
<code>
[start of src/documents/barcodes.py]
1 import logging
2 import os
3 import shutil
4 import tempfile
5 from functools import lru_cache
6 from typing import List
7 from typing import Optional
8 from typing import Tuple
9
10 import magic
11 from django.conf import settings
12 from pikepdf import Page
13 from pikepdf import Pdf
14 from pikepdf import PdfImage
15 from PIL import Image
16 from PIL import ImageSequence
17 from pyzbar import pyzbar
18
19 logger = logging.getLogger("paperless.barcodes")
20
21
22 @lru_cache(maxsize=8)
23 def supported_file_type(mime_type) -> bool:
24 """
25 Determines if the file is valid for barcode
26 processing, based on MIME type and settings
27
28 :return: True if the file is supported, False otherwise
29 """
30 supported_mime = ["application/pdf"]
31 if settings.CONSUMER_BARCODE_TIFF_SUPPORT:
32 supported_mime += ["image/tiff"]
33
34 return mime_type in supported_mime
35
36
37 def barcode_reader(image: Image) -> List[str]:
38 """
39 Read any barcodes contained in image
40 Returns a list containing all found barcodes
41 """
42 barcodes = []
43 # Decode the barcode image
44 detected_barcodes = pyzbar.decode(image)
45
46 if detected_barcodes:
47 # Traverse through all the detected barcodes in image
48 for barcode in detected_barcodes:
49 if barcode.data:
50 decoded_barcode = barcode.data.decode("utf-8")
51 barcodes.append(decoded_barcode)
52 logger.debug(
53 f"Barcode of type {str(barcode.type)} found: {decoded_barcode}",
54 )
55 return barcodes
56
57
58 def get_file_mime_type(path: str) -> str:
59 """
60 Determines the file type, based on MIME type.
61
62 Returns the MIME type.
63 """
64 mime_type = magic.from_file(path, mime=True)
65 logger.debug(f"Detected mime type: {mime_type}")
66 return mime_type
67
68
69 def convert_from_tiff_to_pdf(filepath: str) -> str:
70 """
71 converts a given TIFF image file to pdf into a temporary directory.
72
73 Returns the new pdf file.
74 """
75 file_name = os.path.splitext(os.path.basename(filepath))[0]
76 mime_type = get_file_mime_type(filepath)
77 tempdir = tempfile.mkdtemp(prefix="paperless-", dir=settings.SCRATCH_DIR)
78 # use old file name with pdf extension
79 if mime_type == "image/tiff":
80 newpath = os.path.join(tempdir, file_name + ".pdf")
81 else:
82 logger.warning(
83 f"Cannot convert mime type {str(mime_type)} from {str(filepath)} to pdf.",
84 )
85 return None
86 with Image.open(filepath) as image:
87 images = []
88 for i, page in enumerate(ImageSequence.Iterator(image)):
89 page = page.convert("RGB")
90 images.append(page)
91 try:
92 if len(images) == 1:
93 images[0].save(newpath)
94 else:
95 images[0].save(newpath, save_all=True, append_images=images[1:])
96 except OSError as e:
97 logger.warning(
98 f"Could not save the file as pdf. Error: {str(e)}",
99 )
100 return None
101 return newpath
102
103
104 def scan_file_for_separating_barcodes(filepath: str) -> Tuple[Optional[str], List[int]]:
105 """
106 Scan the provided pdf file for page separating barcodes
107 Returns a PDF filepath and a list of pagenumbers,
108 which separate the file into new files
109 """
110
111 separator_page_numbers = []
112 pdf_filepath = None
113
114 mime_type = get_file_mime_type(filepath)
115
116 if supported_file_type(mime_type):
117 pdf_filepath = filepath
118 if mime_type == "image/tiff":
119 pdf_filepath = convert_from_tiff_to_pdf(filepath)
120
121 pdf = Pdf.open(pdf_filepath)
122
123 for page_num, page in enumerate(pdf.pages):
124 for image_key in page.images:
125 pdfimage = PdfImage(page.images[image_key])
126 pillow_img = pdfimage.as_pil_image()
127
128 detected_barcodes = barcode_reader(pillow_img)
129
130 if settings.CONSUMER_BARCODE_STRING in detected_barcodes:
131 separator_page_numbers.append(page_num)
132 else:
133 logger.warning(
134 f"Unsupported file format for barcode reader: {str(mime_type)}",
135 )
136 return pdf_filepath, separator_page_numbers
137
138
139 def separate_pages(filepath: str, pages_to_split_on: List[int]) -> List[str]:
140 """
141 Separate the provided pdf file on the pages_to_split_on.
142 The pages which are defined by page_numbers will be removed.
143 Returns a list of (temporary) filepaths to consume.
144 These will need to be deleted later.
145 """
146
147 document_paths = []
148
149 if not pages_to_split_on:
150 logger.warning("No pages to split on!")
151 return document_paths
152
153 os.makedirs(settings.SCRATCH_DIR, exist_ok=True)
154 tempdir = tempfile.mkdtemp(prefix="paperless-", dir=settings.SCRATCH_DIR)
155 fname = os.path.splitext(os.path.basename(filepath))[0]
156 pdf = Pdf.open(filepath)
157
158 # A list of documents, ie a list of lists of pages
159 documents: List[List[Page]] = []
160 # A single document, ie a list of pages
161 document: List[Page] = []
162
163 for idx, page in enumerate(pdf.pages):
164 # Keep building the new PDF as long as it is not a
165 # separator index
166 if idx not in pages_to_split_on:
167 document.append(page)
168 # Make sure to append the very last document to the documents
169 if idx == (len(pdf.pages) - 1):
170 documents.append(document)
171 document = []
172 else:
173 # This is a split index, save the current PDF pages, and restart
174 # a new destination page listing
175 logger.debug(f"Starting new document at idx {idx}")
176 documents.append(document)
177 document = []
178
179 documents = [x for x in documents if len(x)]
180
181 logger.debug(f"Split into {len(documents)} new documents")
182
183 # Write the new documents out
184 for doc_idx, document in enumerate(documents):
185 dst = Pdf.new()
186 dst.pages.extend(document)
187
188 output_filename = f"{fname}_document_{doc_idx}.pdf"
189
190 logger.debug(f"pdf no:{doc_idx} has {len(dst.pages)} pages")
191 savepath = os.path.join(tempdir, output_filename)
192 with open(savepath, "wb") as out:
193 dst.save(out)
194 document_paths.append(savepath)
195
196 return document_paths
197
198
199 def save_to_dir(
200 filepath: str,
201 newname: str = None,
202 target_dir: str = settings.CONSUMPTION_DIR,
203 ):
204 """
205 Copies filepath to target_dir.
206 Optionally rename the file.
207 """
208 if os.path.isfile(filepath) and os.path.isdir(target_dir):
209 dst = shutil.copy(filepath, target_dir)
210 logging.debug(f"saved {str(filepath)} to {str(dst)}")
211 if newname:
212 dst_new = os.path.join(target_dir, newname)
213 logger.debug(f"moving {str(dst)} to {str(dst_new)}")
214 os.rename(dst, dst_new)
215 else:
216 logger.warning(f"{str(filepath)} or {str(target_dir)} don't exist.")
217
[end of src/documents/barcodes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/documents/barcodes.py b/src/documents/barcodes.py
--- a/src/documents/barcodes.py
+++ b/src/documents/barcodes.py
@@ -9,6 +9,7 @@
import magic
from django.conf import settings
+from pdf2image import convert_from_path
from pikepdf import Page
from pikepdf import Pdf
from pikepdf import PdfImage
@@ -19,6 +20,10 @@
logger = logging.getLogger("paperless.barcodes")
+class BarcodeImageFormatError(Exception):
+ pass
+
+
@lru_cache(maxsize=8)
def supported_file_type(mime_type) -> bool:
"""
@@ -108,6 +113,33 @@
which separate the file into new files
"""
+ def _pikepdf_barcode_scan(pdf_filepath: str):
+ with Pdf.open(pdf_filepath) as pdf:
+ for page_num, page in enumerate(pdf.pages):
+ for image_key in page.images:
+ pdfimage = PdfImage(page.images[image_key])
+
+ if "/CCITTFaxDecode" in pdfimage.filters:
+ raise BarcodeImageFormatError()
+
+ # Not all images can be transcoded to a PIL image, which
+ # is what pyzbar expects to receive
+ pillow_img = pdfimage.as_pil_image()
+
+ detected_barcodes = barcode_reader(pillow_img)
+
+ if settings.CONSUMER_BARCODE_STRING in detected_barcodes:
+ separator_page_numbers.append(page_num)
+
+ def _pdf2image_barcode_scan(pdf_filepath: str):
+ # use a temporary directory in case the file os too big to handle in memory
+ with tempfile.TemporaryDirectory() as path:
+ pages_from_path = convert_from_path(pdf_filepath, output_folder=path)
+ for current_page_number, page in enumerate(pages_from_path):
+ current_barcodes = barcode_reader(page)
+ if settings.CONSUMER_BARCODE_STRING in current_barcodes:
+ separator_page_numbers.append(current_page_number)
+
separator_page_numbers = []
pdf_filepath = None
@@ -118,17 +150,17 @@
if mime_type == "image/tiff":
pdf_filepath = convert_from_tiff_to_pdf(filepath)
- pdf = Pdf.open(pdf_filepath)
-
- for page_num, page in enumerate(pdf.pages):
- for image_key in page.images:
- pdfimage = PdfImage(page.images[image_key])
- pillow_img = pdfimage.as_pil_image()
+ try:
+ _pikepdf_barcode_scan(pdf_filepath)
+ except Exception as e:
- detected_barcodes = barcode_reader(pillow_img)
+ logger.warning(
+ f"Exception using pikepdf for barcodes, falling back to pdf2image: {e}",
+ )
+ # Reset this incase pikepdf got part way through
+ separator_page_numbers = []
+ _pdf2image_barcode_scan(pdf_filepath)
- if settings.CONSUMER_BARCODE_STRING in detected_barcodes:
- separator_page_numbers.append(page_num)
else:
logger.warning(
f"Unsupported file format for barcode reader: {str(mime_type)}",
| {"golden_diff": "diff --git a/src/documents/barcodes.py b/src/documents/barcodes.py\n--- a/src/documents/barcodes.py\n+++ b/src/documents/barcodes.py\n@@ -9,6 +9,7 @@\n \n import magic\n from django.conf import settings\n+from pdf2image import convert_from_path\n from pikepdf import Page\n from pikepdf import Pdf\n from pikepdf import PdfImage\n@@ -19,6 +20,10 @@\n logger = logging.getLogger(\"paperless.barcodes\")\n \n \n+class BarcodeImageFormatError(Exception):\n+ pass\n+\n+\n @lru_cache(maxsize=8)\n def supported_file_type(mime_type) -> bool:\n \"\"\"\n@@ -108,6 +113,33 @@\n which separate the file into new files\n \"\"\"\n \n+ def _pikepdf_barcode_scan(pdf_filepath: str):\n+ with Pdf.open(pdf_filepath) as pdf:\n+ for page_num, page in enumerate(pdf.pages):\n+ for image_key in page.images:\n+ pdfimage = PdfImage(page.images[image_key])\n+\n+ if \"/CCITTFaxDecode\" in pdfimage.filters:\n+ raise BarcodeImageFormatError()\n+\n+ # Not all images can be transcoded to a PIL image, which\n+ # is what pyzbar expects to receive\n+ pillow_img = pdfimage.as_pil_image()\n+\n+ detected_barcodes = barcode_reader(pillow_img)\n+\n+ if settings.CONSUMER_BARCODE_STRING in detected_barcodes:\n+ separator_page_numbers.append(page_num)\n+\n+ def _pdf2image_barcode_scan(pdf_filepath: str):\n+ # use a temporary directory in case the file os too big to handle in memory\n+ with tempfile.TemporaryDirectory() as path:\n+ pages_from_path = convert_from_path(pdf_filepath, output_folder=path)\n+ for current_page_number, page in enumerate(pages_from_path):\n+ current_barcodes = barcode_reader(page)\n+ if settings.CONSUMER_BARCODE_STRING in current_barcodes:\n+ separator_page_numbers.append(current_page_number)\n+\n separator_page_numbers = []\n pdf_filepath = None\n \n@@ -118,17 +150,17 @@\n if mime_type == \"image/tiff\":\n pdf_filepath = convert_from_tiff_to_pdf(filepath)\n \n- pdf = Pdf.open(pdf_filepath)\n-\n- for page_num, page in enumerate(pdf.pages):\n- for image_key in page.images:\n- pdfimage = PdfImage(page.images[image_key])\n- pillow_img = pdfimage.as_pil_image()\n+ try:\n+ _pikepdf_barcode_scan(pdf_filepath)\n+ except Exception as e:\n \n- detected_barcodes = barcode_reader(pillow_img)\n+ logger.warning(\n+ f\"Exception using pikepdf for barcodes, falling back to pdf2image: {e}\",\n+ )\n+ # Reset this incase pikepdf got part way through\n+ separator_page_numbers = []\n+ _pdf2image_barcode_scan(pdf_filepath)\n \n- if settings.CONSUMER_BARCODE_STRING in detected_barcodes:\n- separator_page_numbers.append(page_num)\n else:\n logger.warning(\n f\"Unsupported file format for barcode reader: {str(mime_type)}\",\n", "issue": "[BUG] some PDFs raise a NotImplementedError\n### Description\r\n\r\nSince 1.9.2 (I skipped 1.9 and 1.9.1) account statements of german dkb bank (also ones previously working fine) get stuck in processing.\r\nThere are some statements about barcode separation, maybe something broken there, as there is no barcode visible in the PDF.\r\nUnfortunately also \"reprinting\" the PDF with a tool usually fixing issues for files which cannot be processed in papaerless is not working here.\r\nUnfortunately I cannot share samples as its bank data....\r\n\r\nHave barcode separation enabled:\r\nPAPERLESS_CONSUMER_ENABLE_BARCODES=true\r\nPAPERLESS_CONSUMER_BARCODE_STRING=PATCHT\r\n\r\n### Steps to reproduce\r\n\r\n1. get the document consumed\r\n2. process gets stuck in frontend\r\n\r\n3. shows up as queued for quite some time\r\n\r\n4. 
ends up as failed task\r\n\r\n### Webserver logs\r\n\r\n```bash\r\nKontoauszug_1023735317_Nr_2022_003_per_2022_08_01.pdf\r\nHISTORY\r\n\r\nId:\r\n79423d05e71d4a10b2457bf23f91cd60\r\nName:\r\nKontoauszug_1023735317_Nr_2022_003_per_2022_08_01.pdf\r\nFunc:\r\ndocuments.tasks.consume_file\r\nHook:\r\n-\r\nArgs:\r\n/tmp/paperless/paperless-upload-0n2e8jty\r\nKwargs:\r\n{'override_filename': 'Kontoauszug_1023735317_Nr_2022_003_per_2022_08_01.pdf', 'override_title': None, 'override_correspondent_id': None, 'override_document_type_id': None, 'override_tag_ids': None, 'task_id': 'dc8d5b5b-5eec-4395-b29a-22f4ee8df01a', 'override_created': None}\r\nResult:\r\nNot sure how to handle PDF image of this type : Traceback (most recent call last):\r\nFile \"/usr/src/paperless/src/src/django-q/django_q/cluster.py\", line 454, in worker\r\nres = f(*task[\"args\"], **task[\"kwargs\"])\r\nFile \"/usr/src/paperless/src/documents/tasks.py\", line 99, in consume_file\r\npdf_filepath, separators = barcodes.scan_file_for_separating_barcodes(path)\r\nFile \"/usr/src/paperless/src/documents/barcodes.py\", line 126, in scan_file_for_separating_barcodes\r\npillow_img = pdfimage.as_pil_image()\r\nFile \"/usr/local/lib/python3.9/site-packages/pikepdf/models/image.py\", line 742, in as_pil_image\r\nim = self._extract_transcoded()\r\nFile \"/usr/local/lib/python3.9/site-packages/pikepdf/models/image.py\", line 606, in _extract_transcoded\r\nif self.mode in {'DeviceN', 'Separation'}:\r\nFile \"/usr/local/lib/python3.9/site-packages/pikepdf/models/image.py\", line 301, in mode\r\nraise NotImplementedError(\r\nNotImplementedError: Not sure how to handle PDF image of this type\r\nGroup:\r\n-\r\nStarted:\r\nOct. 4, 2022, 5:52 p.m.\r\nStopped:\r\nOct. 4, 2022, 5:56 p.m.\r\n```\r\n\r\n\r\n### Paperless-ngx version\r\n\r\n1.9.2\r\n\r\n### Host OS\r\n\r\nSynology with Docker\r\n\r\n### Installation method\r\n\r\nDocker - ghcr.io image\r\nwebserver:\r\n image: ghcr.io/paperless-ngx/paperless-ngx:latest\r\n\r\n### Browser\r\n\r\nChrome\r\n\r\n### Configuration changes\r\n\r\nbarcodes for page separation was enabled, problem is resolved after commenting it out in compose file:\r\n#PAPERLESS_CONSUMER_ENABLE_BARCODES=true\r\n#PAPERLESS_CONSUMER_BARCODE_STRING=PATCHT\r\n\r\n### Other\r\n\r\n_No response_\n", "before_files": [{"content": "import logging\nimport os\nimport shutil\nimport tempfile\nfrom functools import lru_cache\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\n\nimport magic\nfrom django.conf import settings\nfrom pikepdf import Page\nfrom pikepdf import Pdf\nfrom pikepdf import PdfImage\nfrom PIL import Image\nfrom PIL import ImageSequence\nfrom pyzbar import pyzbar\n\nlogger = logging.getLogger(\"paperless.barcodes\")\n\n\n@lru_cache(maxsize=8)\ndef supported_file_type(mime_type) -> bool:\n \"\"\"\n Determines if the file is valid for barcode\n processing, based on MIME type and settings\n\n :return: True if the file is supported, False otherwise\n \"\"\"\n supported_mime = [\"application/pdf\"]\n if settings.CONSUMER_BARCODE_TIFF_SUPPORT:\n supported_mime += [\"image/tiff\"]\n\n return mime_type in supported_mime\n\n\ndef barcode_reader(image: Image) -> List[str]:\n \"\"\"\n Read any barcodes contained in image\n Returns a list containing all found barcodes\n \"\"\"\n barcodes = []\n # Decode the barcode image\n detected_barcodes = pyzbar.decode(image)\n\n if detected_barcodes:\n # Traverse through all the detected barcodes in image\n for barcode in detected_barcodes:\n if barcode.data:\n 
decoded_barcode = barcode.data.decode(\"utf-8\")\n barcodes.append(decoded_barcode)\n logger.debug(\n f\"Barcode of type {str(barcode.type)} found: {decoded_barcode}\",\n )\n return barcodes\n\n\ndef get_file_mime_type(path: str) -> str:\n \"\"\"\n Determines the file type, based on MIME type.\n\n Returns the MIME type.\n \"\"\"\n mime_type = magic.from_file(path, mime=True)\n logger.debug(f\"Detected mime type: {mime_type}\")\n return mime_type\n\n\ndef convert_from_tiff_to_pdf(filepath: str) -> str:\n \"\"\"\n converts a given TIFF image file to pdf into a temporary directory.\n\n Returns the new pdf file.\n \"\"\"\n file_name = os.path.splitext(os.path.basename(filepath))[0]\n mime_type = get_file_mime_type(filepath)\n tempdir = tempfile.mkdtemp(prefix=\"paperless-\", dir=settings.SCRATCH_DIR)\n # use old file name with pdf extension\n if mime_type == \"image/tiff\":\n newpath = os.path.join(tempdir, file_name + \".pdf\")\n else:\n logger.warning(\n f\"Cannot convert mime type {str(mime_type)} from {str(filepath)} to pdf.\",\n )\n return None\n with Image.open(filepath) as image:\n images = []\n for i, page in enumerate(ImageSequence.Iterator(image)):\n page = page.convert(\"RGB\")\n images.append(page)\n try:\n if len(images) == 1:\n images[0].save(newpath)\n else:\n images[0].save(newpath, save_all=True, append_images=images[1:])\n except OSError as e:\n logger.warning(\n f\"Could not save the file as pdf. Error: {str(e)}\",\n )\n return None\n return newpath\n\n\ndef scan_file_for_separating_barcodes(filepath: str) -> Tuple[Optional[str], List[int]]:\n \"\"\"\n Scan the provided pdf file for page separating barcodes\n Returns a PDF filepath and a list of pagenumbers,\n which separate the file into new files\n \"\"\"\n\n separator_page_numbers = []\n pdf_filepath = None\n\n mime_type = get_file_mime_type(filepath)\n\n if supported_file_type(mime_type):\n pdf_filepath = filepath\n if mime_type == \"image/tiff\":\n pdf_filepath = convert_from_tiff_to_pdf(filepath)\n\n pdf = Pdf.open(pdf_filepath)\n\n for page_num, page in enumerate(pdf.pages):\n for image_key in page.images:\n pdfimage = PdfImage(page.images[image_key])\n pillow_img = pdfimage.as_pil_image()\n\n detected_barcodes = barcode_reader(pillow_img)\n\n if settings.CONSUMER_BARCODE_STRING in detected_barcodes:\n separator_page_numbers.append(page_num)\n else:\n logger.warning(\n f\"Unsupported file format for barcode reader: {str(mime_type)}\",\n )\n return pdf_filepath, separator_page_numbers\n\n\ndef separate_pages(filepath: str, pages_to_split_on: List[int]) -> List[str]:\n \"\"\"\n Separate the provided pdf file on the pages_to_split_on.\n The pages which are defined by page_numbers will be removed.\n Returns a list of (temporary) filepaths to consume.\n These will need to be deleted later.\n \"\"\"\n\n document_paths = []\n\n if not pages_to_split_on:\n logger.warning(\"No pages to split on!\")\n return document_paths\n\n os.makedirs(settings.SCRATCH_DIR, exist_ok=True)\n tempdir = tempfile.mkdtemp(prefix=\"paperless-\", dir=settings.SCRATCH_DIR)\n fname = os.path.splitext(os.path.basename(filepath))[0]\n pdf = Pdf.open(filepath)\n\n # A list of documents, ie a list of lists of pages\n documents: List[List[Page]] = []\n # A single document, ie a list of pages\n document: List[Page] = []\n\n for idx, page in enumerate(pdf.pages):\n # Keep building the new PDF as long as it is not a\n # separator index\n if idx not in pages_to_split_on:\n document.append(page)\n # Make sure to append the very last document to the documents\n 
if idx == (len(pdf.pages) - 1):\n documents.append(document)\n document = []\n else:\n # This is a split index, save the current PDF pages, and restart\n # a new destination page listing\n logger.debug(f\"Starting new document at idx {idx}\")\n documents.append(document)\n document = []\n\n documents = [x for x in documents if len(x)]\n\n logger.debug(f\"Split into {len(documents)} new documents\")\n\n # Write the new documents out\n for doc_idx, document in enumerate(documents):\n dst = Pdf.new()\n dst.pages.extend(document)\n\n output_filename = f\"{fname}_document_{doc_idx}.pdf\"\n\n logger.debug(f\"pdf no:{doc_idx} has {len(dst.pages)} pages\")\n savepath = os.path.join(tempdir, output_filename)\n with open(savepath, \"wb\") as out:\n dst.save(out)\n document_paths.append(savepath)\n\n return document_paths\n\n\ndef save_to_dir(\n filepath: str,\n newname: str = None,\n target_dir: str = settings.CONSUMPTION_DIR,\n):\n \"\"\"\n Copies filepath to target_dir.\n Optionally rename the file.\n \"\"\"\n if os.path.isfile(filepath) and os.path.isdir(target_dir):\n dst = shutil.copy(filepath, target_dir)\n logging.debug(f\"saved {str(filepath)} to {str(dst)}\")\n if newname:\n dst_new = os.path.join(target_dir, newname)\n logger.debug(f\"moving {str(dst)} to {str(dst_new)}\")\n os.rename(dst, dst_new)\n else:\n logger.warning(f\"{str(filepath)} or {str(target_dir)} don't exist.\")\n", "path": "src/documents/barcodes.py"}]} | 3,658 | 704 |
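The patch for this record follows a try-the-cheap-path, fall-back-to-rasterising pattern: pikepdf image extraction is attempted first, and if any page image cannot be transcoded (the `NotImplementedError` from the report), the whole PDF is rendered with pdf2image and those page images are scanned instead. A condensed sketch of that shape, assuming pikepdf, pdf2image and pyzbar are installed; the function names here are illustrative, not the plugin's own:

```python
import tempfile

from pdf2image import convert_from_path
from pikepdf import Pdf, PdfImage
from pyzbar import pyzbar


def barcode_reader(image):
    """Decode all barcodes in a PIL image (same role as the plugin helper)."""
    return [b.data.decode("utf-8") for b in pyzbar.decode(image) if b.data]


def find_separator_pages(pdf_filepath, separator_string):
    """Return the page numbers whose barcodes contain separator_string."""

    def scan_with_pikepdf():
        pages = []
        with Pdf.open(pdf_filepath) as pdf:
            for page_num, page in enumerate(pdf.pages):
                for image_key in page.images:
                    # Not every embedded image can be transcoded to PIL.
                    pil_img = PdfImage(page.images[image_key]).as_pil_image()
                    if separator_string in barcode_reader(pil_img):
                        pages.append(page_num)
        return pages

    def scan_with_pdf2image():
        pages = []
        # Rasterise into a temporary folder so large PDFs do not live in memory.
        with tempfile.TemporaryDirectory() as path:
            rendered = convert_from_path(pdf_filepath, output_folder=path)
            for page_num, page in enumerate(rendered):
                if separator_string in barcode_reader(page):
                    pages.append(page_num)
        return pages

    try:
        return scan_with_pikepdf()
    except Exception:
        # pikepdf cannot transcode some image types (e.g. the reported
        # NotImplementedError); fall back to rendering the pages instead.
        return scan_with_pdf2image()
```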
gh_patches_debug_56077 | rasdani/github-patches | git_diff | pypa__pip-8124 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'pip cache info' fails when no-cache-dir set
pip version: pip 20.1b1
Python version: CPython 3.8.1
OS: Win 10 64
Testing the 20.1 beta, executing 'pip cache info' crashes. I'm guessing it's due to pip.ini turning off caching.
pip.ini:
```
[global]
no-cache-dir = false
```
Command execution:
```
> pip cache info
ERROR: Exception:
Traceback (most recent call last):
File "c:\program files\python38\lib\site-packages\pip\_internal\cli\base_command.py", line 188, in _main
status = self.run(options, args)
File "c:\program files\python38\lib\site-packages\pip\_internal\commands\cache.py", line 62, in run
handlers[action](options, args[1:])
File "c:\program files\python38\lib\site-packages\pip\_internal\commands\cache.py", line 74, in get_cache_info
num_packages = len(self._find_wheels(options, '*'))
File "c:\program files\python38\lib\site-packages\pip\_internal\commands\cache.py", line 145, in _find_wheels
wheel_dir = self._wheels_cache_dir(options)
File "c:\program files\python38\lib\site-packages\pip\_internal\commands\cache.py", line 141, in _wheels_cache_dir
return os.path.join(options.cache_dir, 'wheels')
File "c:\program files\python38\lib\ntpath.py", line 78, in join
path = os.fspath(path)
TypeError: expected str, bytes or os.PathLike object, not bool
```
</issue>
<code>
[start of src/pip/_internal/commands/cache.py]
1 from __future__ import absolute_import
2
3 import logging
4 import os
5 import textwrap
6
7 import pip._internal.utils.filesystem as filesystem
8 from pip._internal.cli.base_command import Command
9 from pip._internal.cli.status_codes import ERROR, SUCCESS
10 from pip._internal.exceptions import CommandError, PipError
11 from pip._internal.utils.typing import MYPY_CHECK_RUNNING
12
13 if MYPY_CHECK_RUNNING:
14 from optparse import Values
15 from typing import Any, List
16
17
18 logger = logging.getLogger(__name__)
19
20
21 class CacheCommand(Command):
22 """
23 Inspect and manage pip's wheel cache.
24
25 Subcommands:
26
27 info: Show information about the cache.
28 list: List filenames of packages stored in the cache.
29 remove: Remove one or more package from the cache.
30 purge: Remove all items from the cache.
31
32 <pattern> can be a glob expression or a package name.
33 """
34
35 usage = """
36 %prog info
37 %prog list [<pattern>]
38 %prog remove <pattern>
39 %prog purge
40 """
41
42 def run(self, options, args):
43 # type: (Values, List[Any]) -> int
44 handlers = {
45 "info": self.get_cache_info,
46 "list": self.list_cache_items,
47 "remove": self.remove_cache_items,
48 "purge": self.purge_cache,
49 }
50
51 # Determine action
52 if not args or args[0] not in handlers:
53 logger.error("Need an action ({}) to perform.".format(
54 ", ".join(sorted(handlers)))
55 )
56 return ERROR
57
58 action = args[0]
59
60 # Error handling happens here, not in the action-handlers.
61 try:
62 handlers[action](options, args[1:])
63 except PipError as e:
64 logger.error(e.args[0])
65 return ERROR
66
67 return SUCCESS
68
69 def get_cache_info(self, options, args):
70 # type: (Values, List[Any]) -> None
71 if args:
72 raise CommandError('Too many arguments')
73
74 num_packages = len(self._find_wheels(options, '*'))
75
76 cache_location = self._wheels_cache_dir(options)
77 cache_size = filesystem.format_directory_size(cache_location)
78
79 message = textwrap.dedent("""
80 Location: {location}
81 Size: {size}
82 Number of wheels: {package_count}
83 """).format(
84 location=cache_location,
85 package_count=num_packages,
86 size=cache_size,
87 ).strip()
88
89 logger.info(message)
90
91 def list_cache_items(self, options, args):
92 # type: (Values, List[Any]) -> None
93 if len(args) > 1:
94 raise CommandError('Too many arguments')
95
96 if args:
97 pattern = args[0]
98 else:
99 pattern = '*'
100
101 files = self._find_wheels(options, pattern)
102
103 if not files:
104 logger.info('Nothing cached.')
105 return
106
107 results = []
108 for filename in files:
109 wheel = os.path.basename(filename)
110 size = filesystem.format_file_size(filename)
111 results.append(' - {} ({})'.format(wheel, size))
112 logger.info('Cache contents:\n')
113 logger.info('\n'.join(sorted(results)))
114
115 def remove_cache_items(self, options, args):
116 # type: (Values, List[Any]) -> None
117 if len(args) > 1:
118 raise CommandError('Too many arguments')
119
120 if not args:
121 raise CommandError('Please provide a pattern')
122
123 files = self._find_wheels(options, args[0])
124 if not files:
125 raise CommandError('No matching packages')
126
127 for filename in files:
128 os.unlink(filename)
129 logger.debug('Removed %s', filename)
130 logger.info('Files removed: %s', len(files))
131
132 def purge_cache(self, options, args):
133 # type: (Values, List[Any]) -> None
134 if args:
135 raise CommandError('Too many arguments')
136
137 return self.remove_cache_items(options, ['*'])
138
139 def _wheels_cache_dir(self, options):
140 # type: (Values) -> str
141 return os.path.join(options.cache_dir, 'wheels')
142
143 def _find_wheels(self, options, pattern):
144 # type: (Values, str) -> List[str]
145 wheel_dir = self._wheels_cache_dir(options)
146
147 # The wheel filename format, as specified in PEP 427, is:
148 # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl
149 #
150 # Additionally, non-alphanumeric values in the distribution are
151 # normalized to underscores (_), meaning hyphens can never occur
152 # before `-{version}`.
153 #
154 # Given that information:
155 # - If the pattern we're given contains a hyphen (-), the user is
156 # providing at least the version. Thus, we can just append `*.whl`
157 # to match the rest of it.
158 # - If the pattern we're given doesn't contain a hyphen (-), the
159 # user is only providing the name. Thus, we append `-*.whl` to
160 # match the hyphen before the version, followed by anything else.
161 #
162 # PEP 427: https://www.python.org/dev/peps/pep-0427/
163 pattern = pattern + ("*.whl" if "-" in pattern else "-*.whl")
164
165 return filesystem.find_files(wheel_dir, pattern)
166
[end of src/pip/_internal/commands/cache.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pip/_internal/commands/cache.py b/src/pip/_internal/commands/cache.py
--- a/src/pip/_internal/commands/cache.py
+++ b/src/pip/_internal/commands/cache.py
@@ -48,6 +48,11 @@
"purge": self.purge_cache,
}
+ if not options.cache_dir:
+ logger.error("pip cache commands can not "
+ "function since cache is disabled.")
+ return ERROR
+
# Determine action
if not args or args[0] not in handlers:
logger.error("Need an action ({}) to perform.".format(
| {"golden_diff": "diff --git a/src/pip/_internal/commands/cache.py b/src/pip/_internal/commands/cache.py\n--- a/src/pip/_internal/commands/cache.py\n+++ b/src/pip/_internal/commands/cache.py\n@@ -48,6 +48,11 @@\n \"purge\": self.purge_cache,\n }\n \n+ if not options.cache_dir:\n+ logger.error(\"pip cache commands can not \"\n+ \"function since cache is disabled.\")\n+ return ERROR\n+\n # Determine action\n if not args or args[0] not in handlers:\n logger.error(\"Need an action ({}) to perform.\".format(\n", "issue": "'pip cache info' fails when no-cache-dir set\npip version: pip 20.1b1\r\nPython version: CPython 3.8.1\r\nOS: Win 10 64\r\n\r\nTesting 20.1 beta, execute 'pip cache info' and crashes. I'm guessing it's due to pip.ini turning off caching.\r\n\r\npip.ini:\r\n```\r\n[global]\r\nno-cache-dir = false\r\n```\r\n\r\nCommand execution:\r\n```\r\n> pip cache info\r\nERROR: Exception:\r\nTraceback (most recent call last):\r\n File \"c:\\program files\\python38\\lib\\site-packages\\pip\\_internal\\cli\\base_command.py\", line 188, in _main\r\n status = self.run(options, args)\r\n File \"c:\\program files\\python38\\lib\\site-packages\\pip\\_internal\\commands\\cache.py\", line 62, in run\r\n handlers[action](options, args[1:])\r\n File \"c:\\program files\\python38\\lib\\site-packages\\pip\\_internal\\commands\\cache.py\", line 74, in get_cache_info\r\n num_packages = len(self._find_wheels(options, '*'))\r\n File \"c:\\program files\\python38\\lib\\site-packages\\pip\\_internal\\commands\\cache.py\", line 145, in _find_wheels\r\n wheel_dir = self._wheels_cache_dir(options)\r\n File \"c:\\program files\\python38\\lib\\site-packages\\pip\\_internal\\commands\\cache.py\", line 141, in _wheels_cache_dir\r\n return os.path.join(options.cache_dir, 'wheels')\r\n File \"c:\\program files\\python38\\lib\\ntpath.py\", line 78, in join\r\n path = os.fspath(path)\r\nTypeError: expected str, bytes or os.PathLike object, not bool\r\n```\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport os\nimport textwrap\n\nimport pip._internal.utils.filesystem as filesystem\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import ERROR, SUCCESS\nfrom pip._internal.exceptions import CommandError, PipError\nfrom pip._internal.utils.typing import MYPY_CHECK_RUNNING\n\nif MYPY_CHECK_RUNNING:\n from optparse import Values\n from typing import Any, List\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CacheCommand(Command):\n \"\"\"\n Inspect and manage pip's wheel cache.\n\n Subcommands:\n\n info: Show information about the cache.\n list: List filenames of packages stored in the cache.\n remove: Remove one or more package from the cache.\n purge: Remove all items from the cache.\n\n <pattern> can be a glob expression or a package name.\n \"\"\"\n\n usage = \"\"\"\n %prog info\n %prog list [<pattern>]\n %prog remove <pattern>\n %prog purge\n \"\"\"\n\n def run(self, options, args):\n # type: (Values, List[Any]) -> int\n handlers = {\n \"info\": self.get_cache_info,\n \"list\": self.list_cache_items,\n \"remove\": self.remove_cache_items,\n \"purge\": self.purge_cache,\n }\n\n # Determine action\n if not args or args[0] not in handlers:\n logger.error(\"Need an action ({}) to perform.\".format(\n \", \".join(sorted(handlers)))\n )\n return ERROR\n\n action = args[0]\n\n # Error handling happens here, not in the action-handlers.\n try:\n handlers[action](options, args[1:])\n except PipError as e:\n logger.error(e.args[0])\n 
return ERROR\n\n return SUCCESS\n\n def get_cache_info(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n num_packages = len(self._find_wheels(options, '*'))\n\n cache_location = self._wheels_cache_dir(options)\n cache_size = filesystem.format_directory_size(cache_location)\n\n message = textwrap.dedent(\"\"\"\n Location: {location}\n Size: {size}\n Number of wheels: {package_count}\n \"\"\").format(\n location=cache_location,\n package_count=num_packages,\n size=cache_size,\n ).strip()\n\n logger.info(message)\n\n def list_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if args:\n pattern = args[0]\n else:\n pattern = '*'\n\n files = self._find_wheels(options, pattern)\n\n if not files:\n logger.info('Nothing cached.')\n return\n\n results = []\n for filename in files:\n wheel = os.path.basename(filename)\n size = filesystem.format_file_size(filename)\n results.append(' - {} ({})'.format(wheel, size))\n logger.info('Cache contents:\\n')\n logger.info('\\n'.join(sorted(results)))\n\n def remove_cache_items(self, options, args):\n # type: (Values, List[Any]) -> None\n if len(args) > 1:\n raise CommandError('Too many arguments')\n\n if not args:\n raise CommandError('Please provide a pattern')\n\n files = self._find_wheels(options, args[0])\n if not files:\n raise CommandError('No matching packages')\n\n for filename in files:\n os.unlink(filename)\n logger.debug('Removed %s', filename)\n logger.info('Files removed: %s', len(files))\n\n def purge_cache(self, options, args):\n # type: (Values, List[Any]) -> None\n if args:\n raise CommandError('Too many arguments')\n\n return self.remove_cache_items(options, ['*'])\n\n def _wheels_cache_dir(self, options):\n # type: (Values) -> str\n return os.path.join(options.cache_dir, 'wheels')\n\n def _find_wheels(self, options, pattern):\n # type: (Values, str) -> List[str]\n wheel_dir = self._wheels_cache_dir(options)\n\n # The wheel filename format, as specified in PEP 427, is:\n # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl\n #\n # Additionally, non-alphanumeric values in the distribution are\n # normalized to underscores (_), meaning hyphens can never occur\n # before `-{version}`.\n #\n # Given that information:\n # - If the pattern we're given contains a hyphen (-), the user is\n # providing at least the version. Thus, we can just append `*.whl`\n # to match the rest of it.\n # - If the pattern we're given doesn't contain a hyphen (-), the\n # user is only providing the name. Thus, we append `-*.whl` to\n # match the hyphen before the version, followed by anything else.\n #\n # PEP 427: https://www.python.org/dev/peps/pep-0427/\n pattern = pattern + (\"*.whl\" if \"-\" in pattern else \"-*.whl\")\n\n return filesystem.find_files(wheel_dir, pattern)\n", "path": "src/pip/_internal/commands/cache.py"}]} | 2,537 | 140 |
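The fix here is a plain precondition check: with `no-cache-dir` configured, `options.cache_dir` is `False` rather than a path, so `os.path.join(options.cache_dir, 'wheels')` raises the reported `TypeError`. Guarding once before dispatching to any handler avoids that. A minimal sketch of the guard as a standalone function (the signature and status-code constants are illustrative; the error message is the one added by the patch):

```python
ERROR, SUCCESS = 1, 0  # stand-ins for pip's status-code constants


def run(options, args, handlers, logger):
    # With "no-cache-dir" set, options.cache_dir is False, not a path, so any
    # handler that joins it with 'wheels' would raise TypeError. Bail out early.
    if not options.cache_dir:
        logger.error("pip cache commands can not function since cache is disabled.")
        return ERROR

    if not args or args[0] not in handlers:
        logger.error("Need an action (%s) to perform.", ", ".join(sorted(handlers)))
        return ERROR

    handlers[args[0]](options, args[1:])
    return SUCCESS
```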
gh_patches_debug_39865 | rasdani/github-patches | git_diff | streamlink__streamlink-2586 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New WWE Network Plugin stopped working
## Plugin Issue
- [x] This is a plugin issue and I have read the contribution guidelines.
### Description
Presumably due to updates to the website (I know they have been working on fixing some issues with the new site), the WWE Network plugin is no longer able to find streams.
### Reproduction steps / Explicit stream URLs to test
<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->
1. Install latest streamlink nightly
2. Try using any WWE Network link (i.e. https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316)
### Log output
streamlink https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316 best --loglevel debug
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.6.6
[cli][debug] Streamlink: 1.1.1+74.g0f011ae
[cli][debug] Requests(2.22.0), Socks(1.7.0), Websocket(0.56.0)
[cli][info] Found matching plugin wwenetwork for URL https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316
[cli][debug] Plugin specific arguments:
[cli][debug] --wwenetwork-email= (email)
[cli][debug] --wwenetwork-password= (password)
[plugin.wwenetwork][debug] Attempting login as
[plugin.wwenetwork][debug] API request: POST https://dce-frontoffice.imggaming.com/api/v2/login
[plugin.wwenetwork][debug] Searching for content ID
[plugin.wwenetwork][debug] Loading page config
error: No playable streams found on this URL: https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316
### Additional comments, screenshots, etc.
There had been audio sync problems with the network, although I believe this was across all sources and not a streamlink issue, and their solution for this issue might have caused the plugin to break.
</issue>
<code>
[start of src/streamlink/plugins/wwenetwork.py]
1 from __future__ import print_function
2
3 import json
4 import logging
5 import re
6
7 from streamlink import PluginError
8 from streamlink.plugin import Plugin, PluginArguments, PluginArgument
9 from streamlink.plugin.api import useragents
10 from streamlink.stream import HLSStream
11 from streamlink.utils import memoize
12 from streamlink.compat import urlparse, parse_qsl
13 from streamlink.utils.times import seconds_to_hhmmss
14
15 log = logging.getLogger(__name__)
16
17
18 class WWENetwork(Plugin):
19 url_re = re.compile(r"https?://watch.wwe.com/(channel)?")
20 site_config_re = re.compile(r'''">window.__data = (\{.*?\})</script>''')
21 stream_url = "https://dce-frontoffice.imggaming.com/api/v2/stream/{id}"
22 live_url = "https://dce-frontoffice.imggaming.com/api/v2/event/live"
23 login_url = "https://dce-frontoffice.imggaming.com/api/v2/login"
24 API_KEY = "cca51ea0-7837-40df-a055-75eb6347b2e7"
25
26 customer_id = 16
27 arguments = PluginArguments(
28 PluginArgument(
29 "email",
30 required=True,
31 metavar="EMAIL",
32 requires=["password"],
33 help="""
34 The email associated with your WWE Network account,
35 required to access any WWE Network stream.
36 """
37 ),
38 PluginArgument(
39 "password",
40 sensitive=True,
41 metavar="PASSWORD",
42 help="""
43 A WWE Network account password to use with --wwenetwork-email.
44 """
45 )
46 )
47
48 def __init__(self, url):
49 super(WWENetwork, self).__init__(url)
50 self.session.http.headers.update({"User-Agent": useragents.CHROME})
51 self.auth_token = None
52
53 @classmethod
54 def can_handle_url(cls, url):
55 return cls.url_re.match(url) is not None
56
57 def get_title(self):
58 if self.page_config:
59 for page in self.page_config["cache"]["page"].values():
60 return page['item']['title']
61
62 def request(self, method, url, **kwargs):
63 headers = kwargs.pop("headers", {})
64 headers.update({"x-api-key": self.API_KEY,
65 "Origin": "https://watch.wwe.com",
66 "Referer": "https://watch.wwe.com/signin",
67 "Accept": "application/json",
68 "Realm": "dce.wwe"})
69 if self.auth_token:
70 headers["Authorization"] = "Bearer {0}".format(self.auth_token)
71
72 kwargs["raise_for_status"] = False
73 log.debug("API request: {0} {1}".format(method, url))
74 res = self.session.http.request(method, url, headers=headers, **kwargs)
75 data = self.session.http.json(res)
76
77 if "status" in data and data["status"] != 200:
78 log.debug("API request failed: {0}:{1} ({2})".format(data["status"], data.get("code"), "; ".join(data.get("messages", []))))
79 return data
80
81 def login(self, email, password):
82 self.logger.debug("Attempting login as {0}", email)
83 # sets some required cookies to login
84 data = self.request('POST', self.login_url,
85 data=json.dumps({"id": email, "secret": password}),
86 headers={"Content-Type": "application/json"})
87 if "authorisationToken" in data:
88 self.auth_token = data["authorisationToken"]
89
90 return self.auth_token
91
92 @property
93 @memoize
94 def page_config(self):
95 log.debug("Loading page config")
96 res = self.session.http.get(self.url)
97 m = self.site_config_re.search(res.text)
98 return m and json.loads(m.group(1))
99
100 def _get_media_info(self, content_id):
101 """
102 Get the info about the content, based on the ID
103 :param content_id: contentId for the video
104 :return:
105 """
106 info = self.request('GET', self.stream_url.format(id=content_id))
107 return self.request('GET', info.get("playerUrlCallback"))
108
109 def _get_video_id(self):
110 # check the page to find the contentId
111 log.debug("Searching for content ID")
112 if self.page_config:
113 for page in self.page_config["cache"]["page"].values():
114 try:
115 if page['item']['type'] == "channel":
116 return self._get_live_id()
117 else:
118 return "vod/{id}".format(id=page['item']['customFields']['DiceVideoId'])
119 except KeyError:
120 log.error("Could not find video ID")
121 return
122
123 def _get_live_id(self):
124 log.debug("Loading live event")
125 res = self.request('GET', self.live_url)
126 for event in res.get('events', []):
127 return "event/{sportId}/{propertyId}/{tournamentId}/{id}".format(**event)
128
129 def _get_streams(self):
130 if not self.login(self.get_option("email"), self.get_option("password")):
131 raise PluginError("Login failed")
132
133 try:
134 start_point = int(float(dict(parse_qsl(urlparse(self.url).query)).get("startPoint", 0.0)))
135 if start_point > 0:
136 log.info("Stream will start at {0}".format(seconds_to_hhmmss(start_point)))
137 except ValueError:
138 start_point = 0
139
140 content_id = self._get_video_id()
141
142 if content_id:
143 self.logger.debug("Found content ID: {0}", content_id)
144 info = self._get_media_info(content_id)
145 if info.get("hlsUrl"):
146 for s in HLSStream.parse_variant_playlist(self.session, info["hlsUrl"], start_offset=start_point).items():
147 yield s
148 else:
149 log.error("Could not find the HLS URL")
150
151
152 __plugin__ = WWENetwork
153
[end of src/streamlink/plugins/wwenetwork.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/wwenetwork.py b/src/streamlink/plugins/wwenetwork.py
--- a/src/streamlink/plugins/wwenetwork.py
+++ b/src/streamlink/plugins/wwenetwork.py
@@ -21,6 +21,7 @@
stream_url = "https://dce-frontoffice.imggaming.com/api/v2/stream/{id}"
live_url = "https://dce-frontoffice.imggaming.com/api/v2/event/live"
login_url = "https://dce-frontoffice.imggaming.com/api/v2/login"
+ page_config_url = "https://cdn.watch.wwe.com/api/page"
API_KEY = "cca51ea0-7837-40df-a055-75eb6347b2e7"
customer_id = 16
@@ -55,9 +56,7 @@
return cls.url_re.match(url) is not None
def get_title(self):
- if self.page_config:
- for page in self.page_config["cache"]["page"].values():
- return page['item']['title']
+ return self.item_config['title']
def request(self, method, url, **kwargs):
headers = kwargs.pop("headers", {})
@@ -91,11 +90,22 @@
@property
@memoize
- def page_config(self):
+ def item_config(self):
log.debug("Loading page config")
- res = self.session.http.get(self.url)
- m = self.site_config_re.search(res.text)
- return m and json.loads(m.group(1))
+ p = urlparse(self.url)
+ res = self.session.http.get(self.page_config_url,
+ params=dict(device="web_browser",
+ ff="idp,ldp",
+ item_detail_expand="all",
+ lang="en-US",
+ list_page_size="1",
+ max_list_prefetch="1",
+ path=p.path,
+ segments="es",
+ sub="Registered",
+ text_entry_format="html"))
+ data = self.session.http.json(res)
+ return data["item"]
def _get_media_info(self, content_id):
"""
@@ -109,16 +119,14 @@
def _get_video_id(self):
# check the page to find the contentId
log.debug("Searching for content ID")
- if self.page_config:
- for page in self.page_config["cache"]["page"].values():
- try:
- if page['item']['type'] == "channel":
- return self._get_live_id()
- else:
- return "vod/{id}".format(id=page['item']['customFields']['DiceVideoId'])
- except KeyError:
- log.error("Could not find video ID")
- return
+ try:
+ if self.item_config['type'] == "channel":
+ return self._get_live_id()
+ else:
+ return "vod/{id}".format(id=self.item_config['customFields']['DiceVideoId'])
+ except KeyError:
+ log.error("Could not find video ID")
+ return
def _get_live_id(self):
log.debug("Loading live event")
| {"golden_diff": "diff --git a/src/streamlink/plugins/wwenetwork.py b/src/streamlink/plugins/wwenetwork.py\n--- a/src/streamlink/plugins/wwenetwork.py\n+++ b/src/streamlink/plugins/wwenetwork.py\n@@ -21,6 +21,7 @@\n stream_url = \"https://dce-frontoffice.imggaming.com/api/v2/stream/{id}\"\n live_url = \"https://dce-frontoffice.imggaming.com/api/v2/event/live\"\n login_url = \"https://dce-frontoffice.imggaming.com/api/v2/login\"\n+ page_config_url = \"https://cdn.watch.wwe.com/api/page\"\n API_KEY = \"cca51ea0-7837-40df-a055-75eb6347b2e7\"\n \n customer_id = 16\n@@ -55,9 +56,7 @@\n return cls.url_re.match(url) is not None\n \n def get_title(self):\n- if self.page_config:\n- for page in self.page_config[\"cache\"][\"page\"].values():\n- return page['item']['title']\n+ return self.item_config['title']\n \n def request(self, method, url, **kwargs):\n headers = kwargs.pop(\"headers\", {})\n@@ -91,11 +90,22 @@\n \n @property\n @memoize\n- def page_config(self):\n+ def item_config(self):\n log.debug(\"Loading page config\")\n- res = self.session.http.get(self.url)\n- m = self.site_config_re.search(res.text)\n- return m and json.loads(m.group(1))\n+ p = urlparse(self.url)\n+ res = self.session.http.get(self.page_config_url,\n+ params=dict(device=\"web_browser\",\n+ ff=\"idp,ldp\",\n+ item_detail_expand=\"all\",\n+ lang=\"en-US\",\n+ list_page_size=\"1\",\n+ max_list_prefetch=\"1\",\n+ path=p.path,\n+ segments=\"es\",\n+ sub=\"Registered\",\n+ text_entry_format=\"html\"))\n+ data = self.session.http.json(res)\n+ return data[\"item\"]\n \n def _get_media_info(self, content_id):\n \"\"\"\n@@ -109,16 +119,14 @@\n def _get_video_id(self):\n # check the page to find the contentId\n log.debug(\"Searching for content ID\")\n- if self.page_config:\n- for page in self.page_config[\"cache\"][\"page\"].values():\n- try:\n- if page['item']['type'] == \"channel\":\n- return self._get_live_id()\n- else:\n- return \"vod/{id}\".format(id=page['item']['customFields']['DiceVideoId'])\n- except KeyError:\n- log.error(\"Could not find video ID\")\n- return\n+ try:\n+ if self.item_config['type'] == \"channel\":\n+ return self._get_live_id()\n+ else:\n+ return \"vod/{id}\".format(id=self.item_config['customFields']['DiceVideoId'])\n+ except KeyError:\n+ log.error(\"Could not find video ID\")\n+ return\n \n def _get_live_id(self):\n log.debug(\"Loading live event\")\n", "issue": "New WWE Network Plugin stopped working\n## Plugin Issue\r\n- [x ] This is a plugin issue and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\n\r\nPresumably due to updates to the website (I know they have been working on fixing some issues with the new site), the WWE Network plugin is no longer able to find streams.\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n<!-- How can we reproduce this? Please note the exact steps below using the list format supplied. If you need more steps please add them. -->\r\n\r\n1. Install latest streamlink nightly\r\n2. Try using any WWE Network link (i.e. 
https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316)\r\n\r\n\r\n### Log output\r\n\r\nstreamlink https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316 best --loglevel debug\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.6.6\r\n[cli][debug] Streamlink: 1.1.1+74.g0f011ae\r\n[cli][debug] Requests(2.22.0), Socks(1.7.0), Websocket(0.56.0)\r\n[cli][info] Found matching plugin wwenetwork for URL https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316\r\n[cli][debug] Plugin specific arguments:\r\n[cli][debug] --wwenetwork-email= (email)\r\n[cli][debug] --wwenetwork-password= (password)\r\n[plugin.wwenetwork][debug] Attempting login as \r\n[plugin.wwenetwork][debug] API request: POST https://dce-frontoffice.imggaming.com/api/v2/login\r\n[plugin.wwenetwork][debug] Searching for content ID\r\n[plugin.wwenetwork][debug] Loading page config\r\nerror: No playable streams found on this URL: https://watch.wwe.com/episode/NXT-TakeOver-Toronto-2019-102316\r\n\r\n### Additional comments, screenshots, etc.\r\n\r\nThere had been audio sync problems with the network, although I believe this was across all sources and not a streamlink issue, and their solution for this issue might have caused the plugin to break.\n", "before_files": [{"content": "from __future__ import print_function\n\nimport json\nimport logging\nimport re\n\nfrom streamlink import PluginError\nfrom streamlink.plugin import Plugin, PluginArguments, PluginArgument\nfrom streamlink.plugin.api import useragents\nfrom streamlink.stream import HLSStream\nfrom streamlink.utils import memoize\nfrom streamlink.compat import urlparse, parse_qsl\nfrom streamlink.utils.times import seconds_to_hhmmss\n\nlog = logging.getLogger(__name__)\n\n\nclass WWENetwork(Plugin):\n url_re = re.compile(r\"https?://watch.wwe.com/(channel)?\")\n site_config_re = re.compile(r'''\">window.__data = (\\{.*?\\})</script>''')\n stream_url = \"https://dce-frontoffice.imggaming.com/api/v2/stream/{id}\"\n live_url = \"https://dce-frontoffice.imggaming.com/api/v2/event/live\"\n login_url = \"https://dce-frontoffice.imggaming.com/api/v2/login\"\n API_KEY = \"cca51ea0-7837-40df-a055-75eb6347b2e7\"\n\n customer_id = 16\n arguments = PluginArguments(\n PluginArgument(\n \"email\",\n required=True,\n metavar=\"EMAIL\",\n requires=[\"password\"],\n help=\"\"\"\n The email associated with your WWE Network account,\n required to access any WWE Network stream.\n \"\"\"\n ),\n PluginArgument(\n \"password\",\n sensitive=True,\n metavar=\"PASSWORD\",\n help=\"\"\"\n A WWE Network account password to use with --wwenetwork-email.\n \"\"\"\n )\n )\n\n def __init__(self, url):\n super(WWENetwork, self).__init__(url)\n self.session.http.headers.update({\"User-Agent\": useragents.CHROME})\n self.auth_token = None\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def get_title(self):\n if self.page_config:\n for page in self.page_config[\"cache\"][\"page\"].values():\n return page['item']['title']\n\n def request(self, method, url, **kwargs):\n headers = kwargs.pop(\"headers\", {})\n headers.update({\"x-api-key\": self.API_KEY,\n \"Origin\": \"https://watch.wwe.com\",\n \"Referer\": \"https://watch.wwe.com/signin\",\n \"Accept\": \"application/json\",\n \"Realm\": \"dce.wwe\"})\n if self.auth_token:\n headers[\"Authorization\"] = \"Bearer {0}\".format(self.auth_token)\n\n kwargs[\"raise_for_status\"] = False\n log.debug(\"API request: {0} {1}\".format(method, url))\n res = self.session.http.request(method, url, 
headers=headers, **kwargs)\n data = self.session.http.json(res)\n\n if \"status\" in data and data[\"status\"] != 200:\n log.debug(\"API request failed: {0}:{1} ({2})\".format(data[\"status\"], data.get(\"code\"), \"; \".join(data.get(\"messages\", []))))\n return data\n\n def login(self, email, password):\n self.logger.debug(\"Attempting login as {0}\", email)\n # sets some required cookies to login\n data = self.request('POST', self.login_url,\n data=json.dumps({\"id\": email, \"secret\": password}),\n headers={\"Content-Type\": \"application/json\"})\n if \"authorisationToken\" in data:\n self.auth_token = data[\"authorisationToken\"]\n\n return self.auth_token\n\n @property\n @memoize\n def page_config(self):\n log.debug(\"Loading page config\")\n res = self.session.http.get(self.url)\n m = self.site_config_re.search(res.text)\n return m and json.loads(m.group(1))\n\n def _get_media_info(self, content_id):\n \"\"\"\n Get the info about the content, based on the ID\n :param content_id: contentId for the video\n :return:\n \"\"\"\n info = self.request('GET', self.stream_url.format(id=content_id))\n return self.request('GET', info.get(\"playerUrlCallback\"))\n\n def _get_video_id(self):\n # check the page to find the contentId\n log.debug(\"Searching for content ID\")\n if self.page_config:\n for page in self.page_config[\"cache\"][\"page\"].values():\n try:\n if page['item']['type'] == \"channel\":\n return self._get_live_id()\n else:\n return \"vod/{id}\".format(id=page['item']['customFields']['DiceVideoId'])\n except KeyError:\n log.error(\"Could not find video ID\")\n return\n\n def _get_live_id(self):\n log.debug(\"Loading live event\")\n res = self.request('GET', self.live_url)\n for event in res.get('events', []):\n return \"event/{sportId}/{propertyId}/{tournamentId}/{id}\".format(**event)\n\n def _get_streams(self):\n if not self.login(self.get_option(\"email\"), self.get_option(\"password\")):\n raise PluginError(\"Login failed\")\n\n try:\n start_point = int(float(dict(parse_qsl(urlparse(self.url).query)).get(\"startPoint\", 0.0)))\n if start_point > 0:\n log.info(\"Stream will start at {0}\".format(seconds_to_hhmmss(start_point)))\n except ValueError:\n start_point = 0\n\n content_id = self._get_video_id()\n\n if content_id:\n self.logger.debug(\"Found content ID: {0}\", content_id)\n info = self._get_media_info(content_id)\n if info.get(\"hlsUrl\"):\n for s in HLSStream.parse_variant_playlist(self.session, info[\"hlsUrl\"], start_offset=start_point).items():\n yield s\n else:\n log.error(\"Could not find the HLS URL\")\n\n\n__plugin__ = WWENetwork\n", "path": "src/streamlink/plugins/wwenetwork.py"}]} | 2,699 | 713 |
gh_patches_debug_15710 | rasdani/github-patches | git_diff | biolab__orange3-3454 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OWTSNE crashes when reopening workflow with selected data
##### Orange version
<!-- From menu _Help→About→Version_ or code `Orange.version.full_version` -->
master
##### Expected behavior
I can open a workflow properly.
##### Actual behavior
```
Traceback (most recent call last):
File "/home/pavlin/dev/orange3/Orange/canvas/scheme/widgetsscheme.py", line 1083, in process_signals_for_widget
widget.handleNewSignals()
File "/home/pavlin/dev/orange3/Orange/widgets/visualize/utils/widget.py", line 453, in handleNewSignals
self.commit()
File "/home/pavlin/dev/orange3/Orange/widgets/gui.py", line 2042, in unconditional_commit
do_commit()
File "/home/pavlin/dev/orange3/Orange/widgets/gui.py", line 2050, in do_commit
commit()
File "/home/pavlin/dev/orange3/Orange/widgets/unsupervised/owtsne.py", line 253, in commit
super().commit()
File "/home/pavlin/dev/orange3/Orange/widgets/visualize/utils/widget.py", line 511, in commit
self.send_data()
File "/home/pavlin/dev/orange3/Orange/widgets/visualize/utils/widget.py", line 516, in send_data
group_sel = np.zeros(len(data), dtype=int)
TypeError: object of type 'NoneType' has no len()
```
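The last two frames are the whole story: `_get_projection_data()` returns `None` when no projection has been computed yet, and `send_data()` then calls `len()` on it. A minimal illustration (not Orange code) of the failing expression:

```python
# What the final frame amounts to once the projection data is None:
import numpy as np

data = None                      # _get_projection_data() result before the fix
np.zeros(len(data), dtype=int)   # TypeError: object of type 'NoneType' has no len()
```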
##### Steps to reproduce the behavior
1. Connect File to t-SNE and select some data. Save the workflow
2. Reopen the workflow → t-SNE crashes
##### Additional info (worksheets, data, screenshots, ...)
</issue>
<code>
[start of Orange/widgets/unsupervised/owtsne.py]
1 import numpy as np
2
3 from AnyQt.QtCore import Qt, QTimer
4 from AnyQt.QtWidgets import QFormLayout
5
6 from Orange.data import Table, Domain
7 from Orange.preprocess.preprocess import Preprocess, ApplyDomain
8 from Orange.projection import PCA, TSNE
9 from Orange.widgets import gui
10 from Orange.widgets.settings import Setting, SettingProvider
11 from Orange.widgets.utils.widgetpreview import WidgetPreview
12 from Orange.widgets.visualize.owscatterplotgraph import OWScatterPlotBase
13 from Orange.widgets.visualize.utils.widget import OWDataProjectionWidget
14 from Orange.widgets.widget import Msg, Output
15
16
17 def compute_tsne(data, perplexity, iter, init):
18 negative_gradient_method = 'fft' if len(data.X) > 10000 else 'bh'
19 neighbor_method = 'approx' if len(data.X) > 10000 else 'exact'
20 tsne = TSNE(
21 perplexity=perplexity, n_iter=iter, initialization=init, theta=.8,
22 early_exaggeration_iter=0, negative_gradient_method=negative_gradient_method,
23 neighbors=neighbor_method, random_state=0
24 )
25 return tsne(data)
26
27
28 class OWtSNEGraph(OWScatterPlotBase):
29 def update_coordinates(self):
30 super().update_coordinates()
31 if self.scatterplot_item is not None:
32 self.view_box.setAspectLocked(True, 1)
33
34
35 class OWtSNE(OWDataProjectionWidget):
36 name = "t-SNE"
37 description = "Two-dimensional data projection with t-SNE."
38 icon = "icons/TSNE.svg"
39 priority = 920
40 keywords = ["tsne"]
41
42 settings_version = 3
43 max_iter = Setting(300)
44 perplexity = Setting(30)
45 pca_components = Setting(20)
46
47 GRAPH_CLASS = OWtSNEGraph
48 graph = SettingProvider(OWtSNEGraph)
49 embedding_variables_names = ("t-SNE-x", "t-SNE-y")
50
51 #: Runtime state
52 Running, Finished, Waiting = 1, 2, 3
53
54 class Outputs(OWDataProjectionWidget.Outputs):
55 preprocessor = Output("Preprocessor", Preprocess)
56
57 class Error(OWDataProjectionWidget.Error):
58 not_enough_rows = Msg("Input data needs at least 2 rows")
59 constant_data = Msg("Input data is constant")
60 no_attributes = Msg("Data has no attributes")
61 out_of_memory = Msg("Out of memory")
62 optimization_error = Msg("Error during optimization\n{}")
63 no_valid_data = Msg("No projection due to no valid data")
64
65 def __init__(self):
66 super().__init__()
67 self.pca_data = None
68 self.projection = None
69 self.__update_loop = None
70 # timer for scheduling updates
71 self.__timer = QTimer(self, singleShot=True, interval=1,
72 timeout=self.__next_step)
73 self.__state = OWtSNE.Waiting
74 self.__in_next_step = False
75 self.__draw_similar_pairs = False
76
77 def _add_controls(self):
78 self._add_controls_start_box()
79 super()._add_controls()
80 # Because sc data frequently has many genes,
81 # showing all attributes in combo boxes can cause problems
82 # QUICKFIX: Remove a separator and attributes from order
83 # (leaving just the class and metas)
84 self.models = self.gui.points_models
85 for model in self.models:
86 model.order = model.order[:-2]
87
88 def _add_controls_start_box(self):
89 box = gui.vBox(self.controlArea, True)
90 form = QFormLayout(
91 labelAlignment=Qt.AlignLeft,
92 formAlignment=Qt.AlignLeft,
93 fieldGrowthPolicy=QFormLayout.AllNonFixedFieldsGrow,
94 verticalSpacing=10
95 )
96
97 form.addRow(
98 "Max iterations:",
99 gui.spin(box, self, "max_iter", 1, 2000, step=50))
100
101 form.addRow(
102 "Perplexity:",
103 gui.spin(box, self, "perplexity", 1, 100, step=1))
104
105 box.layout().addLayout(form)
106
107 gui.separator(box, 10)
108 self.runbutton = gui.button(box, self, "Run", callback=self._toggle_run)
109
110 gui.separator(box, 10)
111 gui.hSlider(box, self, "pca_components", label="PCA components:",
112 minValue=2, maxValue=50, step=1)
113
114 def check_data(self):
115 def error(err):
116 err()
117 self.data = None
118
119 super().check_data()
120 if self.data is not None:
121 if len(self.data) < 2:
122 error(self.Error.not_enough_rows)
123 elif not self.data.domain.attributes:
124 error(self.Error.no_attributes)
125 elif not self.data.is_sparse() and \
126 np.allclose(self.data.X - self.data.X[0], 0):
127 error(self.Error.constant_data)
128 elif not self.data.is_sparse() and \
129 np.all(~np.isfinite(self.data.X)):
130 error(self.Error.no_valid_data)
131
132 def get_embedding(self):
133 if self.data is None:
134 self.valid_data = None
135 return None
136 elif self.projection is None:
137 embedding = np.random.normal(size=(len(self.data), 2))
138 else:
139 embedding = self.projection.embedding.X
140 self.valid_data = np.ones(len(embedding), dtype=bool)
141 return embedding
142
143 def _toggle_run(self):
144 if self.__state == OWtSNE.Running:
145 self.stop()
146 self.commit()
147 else:
148 self.start()
149
150 def start(self):
151 if not self.data or self.__state == OWtSNE.Running:
152 self.graph.update_coordinates()
153 elif self.__state in (OWtSNE.Finished, OWtSNE.Waiting):
154 self.__start()
155
156 def stop(self):
157 if self.__state == OWtSNE.Running:
158 self.__set_update_loop(None)
159
160 def pca_preprocessing(self):
161 if self.pca_data is not None and \
162 self.pca_data.X.shape[1] == self.pca_components:
163 return
164 pca = PCA(n_components=self.pca_components, random_state=0)
165 model = pca(self.data)
166 self.pca_data = model(self.data)
167
168 def __start(self):
169 self.pca_preprocessing()
170 initial = 'random' if self.projection is None \
171 else self.projection.embedding.X
172 step_size = 50
173
174 def update_loop(data, max_iter, step, embedding):
175 # NOTE: this code MUST NOT call into QApplication.processEvents
176 done = False
177 iterations_done = 0
178
179 while not done:
180 step_iter = min(max_iter - iterations_done, step)
181 projection = compute_tsne(
182 data, self.perplexity, step_iter, embedding)
183 embedding = projection.embedding.X
184 iterations_done += step_iter
185 if iterations_done >= max_iter:
186 done = True
187
188 yield projection, iterations_done / max_iter
189
190 self.__set_update_loop(update_loop(
191 self.pca_data, self.max_iter, step_size, initial))
192 self.progressBarInit(processEvents=None)
193
194 def __set_update_loop(self, loop):
195 if self.__update_loop is not None:
196 self.__update_loop.close()
197 self.__update_loop = None
198 self.progressBarFinished(processEvents=None)
199
200 self.__update_loop = loop
201
202 if loop is not None:
203 self.setBlocking(True)
204 self.progressBarInit(processEvents=None)
205 self.setStatusMessage("Running")
206 self.runbutton.setText("Stop")
207 self.__state = OWtSNE.Running
208 self.__timer.start()
209 else:
210 self.setBlocking(False)
211 self.setStatusMessage("")
212 self.runbutton.setText("Start")
213 self.__state = OWtSNE.Finished
214 self.__timer.stop()
215
216 def __next_step(self):
217 if self.__update_loop is None:
218 return
219
220 assert not self.__in_next_step
221 self.__in_next_step = True
222
223 loop = self.__update_loop
224 self.Error.out_of_memory.clear()
225 self.Error.optimization_error.clear()
226 try:
227 projection, progress = next(self.__update_loop)
228 assert self.__update_loop is loop
229 except StopIteration:
230 self.__set_update_loop(None)
231 self.unconditional_commit()
232 except MemoryError:
233 self.Error.out_of_memory()
234 self.__set_update_loop(None)
235 except Exception as exc:
236 self.Error.optimization_error(str(exc))
237 self.__set_update_loop(None)
238 else:
239 self.progressBarSet(100.0 * progress, processEvents=None)
240 self.projection = projection
241 self.graph.update_coordinates()
242 self.graph.update_density()
243 # schedule next update
244 self.__timer.start()
245
246 self.__in_next_step = False
247
248 def setup_plot(self):
249 super().setup_plot()
250 self.start()
251
252 def commit(self):
253 super().commit()
254 self.send_preprocessor()
255
256 def _get_projection_data(self):
257 if self.data is None or self.projection is None:
258 return None
259 data = self.data.transform(
260 Domain(self.data.domain.attributes,
261 self.data.domain.class_vars,
262 self.data.domain.metas + self.projection.domain.attributes))
263 data.metas[:, -2:] = self.get_embedding()
264 return data
265
266 def send_preprocessor(self):
267 prep = None
268 if self.data is not None and self.projection is not None:
269 prep = ApplyDomain(self.projection.domain, self.projection.name)
270 self.Outputs.preprocessor.send(prep)
271
272 def clear(self):
273 super().clear()
274 self.__set_update_loop(None)
275 self.__state = OWtSNE.Waiting
276 self.pca_data = None
277 self.projection = None
278
279 @classmethod
280 def migrate_settings(cls, settings, version):
281 if version < 3:
282 if "selection_indices" in settings:
283 settings["selection"] = settings["selection_indices"]
284
285 @classmethod
286 def migrate_context(cls, context, version):
287 if version < 3:
288 values = context.values
289 values["attr_color"] = values["graph"]["attr_color"]
290 values["attr_size"] = values["graph"]["attr_size"]
291 values["attr_shape"] = values["graph"]["attr_shape"]
292 values["attr_label"] = values["graph"]["attr_label"]
293
294
295 if __name__ == "__main__":
296 data = Table("iris")
297 WidgetPreview(OWtSNE).run(
298 set_data=data,
299 set_subset_data=data[np.random.choice(len(data), 10)])
300
[end of Orange/widgets/unsupervised/owtsne.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Orange/widgets/unsupervised/owtsne.py b/Orange/widgets/unsupervised/owtsne.py
--- a/Orange/widgets/unsupervised/owtsne.py
+++ b/Orange/widgets/unsupervised/owtsne.py
@@ -254,12 +254,16 @@
self.send_preprocessor()
def _get_projection_data(self):
- if self.data is None or self.projection is None:
+ if self.data is None:
return None
+ if self.projection is None:
+ variables = self._get_projection_variables()
+ else:
+ variables = self.projection.domain.attributes
data = self.data.transform(
Domain(self.data.domain.attributes,
self.data.domain.class_vars,
- self.data.domain.metas + self.projection.domain.attributes))
+ self.data.domain.metas + variables))
data.metas[:, -2:] = self.get_embedding()
return data
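In words: the patch stops bailing out when only the projection is missing. `_get_projection_variables()` is presumably a helper on the shared projection-widget base class that builds placeholder embedding variables (it is not defined in `owtsne.py` itself), so the method can still attach the random initial embedding returned by `get_embedding()`. A sketch of the patched method, with that assumption spelled out:

```python
# Sketch of _get_projection_data after the fix; _get_projection_variables is
# assumed to come from the OWDataProjectionWidget base class.
def _get_projection_data(self):
    if self.data is None:
        return None                       # no input data -> nothing to send
    if self.projection is None:
        # Workflow just reopened: t-SNE has not run yet, so fall back to
        # placeholder variables instead of returning None (which crashed
        # send_data with len(None)).
        variables = self._get_projection_variables()
    else:
        variables = self.projection.domain.attributes
    data = self.data.transform(
        Domain(self.data.domain.attributes,
               self.data.domain.class_vars,
               self.data.domain.metas + variables))
    data.metas[:, -2:] = self.get_embedding()
    return data
```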
| {"golden_diff": "diff --git a/Orange/widgets/unsupervised/owtsne.py b/Orange/widgets/unsupervised/owtsne.py\n--- a/Orange/widgets/unsupervised/owtsne.py\n+++ b/Orange/widgets/unsupervised/owtsne.py\n@@ -254,12 +254,16 @@\n self.send_preprocessor()\n \n def _get_projection_data(self):\n- if self.data is None or self.projection is None:\n+ if self.data is None:\n return None\n+ if self.projection is None:\n+ variables = self._get_projection_variables()\n+ else:\n+ variables = self.projection.domain.attributes\n data = self.data.transform(\n Domain(self.data.domain.attributes,\n self.data.domain.class_vars,\n- self.data.domain.metas + self.projection.domain.attributes))\n+ self.data.domain.metas + variables))\n data.metas[:, -2:] = self.get_embedding()\n return data\n", "issue": "OWTSNE crashes when reopening workflow with selected data\n##### Orange version\r\n<!-- From menu _Help\u2192About\u2192Version_ or code `Orange.version.full_version` -->\r\nmaster\r\n\r\n##### Expected behavior\r\nI can open a workflow properly.\r\n\r\n\r\n##### Actual behavior\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/pavlin/dev/orange3/Orange/canvas/scheme/widgetsscheme.py\", line 1083, in process_signals_for_widget\r\n widget.handleNewSignals()\r\n File \"/home/pavlin/dev/orange3/Orange/widgets/visualize/utils/widget.py\", line 453, in handleNewSignals\r\n self.commit()\r\n File \"/home/pavlin/dev/orange3/Orange/widgets/gui.py\", line 2042, in unconditional_commit\r\n do_commit()\r\n File \"/home/pavlin/dev/orange3/Orange/widgets/gui.py\", line 2050, in do_commit\r\n commit()\r\n File \"/home/pavlin/dev/orange3/Orange/widgets/unsupervised/owtsne.py\", line 253, in commit\r\n super().commit()\r\n File \"/home/pavlin/dev/orange3/Orange/widgets/visualize/utils/widget.py\", line 511, in commit\r\n self.send_data()\r\n File \"/home/pavlin/dev/orange3/Orange/widgets/visualize/utils/widget.py\", line 516, in send_data\r\n group_sel = np.zeros(len(data), dtype=int)\r\nTypeError: object of type 'NoneType' has no len()\r\n\r\n```\r\n\r\n\r\n##### Steps to reproduce the behavior\r\n1. Connect File to t-SNE and select some data. Save the workflow\r\n2. 
Reopen the workflow \u2192 t-sNE crash\r\n\r\n\r\n##### Additional info (worksheets, data, screenshots, ...)\r\n\r\n\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom AnyQt.QtCore import Qt, QTimer\nfrom AnyQt.QtWidgets import QFormLayout\n\nfrom Orange.data import Table, Domain\nfrom Orange.preprocess.preprocess import Preprocess, ApplyDomain\nfrom Orange.projection import PCA, TSNE\nfrom Orange.widgets import gui\nfrom Orange.widgets.settings import Setting, SettingProvider\nfrom Orange.widgets.utils.widgetpreview import WidgetPreview\nfrom Orange.widgets.visualize.owscatterplotgraph import OWScatterPlotBase\nfrom Orange.widgets.visualize.utils.widget import OWDataProjectionWidget\nfrom Orange.widgets.widget import Msg, Output\n\n\ndef compute_tsne(data, perplexity, iter, init):\n negative_gradient_method = 'fft' if len(data.X) > 10000 else 'bh'\n neighbor_method = 'approx' if len(data.X) > 10000 else 'exact'\n tsne = TSNE(\n perplexity=perplexity, n_iter=iter, initialization=init, theta=.8,\n early_exaggeration_iter=0, negative_gradient_method=negative_gradient_method,\n neighbors=neighbor_method, random_state=0\n )\n return tsne(data)\n\n\nclass OWtSNEGraph(OWScatterPlotBase):\n def update_coordinates(self):\n super().update_coordinates()\n if self.scatterplot_item is not None:\n self.view_box.setAspectLocked(True, 1)\n\n\nclass OWtSNE(OWDataProjectionWidget):\n name = \"t-SNE\"\n description = \"Two-dimensional data projection with t-SNE.\"\n icon = \"icons/TSNE.svg\"\n priority = 920\n keywords = [\"tsne\"]\n\n settings_version = 3\n max_iter = Setting(300)\n perplexity = Setting(30)\n pca_components = Setting(20)\n\n GRAPH_CLASS = OWtSNEGraph\n graph = SettingProvider(OWtSNEGraph)\n embedding_variables_names = (\"t-SNE-x\", \"t-SNE-y\")\n\n #: Runtime state\n Running, Finished, Waiting = 1, 2, 3\n\n class Outputs(OWDataProjectionWidget.Outputs):\n preprocessor = Output(\"Preprocessor\", Preprocess)\n\n class Error(OWDataProjectionWidget.Error):\n not_enough_rows = Msg(\"Input data needs at least 2 rows\")\n constant_data = Msg(\"Input data is constant\")\n no_attributes = Msg(\"Data has no attributes\")\n out_of_memory = Msg(\"Out of memory\")\n optimization_error = Msg(\"Error during optimization\\n{}\")\n no_valid_data = Msg(\"No projection due to no valid data\")\n\n def __init__(self):\n super().__init__()\n self.pca_data = None\n self.projection = None\n self.__update_loop = None\n # timer for scheduling updates\n self.__timer = QTimer(self, singleShot=True, interval=1,\n timeout=self.__next_step)\n self.__state = OWtSNE.Waiting\n self.__in_next_step = False\n self.__draw_similar_pairs = False\n\n def _add_controls(self):\n self._add_controls_start_box()\n super()._add_controls()\n # Because sc data frequently has many genes,\n # showing all attributes in combo boxes can cause problems\n # QUICKFIX: Remove a separator and attributes from order\n # (leaving just the class and metas)\n self.models = self.gui.points_models\n for model in self.models:\n model.order = model.order[:-2]\n\n def _add_controls_start_box(self):\n box = gui.vBox(self.controlArea, True)\n form = QFormLayout(\n labelAlignment=Qt.AlignLeft,\n formAlignment=Qt.AlignLeft,\n fieldGrowthPolicy=QFormLayout.AllNonFixedFieldsGrow,\n verticalSpacing=10\n )\n\n form.addRow(\n \"Max iterations:\",\n gui.spin(box, self, \"max_iter\", 1, 2000, step=50))\n\n form.addRow(\n \"Perplexity:\",\n gui.spin(box, self, \"perplexity\", 1, 100, step=1))\n\n box.layout().addLayout(form)\n\n gui.separator(box, 10)\n 
self.runbutton = gui.button(box, self, \"Run\", callback=self._toggle_run)\n\n gui.separator(box, 10)\n gui.hSlider(box, self, \"pca_components\", label=\"PCA components:\",\n minValue=2, maxValue=50, step=1)\n\n def check_data(self):\n def error(err):\n err()\n self.data = None\n\n super().check_data()\n if self.data is not None:\n if len(self.data) < 2:\n error(self.Error.not_enough_rows)\n elif not self.data.domain.attributes:\n error(self.Error.no_attributes)\n elif not self.data.is_sparse() and \\\n np.allclose(self.data.X - self.data.X[0], 0):\n error(self.Error.constant_data)\n elif not self.data.is_sparse() and \\\n np.all(~np.isfinite(self.data.X)):\n error(self.Error.no_valid_data)\n\n def get_embedding(self):\n if self.data is None:\n self.valid_data = None\n return None\n elif self.projection is None:\n embedding = np.random.normal(size=(len(self.data), 2))\n else:\n embedding = self.projection.embedding.X\n self.valid_data = np.ones(len(embedding), dtype=bool)\n return embedding\n\n def _toggle_run(self):\n if self.__state == OWtSNE.Running:\n self.stop()\n self.commit()\n else:\n self.start()\n\n def start(self):\n if not self.data or self.__state == OWtSNE.Running:\n self.graph.update_coordinates()\n elif self.__state in (OWtSNE.Finished, OWtSNE.Waiting):\n self.__start()\n\n def stop(self):\n if self.__state == OWtSNE.Running:\n self.__set_update_loop(None)\n\n def pca_preprocessing(self):\n if self.pca_data is not None and \\\n self.pca_data.X.shape[1] == self.pca_components:\n return\n pca = PCA(n_components=self.pca_components, random_state=0)\n model = pca(self.data)\n self.pca_data = model(self.data)\n\n def __start(self):\n self.pca_preprocessing()\n initial = 'random' if self.projection is None \\\n else self.projection.embedding.X\n step_size = 50\n\n def update_loop(data, max_iter, step, embedding):\n # NOTE: this code MUST NOT call into QApplication.processEvents\n done = False\n iterations_done = 0\n\n while not done:\n step_iter = min(max_iter - iterations_done, step)\n projection = compute_tsne(\n data, self.perplexity, step_iter, embedding)\n embedding = projection.embedding.X\n iterations_done += step_iter\n if iterations_done >= max_iter:\n done = True\n\n yield projection, iterations_done / max_iter\n\n self.__set_update_loop(update_loop(\n self.pca_data, self.max_iter, step_size, initial))\n self.progressBarInit(processEvents=None)\n\n def __set_update_loop(self, loop):\n if self.__update_loop is not None:\n self.__update_loop.close()\n self.__update_loop = None\n self.progressBarFinished(processEvents=None)\n\n self.__update_loop = loop\n\n if loop is not None:\n self.setBlocking(True)\n self.progressBarInit(processEvents=None)\n self.setStatusMessage(\"Running\")\n self.runbutton.setText(\"Stop\")\n self.__state = OWtSNE.Running\n self.__timer.start()\n else:\n self.setBlocking(False)\n self.setStatusMessage(\"\")\n self.runbutton.setText(\"Start\")\n self.__state = OWtSNE.Finished\n self.__timer.stop()\n\n def __next_step(self):\n if self.__update_loop is None:\n return\n\n assert not self.__in_next_step\n self.__in_next_step = True\n\n loop = self.__update_loop\n self.Error.out_of_memory.clear()\n self.Error.optimization_error.clear()\n try:\n projection, progress = next(self.__update_loop)\n assert self.__update_loop is loop\n except StopIteration:\n self.__set_update_loop(None)\n self.unconditional_commit()\n except MemoryError:\n self.Error.out_of_memory()\n self.__set_update_loop(None)\n except Exception as exc:\n 
self.Error.optimization_error(str(exc))\n self.__set_update_loop(None)\n else:\n self.progressBarSet(100.0 * progress, processEvents=None)\n self.projection = projection\n self.graph.update_coordinates()\n self.graph.update_density()\n # schedule next update\n self.__timer.start()\n\n self.__in_next_step = False\n\n def setup_plot(self):\n super().setup_plot()\n self.start()\n\n def commit(self):\n super().commit()\n self.send_preprocessor()\n\n def _get_projection_data(self):\n if self.data is None or self.projection is None:\n return None\n data = self.data.transform(\n Domain(self.data.domain.attributes,\n self.data.domain.class_vars,\n self.data.domain.metas + self.projection.domain.attributes))\n data.metas[:, -2:] = self.get_embedding()\n return data\n\n def send_preprocessor(self):\n prep = None\n if self.data is not None and self.projection is not None:\n prep = ApplyDomain(self.projection.domain, self.projection.name)\n self.Outputs.preprocessor.send(prep)\n\n def clear(self):\n super().clear()\n self.__set_update_loop(None)\n self.__state = OWtSNE.Waiting\n self.pca_data = None\n self.projection = None\n\n @classmethod\n def migrate_settings(cls, settings, version):\n if version < 3:\n if \"selection_indices\" in settings:\n settings[\"selection\"] = settings[\"selection_indices\"]\n\n @classmethod\n def migrate_context(cls, context, version):\n if version < 3:\n values = context.values\n values[\"attr_color\"] = values[\"graph\"][\"attr_color\"]\n values[\"attr_size\"] = values[\"graph\"][\"attr_size\"]\n values[\"attr_shape\"] = values[\"graph\"][\"attr_shape\"]\n values[\"attr_label\"] = values[\"graph\"][\"attr_label\"]\n\n\nif __name__ == \"__main__\":\n data = Table(\"iris\")\n WidgetPreview(OWtSNE).run(\n set_data=data,\n set_subset_data=data[np.random.choice(len(data), 10)])\n", "path": "Orange/widgets/unsupervised/owtsne.py"}]} | 4,036 | 212 |
gh_patches_debug_16097 | rasdani/github-patches | git_diff | quantumlib__Cirq-3016 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Changing qubits in PointOptimizer causes ValueError
The cirq.PointOptimizer class assumes that an optimizer won't change qubits (or at least won't change qubits in a way that pushes the new gate backwards). If two gates are modified in the same moment to use the same qubits, a ValueError is raised.
A check similar to https://github.com/quantumlib/Cirq/blob/master/cirq/circuits/optimization_pass.py#L128
needs to be added at or around Line 148 to detect this condition and adjust the 'i' index or frontier accordingly.
A minimal example that causes the issue is below.
```
class EverythingIs42(cirq.PointOptimizer):
def optimization_at(self, circuit, index, op):
if len(op.qubits) == 1:
gate = op.gate
new_op = op.gate(cirq.LineQubit(42))
return cirq.PointOptimizationSummary(clear_span=1,
clear_qubits=op.qubits,
new_operations=new_op)
c=cirq.Circuit(cirq.X(cirq.LineQubit(0)), cirq.X(cirq.LineQubit(1)))
EverythingIs42().optimize_circuit(c)
```
</issue>
<code>
[start of cirq/circuits/optimization_pass.py]
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Defines the OptimizationPass type."""
16 from typing import (Dict, Callable, Iterable, Optional, Sequence, TYPE_CHECKING,
17 Tuple, cast)
18
19 import abc
20 from collections import defaultdict
21
22 from cirq import ops
23 from cirq.circuits.circuit import Circuit
24
25 if TYPE_CHECKING:
26 import cirq
27 from cirq.ops import Qid
28
29
30 class PointOptimizationSummary:
31 """A description of a local optimization to perform."""
32
33 def __init__(self,
34 clear_span: int,
35 clear_qubits: Iterable['cirq.Qid'],
36 new_operations: 'cirq.OP_TREE',
37 preserve_moments: bool = False) -> None:
38 """
39 Args:
40 clear_span: Defines the range of moments to affect. Specifically,
41 refers to the indices in range(start, start+clear_span) where
42 start is an index known from surrounding context.
43 clear_qubits: Defines the set of qubits that should be cleared
44 with each affected moment.
45 new_operations: The operations to replace the cleared out
46 operations with.
47 preserve_moments: If set, `cirq.Moment` instances within
48 `new_operations` will be preserved exactly. Normally the
49 operations would be repacked to fit better into the
50 target space, which may move them between moments.
51 Please be advised that a PointOptimizer consuming this
52 summary will flatten operations no matter what,
53 see https://github.com/quantumlib/Cirq/issues/2406.
54 """
55 self.new_operations = tuple(
56 ops.flatten_op_tree(new_operations,
57 preserve_moments=preserve_moments))
58 self.clear_span = clear_span
59 self.clear_qubits = tuple(clear_qubits)
60
61 def __eq__(self, other):
62 if not isinstance(other, type(self)):
63 return NotImplemented
64 return (self.clear_span == other.clear_span and
65 self.clear_qubits == other.clear_qubits and
66 self.new_operations == other.new_operations)
67
68 def __ne__(self, other):
69 return not self == other
70
71 def __hash__(self) -> int:
72 return hash((PointOptimizationSummary,
73 self.clear_span,
74 self.clear_qubits,
75 self.new_operations))
76
77 def __repr__(self) -> str:
78 return (f'cirq.PointOptimizationSummary({self.clear_span!r}, '
79 f'{self.clear_qubits!r}, {self.new_operations!r})')
80
81
82 class PointOptimizer:
83 """Makes circuit improvements focused on a specific location."""
84
85 def __init__(self,
86 post_clean_up: Callable[[Sequence['cirq.Operation']], ops.
87 OP_TREE] = lambda op_list: op_list
88 ) -> None:
89 """
90 Args:
91 post_clean_up: This function is called on each set of optimized
92 operations before they are put into the circuit to replace the
93 old operations.
94 """
95 self.post_clean_up = post_clean_up
96
97 def __call__(self, circuit: Circuit):
98 return self.optimize_circuit(circuit)
99
100 @abc.abstractmethod
101 def optimization_at(self, circuit: Circuit, index: int, op: 'cirq.Operation'
102 ) -> Optional[PointOptimizationSummary]:
103 """Describes how to change operations near the given location.
104
105 For example, this method could realize that the given operation is an
106 X gate and that in the very next moment there is a Z gate. It would
107 indicate that they should be combined into a Y gate by returning
108 PointOptimizationSummary(clear_span=2,
109 clear_qubits=op.qubits,
110 new_operations=cirq.Y(op.qubits[0]))
111
112 Args:
113 circuit: The circuit to improve.
114 index: The index of the moment with the operation to focus on.
115 op: The operation to focus improvements upon.
116
117 Returns:
118 A description of the optimization to perform, or else None if no
119 change should be made.
120 """
121
122 def optimize_circuit(self, circuit: Circuit):
123 frontier: Dict['Qid', int] = defaultdict(lambda: 0)
124 i = 0
125 while i < len(circuit): # Note: circuit may mutate as we go.
126 for op in circuit[i].operations:
127 # Don't touch stuff inserted by previous optimizations.
128 if any(frontier[q] > i for q in op.qubits):
129 continue
130
131 # Skip if an optimization removed the circuit underneath us.
132 if i >= len(circuit):
133 continue
134 # Skip if an optimization removed the op we're considering.
135 if op not in circuit[i].operations:
136 continue
137 opt = self.optimization_at(circuit, i, op)
138 # Skip if the optimization did nothing.
139 if opt is None:
140 continue
141
142 # Clear target area, and insert new operations.
143 circuit.clear_operations_touching(
144 opt.clear_qubits,
145 [e for e in range(i, i + opt.clear_span)])
146 new_operations = self.post_clean_up(
147 cast(Tuple[ops.Operation], opt.new_operations))
148 circuit.insert_at_frontier(new_operations, i, frontier)
149
150 i += 1
151
[end of cirq/circuits/optimization_pass.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cirq/circuits/optimization_pass.py b/cirq/circuits/optimization_pass.py
--- a/cirq/circuits/optimization_pass.py
+++ b/cirq/circuits/optimization_pass.py
@@ -145,6 +145,18 @@
[e for e in range(i, i + opt.clear_span)])
new_operations = self.post_clean_up(
cast(Tuple[ops.Operation], opt.new_operations))
- circuit.insert_at_frontier(new_operations, i, frontier)
+ flat_new_operations = tuple(ops.flatten_to_ops(new_operations))
+
+ new_qubits = set()
+ for flat_op in flat_new_operations:
+ for q in flat_op.qubits:
+ new_qubits.add(q)
+
+ if not new_qubits.issubset(set(opt.clear_qubits)):
+ raise ValueError(
+ 'New operations in PointOptimizer should not act on new'
+ ' qubits.')
+
+ circuit.insert_at_frontier(flat_new_operations, i, frontier)
i += 1
| {"golden_diff": "diff --git a/cirq/circuits/optimization_pass.py b/cirq/circuits/optimization_pass.py\n--- a/cirq/circuits/optimization_pass.py\n+++ b/cirq/circuits/optimization_pass.py\n@@ -145,6 +145,18 @@\n [e for e in range(i, i + opt.clear_span)])\n new_operations = self.post_clean_up(\n cast(Tuple[ops.Operation], opt.new_operations))\n- circuit.insert_at_frontier(new_operations, i, frontier)\n \n+ flat_new_operations = tuple(ops.flatten_to_ops(new_operations))\n+\n+ new_qubits = set()\n+ for flat_op in flat_new_operations:\n+ for q in flat_op.qubits:\n+ new_qubits.add(q)\n+\n+ if not new_qubits.issubset(set(opt.clear_qubits)):\n+ raise ValueError(\n+ 'New operations in PointOptimizer should not act on new'\n+ ' qubits.')\n+\n+ circuit.insert_at_frontier(flat_new_operations, i, frontier)\n i += 1\n", "issue": "Changing qubits in PointOptimizer causes ValueError\nThe cirq.PointOptimizer class assumes that an optimizer won't change qubits (or at least won't change qubits in a way that pushes the new gate backwards). If two gates are modified in the same moment to use the same qubits, a ValueError is raised.\r\n\r\nA check similar to https://github.com/quantumlib/Cirq/blob/master/cirq/circuits/optimization_pass.py#L128\r\nneeds to be added at or around Line 148 to detect this condition and adjust the 'i' index or frontier accordingly.\r\n\r\nA minimal example that causes the issue is below.\r\n```\r\nclass EverythingIs42(cirq.PointOptimizer):\r\n def optimization_at(self, circuit, index, op):\r\n if len(op.qubits) == 1:\r\n gate = op.gate\r\n new_op = op.gate(cirq.LineQubit(42))\r\n return cirq.PointOptimizationSummary(clear_span=1,\r\n clear_qubits=op.qubits,\r\n new_operations=new_op)\r\n\r\nc=cirq.Circuit(cirq.X(cirq.LineQubit(0)), cirq.X(cirq.LineQubit(1)))\r\nEverythingIs42().optimize_circuit(c)\r\n```\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Defines the OptimizationPass type.\"\"\"\nfrom typing import (Dict, Callable, Iterable, Optional, Sequence, TYPE_CHECKING,\n Tuple, cast)\n\nimport abc\nfrom collections import defaultdict\n\nfrom cirq import ops\nfrom cirq.circuits.circuit import Circuit\n\nif TYPE_CHECKING:\n import cirq\n from cirq.ops import Qid\n\n\nclass PointOptimizationSummary:\n \"\"\"A description of a local optimization to perform.\"\"\"\n\n def __init__(self,\n clear_span: int,\n clear_qubits: Iterable['cirq.Qid'],\n new_operations: 'cirq.OP_TREE',\n preserve_moments: bool = False) -> None:\n \"\"\"\n Args:\n clear_span: Defines the range of moments to affect. Specifically,\n refers to the indices in range(start, start+clear_span) where\n start is an index known from surrounding context.\n clear_qubits: Defines the set of qubits that should be cleared\n with each affected moment.\n new_operations: The operations to replace the cleared out\n operations with.\n preserve_moments: If set, `cirq.Moment` instances within\n `new_operations` will be preserved exactly. 
Normally the\n operations would be repacked to fit better into the\n target space, which may move them between moments.\n Please be advised that a PointOptimizer consuming this\n summary will flatten operations no matter what,\n see https://github.com/quantumlib/Cirq/issues/2406.\n \"\"\"\n self.new_operations = tuple(\n ops.flatten_op_tree(new_operations,\n preserve_moments=preserve_moments))\n self.clear_span = clear_span\n self.clear_qubits = tuple(clear_qubits)\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return NotImplemented\n return (self.clear_span == other.clear_span and\n self.clear_qubits == other.clear_qubits and\n self.new_operations == other.new_operations)\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self) -> int:\n return hash((PointOptimizationSummary,\n self.clear_span,\n self.clear_qubits,\n self.new_operations))\n\n def __repr__(self) -> str:\n return (f'cirq.PointOptimizationSummary({self.clear_span!r}, '\n f'{self.clear_qubits!r}, {self.new_operations!r})')\n\n\nclass PointOptimizer:\n \"\"\"Makes circuit improvements focused on a specific location.\"\"\"\n\n def __init__(self,\n post_clean_up: Callable[[Sequence['cirq.Operation']], ops.\n OP_TREE] = lambda op_list: op_list\n ) -> None:\n \"\"\"\n Args:\n post_clean_up: This function is called on each set of optimized\n operations before they are put into the circuit to replace the\n old operations.\n \"\"\"\n self.post_clean_up = post_clean_up\n\n def __call__(self, circuit: Circuit):\n return self.optimize_circuit(circuit)\n\n @abc.abstractmethod\n def optimization_at(self, circuit: Circuit, index: int, op: 'cirq.Operation'\n ) -> Optional[PointOptimizationSummary]:\n \"\"\"Describes how to change operations near the given location.\n\n For example, this method could realize that the given operation is an\n X gate and that in the very next moment there is a Z gate. It would\n indicate that they should be combined into a Y gate by returning\n PointOptimizationSummary(clear_span=2,\n clear_qubits=op.qubits,\n new_operations=cirq.Y(op.qubits[0]))\n\n Args:\n circuit: The circuit to improve.\n index: The index of the moment with the operation to focus on.\n op: The operation to focus improvements upon.\n\n Returns:\n A description of the optimization to perform, or else None if no\n change should be made.\n \"\"\"\n\n def optimize_circuit(self, circuit: Circuit):\n frontier: Dict['Qid', int] = defaultdict(lambda: 0)\n i = 0\n while i < len(circuit): # Note: circuit may mutate as we go.\n for op in circuit[i].operations:\n # Don't touch stuff inserted by previous optimizations.\n if any(frontier[q] > i for q in op.qubits):\n continue\n\n # Skip if an optimization removed the circuit underneath us.\n if i >= len(circuit):\n continue\n # Skip if an optimization removed the op we're considering.\n if op not in circuit[i].operations:\n continue\n opt = self.optimization_at(circuit, i, op)\n # Skip if the optimization did nothing.\n if opt is None:\n continue\n\n # Clear target area, and insert new operations.\n circuit.clear_operations_touching(\n opt.clear_qubits,\n [e for e in range(i, i + opt.clear_span)])\n new_operations = self.post_clean_up(\n cast(Tuple[ops.Operation], opt.new_operations))\n circuit.insert_at_frontier(new_operations, i, frontier)\n\n i += 1\n", "path": "cirq/circuits/optimization_pass.py"}]} | 2,388 | 236 |
gh_patches_debug_4121 | rasdani/github-patches | git_diff | aio-libs__aiohttp-5012 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Connections are not correctly kept after HEAD requests
When the client makes a HEAD request, it parses the response as if it were a regular message.
But the HTTP spec has a nice trick: responses to HEAD requests `SHOULD` carry the exact same headers as the corresponding GET request would. This includes `Content-Length` or `Transfer-Encoding: chunked` headers.
This means that when the client receives a HEAD response with a `Content-Length: 5` header, it should not expect any body and `payload.is_eof()` should be True immediately, not after receiving 5 more bytes.
Currently `ResponseHandler.should_close` is True for HEAD requests with a `Content-Length: …` because `payload.is_eof()` is False when it should be True, hence the underlying connection is closed and not re-used when it could be.
This is only inefficient on the client side, not at all a bug, and does not produce any wrong results.
(also, I was mistaken a few minutes ago: returning a Content-Length header with an empty body is perfectly valid, and the `allow_head=True` parameter of the `add_get` method works perfectly well, no problem at all on the server side)
reproducer:
==========
https://github.com/aio-libs/aiohttp/issues/1769#issuecomment-614775045
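For convenience, a minimal reproducer along these lines (the linked comment has the original; the code and URL below are just an illustrative stand-in):

```python
# Hypothetical reproducer sketch, not the one from the linked comment.
import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        for _ in range(2):
            async with session.head("http://example.com/") as resp:
                pass
        # Before the fix, each HEAD response left should_close=True because the
        # parser still expected a body, so the second request could not reuse
        # the first connection.

asyncio.run(main())
```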
</issue>
<code>
[start of aiohttp/client_proto.py]
1 import asyncio
2 from contextlib import suppress
3 from typing import Any, Optional, Tuple
4
5 from .base_protocol import BaseProtocol
6 from .client_exceptions import (
7 ClientOSError,
8 ClientPayloadError,
9 ServerDisconnectedError,
10 ServerTimeoutError,
11 )
12 from .helpers import BaseTimerContext, set_exception, set_result
13 from .http import HttpResponseParser, RawResponseMessage
14 from .streams import EMPTY_PAYLOAD, DataQueue, StreamReader
15
16
17 class ResponseHandler(BaseProtocol,
18 DataQueue[Tuple[RawResponseMessage, StreamReader]]):
19 """Helper class to adapt between Protocol and StreamReader."""
20
21 def __init__(self,
22 loop: asyncio.AbstractEventLoop) -> None:
23 BaseProtocol.__init__(self, loop=loop)
24 DataQueue.__init__(self, loop)
25
26 self._should_close = False
27
28 self._payload = None
29 self._skip_payload = False
30 self._payload_parser = None
31
32 self._timer = None
33
34 self._tail = b''
35 self._upgraded = False
36 self._parser = None # type: Optional[HttpResponseParser]
37
38 self._read_timeout = None # type: Optional[float]
39 self._read_timeout_handle = None # type: Optional[asyncio.TimerHandle]
40
41 self.closed = self._loop.create_future() # type: asyncio.Future[None]
42
43 @property
44 def upgraded(self) -> bool:
45 return self._upgraded
46
47 @property
48 def should_close(self) -> bool:
49 if (self._payload is not None and
50 not self._payload.is_eof() or self._upgraded):
51 return True
52
53 return (self._should_close or self._upgraded or
54 self.exception() is not None or
55 self._payload_parser is not None or
56 len(self) > 0 or bool(self._tail))
57
58 def force_close(self) -> None:
59 self._should_close = True
60
61 def close(self) -> None:
62 transport = self.transport
63 if transport is not None:
64 transport.close()
65 self.transport = None
66 self._payload = None
67 self._drop_timeout()
68
69 def is_connected(self) -> bool:
70 return self.transport is not None
71
72 def connection_lost(self, exc: Optional[BaseException]) -> None:
73 self._drop_timeout()
74
75 if exc is not None:
76 set_exception(self.closed, exc)
77 else:
78 set_result(self.closed, None)
79
80 if self._payload_parser is not None:
81 with suppress(Exception):
82 self._payload_parser.feed_eof()
83
84 uncompleted = None
85 if self._parser is not None:
86 try:
87 uncompleted = self._parser.feed_eof()
88 except Exception:
89 if self._payload is not None:
90 self._payload.set_exception(
91 ClientPayloadError(
92 'Response payload is not completed'))
93
94 if not self.is_eof():
95 if isinstance(exc, OSError):
96 exc = ClientOSError(*exc.args)
97 if exc is None:
98 exc = ServerDisconnectedError(uncompleted)
99 # assigns self._should_close to True as side effect,
100 # we do it anyway below
101 self.set_exception(exc)
102
103 self._should_close = True
104 self._parser = None
105 self._payload = None
106 self._payload_parser = None
107 self._reading_paused = False
108
109 super().connection_lost(exc)
110
111 def eof_received(self) -> None:
112 # should call parser.feed_eof() most likely
113 self._drop_timeout()
114
115 def pause_reading(self) -> None:
116 super().pause_reading()
117 self._drop_timeout()
118
119 def resume_reading(self) -> None:
120 super().resume_reading()
121 self._reschedule_timeout()
122
123 def set_exception(self, exc: BaseException) -> None:
124 self._should_close = True
125 self._drop_timeout()
126 super().set_exception(exc)
127
128 def set_parser(self, parser: Any, payload: Any) -> None:
129 # TODO: actual types are:
130 # parser: WebSocketReader
131 # payload: FlowControlDataQueue
132         # but they are not generic enough
133 # Need an ABC for both types
134 self._payload = payload
135 self._payload_parser = parser
136
137 self._drop_timeout()
138
139 if self._tail:
140 data, self._tail = self._tail, b''
141 self.data_received(data)
142
143 def set_response_params(self, *, timer: BaseTimerContext=None,
144 skip_payload: bool=False,
145 read_until_eof: bool=False,
146 auto_decompress: bool=True,
147 read_timeout: Optional[float]=None) -> None:
148 self._skip_payload = skip_payload
149
150 self._read_timeout = read_timeout
151 self._reschedule_timeout()
152
153 self._parser = HttpResponseParser(
154 self, self._loop, timer=timer,
155 payload_exception=ClientPayloadError,
156 read_until_eof=read_until_eof,
157 auto_decompress=auto_decompress)
158
159 if self._tail:
160 data, self._tail = self._tail, b''
161 self.data_received(data)
162
163 def _drop_timeout(self) -> None:
164 if self._read_timeout_handle is not None:
165 self._read_timeout_handle.cancel()
166 self._read_timeout_handle = None
167
168 def _reschedule_timeout(self) -> None:
169 timeout = self._read_timeout
170 if self._read_timeout_handle is not None:
171 self._read_timeout_handle.cancel()
172
173 if timeout:
174 self._read_timeout_handle = self._loop.call_later(
175 timeout, self._on_read_timeout)
176 else:
177 self._read_timeout_handle = None
178
179 def _on_read_timeout(self) -> None:
180 exc = ServerTimeoutError("Timeout on reading data from socket")
181 self.set_exception(exc)
182 if self._payload is not None:
183 self._payload.set_exception(exc)
184
185 def data_received(self, data: bytes) -> None:
186 self._reschedule_timeout()
187
188 if not data:
189 return
190
191 # custom payload parser
192 if self._payload_parser is not None:
193 eof, tail = self._payload_parser.feed_data(data)
194 if eof:
195 self._payload = None
196 self._payload_parser = None
197
198 if tail:
199 self.data_received(tail)
200 return
201 else:
202 if self._upgraded or self._parser is None:
203 # i.e. websocket connection, websocket parser is not set yet
204 self._tail += data
205 else:
206 # parse http messages
207 try:
208 messages, upgraded, tail = self._parser.feed_data(data)
209 except BaseException as exc:
210 if self.transport is not None:
211 # connection.release() could be called BEFORE
212 # data_received(), the transport is already
213 # closed in this case
214 self.transport.close()
215 # should_close is True after the call
216 self.set_exception(exc)
217 return
218
219 self._upgraded = upgraded
220
221 payload = None
222 for message, payload in messages:
223 if message.should_close:
224 self._should_close = True
225
226 self._payload = payload
227
228 if self._skip_payload or message.code in (204, 304):
229 self.feed_data((message, EMPTY_PAYLOAD), 0) # type: ignore # noqa
230 else:
231 self.feed_data((message, payload), 0)
232 if payload is not None:
233 # new message(s) was processed
234 # register timeout handler unsubscribing
235 # either on end-of-stream or immediately for
236 # EMPTY_PAYLOAD
237 if payload is not EMPTY_PAYLOAD:
238 payload.on_eof(self._drop_timeout)
239 else:
240 self._drop_timeout()
241
242 if tail:
243 if upgraded:
244 self.data_received(tail)
245 else:
246 self._tail = tail
247
[end of aiohttp/client_proto.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/aiohttp/client_proto.py b/aiohttp/client_proto.py
--- a/aiohttp/client_proto.py
+++ b/aiohttp/client_proto.py
@@ -153,6 +153,7 @@
self._parser = HttpResponseParser(
self, self._loop, timer=timer,
payload_exception=ClientPayloadError,
+ response_with_body=not skip_payload,
read_until_eof=read_until_eof,
auto_decompress=auto_decompress)
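The one-line fix hands the client's own knowledge of the request method down to the parser: when the payload is being skipped (HEAD requests), `response_with_body=False` tells the parser that these responses carry no body even if `Content-Length`/`Transfer-Encoding` headers are present, so the payload is at EOF immediately and the first check in `should_close` no longer forces the connection to be dropped. A simplified sketch of that check (the `other_state_dirty` flag stands in for the remaining conditions):

```python
# Simplified model of ResponseHandler.should_close, for illustration only.
def should_close(payload, upgraded, other_state_dirty):
    # Before the patch: a HEAD response with "Content-Length: 5" produced a
    # payload that never reached EOF, so this returned True and the
    # connection was not returned to the pool.
    if payload is not None and not payload.is_eof() or upgraded:
        return True
    return other_state_dirty
```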
| {"golden_diff": "diff --git a/aiohttp/client_proto.py b/aiohttp/client_proto.py\n--- a/aiohttp/client_proto.py\n+++ b/aiohttp/client_proto.py\n@@ -153,6 +153,7 @@\n self._parser = HttpResponseParser(\n self, self._loop, timer=timer,\n payload_exception=ClientPayloadError,\n+ response_with_body=not skip_payload,\n read_until_eof=read_until_eof,\n auto_decompress=auto_decompress)\n", "issue": "Connections are not correctly kept after HEAD requests\nWhen the client makes a HEAD request, it parses the response as if it was a regular message.\r\n\r\nBut the HTTP specs has a nice trick: HEAD requests `SHOULD` respond with the exact same headers than the corresponding GET request. This includes `Content-Length` or `Transfer-Encoding: chunked` headers.\r\n\r\nThis means that when the client receives a HEAD response with a `Content-Length: 5` response, it should not expect any body and `payload.is_eof()` should be True immediately, not after receiving 5\u00a0more bytes.\r\n\r\nCurrently `ResponseHandler.should_close` is True for HEAD requests with a `Content-Length: \u2026` because `payload.is_eof()` is False when it should be True, hence the underlying connection is closed and not re-used when it could be.\r\n\r\n\r\nThis is only inefficient on the client side, not at all a bug, and does not produce any wrong results.\r\n\r\n\r\n(also, I was mistaken a few minutes ago: returning a Content-Length header with an empty body is perfectly valid, and the `allow_head=True` parameter of the `add_get` method works perfectly well, no problem at all on the server side)\r\n\r\n\r\nreproducer:\r\n==========\r\n\r\nhttps://github.com/aio-libs/aiohttp/issues/1769#issuecomment-614775045\n", "before_files": [{"content": "import asyncio\nfrom contextlib import suppress\nfrom typing import Any, Optional, Tuple\n\nfrom .base_protocol import BaseProtocol\nfrom .client_exceptions import (\n ClientOSError,\n ClientPayloadError,\n ServerDisconnectedError,\n ServerTimeoutError,\n)\nfrom .helpers import BaseTimerContext, set_exception, set_result\nfrom .http import HttpResponseParser, RawResponseMessage\nfrom .streams import EMPTY_PAYLOAD, DataQueue, StreamReader\n\n\nclass ResponseHandler(BaseProtocol,\n DataQueue[Tuple[RawResponseMessage, StreamReader]]):\n \"\"\"Helper class to adapt between Protocol and StreamReader.\"\"\"\n\n def __init__(self,\n loop: asyncio.AbstractEventLoop) -> None:\n BaseProtocol.__init__(self, loop=loop)\n DataQueue.__init__(self, loop)\n\n self._should_close = False\n\n self._payload = None\n self._skip_payload = False\n self._payload_parser = None\n\n self._timer = None\n\n self._tail = b''\n self._upgraded = False\n self._parser = None # type: Optional[HttpResponseParser]\n\n self._read_timeout = None # type: Optional[float]\n self._read_timeout_handle = None # type: Optional[asyncio.TimerHandle]\n\n self.closed = self._loop.create_future() # type: asyncio.Future[None]\n\n @property\n def upgraded(self) -> bool:\n return self._upgraded\n\n @property\n def should_close(self) -> bool:\n if (self._payload is not None and\n not self._payload.is_eof() or self._upgraded):\n return True\n\n return (self._should_close or self._upgraded or\n self.exception() is not None or\n self._payload_parser is not None or\n len(self) > 0 or bool(self._tail))\n\n def force_close(self) -> None:\n self._should_close = True\n\n def close(self) -> None:\n transport = self.transport\n if transport is not None:\n transport.close()\n self.transport = None\n self._payload = None\n self._drop_timeout()\n\n def 
is_connected(self) -> bool:\n return self.transport is not None\n\n def connection_lost(self, exc: Optional[BaseException]) -> None:\n self._drop_timeout()\n\n if exc is not None:\n set_exception(self.closed, exc)\n else:\n set_result(self.closed, None)\n\n if self._payload_parser is not None:\n with suppress(Exception):\n self._payload_parser.feed_eof()\n\n uncompleted = None\n if self._parser is not None:\n try:\n uncompleted = self._parser.feed_eof()\n except Exception:\n if self._payload is not None:\n self._payload.set_exception(\n ClientPayloadError(\n 'Response payload is not completed'))\n\n if not self.is_eof():\n if isinstance(exc, OSError):\n exc = ClientOSError(*exc.args)\n if exc is None:\n exc = ServerDisconnectedError(uncompleted)\n # assigns self._should_close to True as side effect,\n # we do it anyway below\n self.set_exception(exc)\n\n self._should_close = True\n self._parser = None\n self._payload = None\n self._payload_parser = None\n self._reading_paused = False\n\n super().connection_lost(exc)\n\n def eof_received(self) -> None:\n # should call parser.feed_eof() most likely\n self._drop_timeout()\n\n def pause_reading(self) -> None:\n super().pause_reading()\n self._drop_timeout()\n\n def resume_reading(self) -> None:\n super().resume_reading()\n self._reschedule_timeout()\n\n def set_exception(self, exc: BaseException) -> None:\n self._should_close = True\n self._drop_timeout()\n super().set_exception(exc)\n\n def set_parser(self, parser: Any, payload: Any) -> None:\n # TODO: actual types are:\n # parser: WebSocketReader\n # payload: FlowControlDataQueue\n # but they are not generi enough\n # Need an ABC for both types\n self._payload = payload\n self._payload_parser = parser\n\n self._drop_timeout()\n\n if self._tail:\n data, self._tail = self._tail, b''\n self.data_received(data)\n\n def set_response_params(self, *, timer: BaseTimerContext=None,\n skip_payload: bool=False,\n read_until_eof: bool=False,\n auto_decompress: bool=True,\n read_timeout: Optional[float]=None) -> None:\n self._skip_payload = skip_payload\n\n self._read_timeout = read_timeout\n self._reschedule_timeout()\n\n self._parser = HttpResponseParser(\n self, self._loop, timer=timer,\n payload_exception=ClientPayloadError,\n read_until_eof=read_until_eof,\n auto_decompress=auto_decompress)\n\n if self._tail:\n data, self._tail = self._tail, b''\n self.data_received(data)\n\n def _drop_timeout(self) -> None:\n if self._read_timeout_handle is not None:\n self._read_timeout_handle.cancel()\n self._read_timeout_handle = None\n\n def _reschedule_timeout(self) -> None:\n timeout = self._read_timeout\n if self._read_timeout_handle is not None:\n self._read_timeout_handle.cancel()\n\n if timeout:\n self._read_timeout_handle = self._loop.call_later(\n timeout, self._on_read_timeout)\n else:\n self._read_timeout_handle = None\n\n def _on_read_timeout(self) -> None:\n exc = ServerTimeoutError(\"Timeout on reading data from socket\")\n self.set_exception(exc)\n if self._payload is not None:\n self._payload.set_exception(exc)\n\n def data_received(self, data: bytes) -> None:\n self._reschedule_timeout()\n\n if not data:\n return\n\n # custom payload parser\n if self._payload_parser is not None:\n eof, tail = self._payload_parser.feed_data(data)\n if eof:\n self._payload = None\n self._payload_parser = None\n\n if tail:\n self.data_received(tail)\n return\n else:\n if self._upgraded or self._parser is None:\n # i.e. 
websocket connection, websocket parser is not set yet\n self._tail += data\n else:\n # parse http messages\n try:\n messages, upgraded, tail = self._parser.feed_data(data)\n except BaseException as exc:\n if self.transport is not None:\n # connection.release() could be called BEFORE\n # data_received(), the transport is already\n # closed in this case\n self.transport.close()\n # should_close is True after the call\n self.set_exception(exc)\n return\n\n self._upgraded = upgraded\n\n payload = None\n for message, payload in messages:\n if message.should_close:\n self._should_close = True\n\n self._payload = payload\n\n if self._skip_payload or message.code in (204, 304):\n self.feed_data((message, EMPTY_PAYLOAD), 0) # type: ignore # noqa\n else:\n self.feed_data((message, payload), 0)\n if payload is not None:\n # new message(s) was processed\n # register timeout handler unsubscribing\n # either on end-of-stream or immediately for\n # EMPTY_PAYLOAD\n if payload is not EMPTY_PAYLOAD:\n payload.on_eof(self._drop_timeout)\n else:\n self._drop_timeout()\n\n if tail:\n if upgraded:\n self.data_received(tail)\n else:\n self._tail = tail\n", "path": "aiohttp/client_proto.py"}]} | 3,135 | 104 |
gh_patches_debug_22367 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-tf-515 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Subword tokenisation spacer can mark the beginning of word
Certain sequence noising operations need to retrieve a list of words from the raw list of subword tokens. For example:
* Decoding with word removal/reordering to produce noisy back-translations as in [Scaling BT paper](https://arxiv.org/abs/1808.09381)
* Word omission to support the new contrastive learning feature as in the [contrastive learning paper](https://www.aclweb.org/anthology/P19-1623.pdf)
* Presumably more features relying on word level noise might come up in the future
In these cases the user should specify some details for the sub-tokenisation process:
1. What subword token was used? (`decoding_subword_token`)
2. Was that token a joiner or a spacer? (`decoding_subword_token_is_spacer`)
When the user specifies (explicitly or implicitly) a spacer, the framework assumes that the spacer symbol appears at the beginning of each word, similar to what SentencePiece does. However this does not have to be the case, the spacer could also appear at the end of each word - for example [this one does](https://github.com/kovalevfm/SubTokenizer). If that extra sub-tokenisation flexibility is desired, we can add this configuration parameter. A sample implementation could look like [this](https://github.com/steremma/OpenNMT-tf/commit/d109af49911431e424b28def575fb94f07bfec47).
I realise that most users rely on standard tools that are covered by the current implementation. If there is a user base for which the extra flexibility is desired, I can submit a PR that reads this option from the YAML.
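
For illustration, a minimal sketch of the trailing-spacer case (assuming TensorFlow 2.x string ops; the function name is hypothetical, not part of OpenNMT-tf):

```python
import tensorflow as tf

def tokens_to_words_trailing_spacer(tokens, spacer="▁"):
    """Sketch: group subword tokens into words when the spacer marks word ends."""
    # A token ending with the spacer closes the current word, so the *next*
    # token starts a new one; the first token always starts a word.
    ends_word = tf.strings.regex_full_match(tokens, ".*%s" % spacer)
    first = tf.one_hot(0, tf.shape(tokens)[0], on_value=True, off_value=False)
    word_start = tf.logical_or(tf.roll(ends_word, shift=1, axis=0), first)
    start_indices = tf.squeeze(tf.where(word_start), -1)
    return tf.RaggedTensor.from_row_starts(tokens, start_indices)
```

With tokens like `["Hel", "lo▁", "wor", "ld▁"]` this yields `[["Hel", "lo▁"], ["wor", "ld▁"]]`.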
</issue>
<code>
[start of opennmt/data/text.py]
1 # -*- coding: utf-8 -*-
2
3 """Text manipulation."""
4
5 import tensorflow as tf
6
7
8 def tokens_to_chars(tokens):
9 """Splits tokens into unicode characters.
10
11 Args:
12 tokens: A string ``tf.Tensor`` of shape :math:`[T]`.
13
14 Returns:
15 The characters as a 2D string ``tf.RaggedTensor``.
16 """
17 return tf.strings.unicode_split(tokens, "UTF-8")
18
19 def tokens_to_words(tokens, subword_token="■", is_spacer=None):
20 """Converts a sequence of tokens to a sequence of words.
21
22 For example, if a BPE tokenization produces this sequence:
23
24 ["He@@", "llo", "W@@", "orld", "@@!"]
25
26 this function will return the tensor:
27
28 [["He@@", "llo", ""], ["W@@", "orld", "@@!"]]
29
30 Args:
31 tokens: A 1D string ``tf.Tensor``.
32 subword_token: The special token used by the subword tokenizer.
33 is_spacer: Whether :obj:`subword_token` is used as a spacer (as in
34 SentencePiece) or a joiner (as in BPE). If ``None``, will infer
35 directly from :obj:`subword_token`.
36
37 Returns:
38 The words as a 2D string ``tf.RaggedTensor``.
39 """
40 if is_spacer is None:
41 is_spacer = subword_token == "▁"
42 if is_spacer:
43 subword = tf.strings.regex_full_match(tokens, "[^%s].*" % subword_token)
44 else:
45 right = tf.strings.regex_full_match(tokens, ".*%s" % subword_token)
46 left = tf.strings.regex_full_match(tokens, "%s.*" % subword_token)
47 subword = tf.logical_or(tf.roll(right, shift=1, axis=0), left)
48 start = tf.logical_not(subword)
49 start_indices = tf.squeeze(tf.where(start), -1)
50 return tf.RaggedTensor.from_row_starts(tokens, start_indices)
51
52 def alignment_matrix_from_pharaoh(alignment_line,
53 source_length,
54 target_length,
55 dtype=tf.float32):
56 """Parse Pharaoh alignments into an alignment matrix.
57
58 Args:
59 alignment_line: A string ``tf.Tensor`` in the Pharaoh format.
60 source_length: The length of the source sentence, without special symbols.
61 target_length: The length of the target sentence, without special symbols.
62 dtype: The output matrix dtype. Defaults to ``tf.float32`` for convenience
63 when computing the guided alignment loss.
64
65 Returns:
66 The alignment matrix as a 2-D ``tf.Tensor`` of type :obj:`dtype` and shape
67 ``[target_length, source_length]``, where ``[i, j] = 1`` if the ``i`` th
68 target word is aligned with the ``j`` th source word.
69 """
70 align_pairs_str = tf.strings.split([alignment_line]).values
71 align_pairs_flat_str = tf.strings.split(align_pairs_str, sep="-").values
72 align_pairs_flat = tf.strings.to_number(align_pairs_flat_str, out_type=tf.int64)
73 sparse_indices = tf.reshape(align_pairs_flat, [-1, 2])
74 sparse_values = tf.ones([tf.shape(sparse_indices)[0]], dtype=dtype)
75 source_length = tf.cast(source_length, tf.int64)
76 target_length = tf.cast(target_length, tf.int64)
77 alignment_matrix_sparse = tf.sparse.SparseTensor(
78 sparse_indices, sparse_values, [source_length, target_length])
79 alignment_matrix = tf.sparse.to_dense(alignment_matrix_sparse, validate_indices=False)
80 return tf.transpose(alignment_matrix)
81
[end of opennmt/data/text.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opennmt/data/text.py b/opennmt/data/text.py
--- a/opennmt/data/text.py
+++ b/opennmt/data/text.py
@@ -40,13 +40,18 @@
if is_spacer is None:
is_spacer = subword_token == "▁"
if is_spacer:
- subword = tf.strings.regex_full_match(tokens, "[^%s].*" % subword_token)
+ # First token implicitly starts with a spacer.
+ left_and_single = tf.logical_or(
+ tf.strings.regex_full_match(tokens, "%s.*" % subword_token),
+ tf.one_hot(0, tf.shape(tokens)[0], on_value=True, off_value=False))
+ right = tf.strings.regex_full_match(tokens, ".+%s" % subword_token)
+ word_start = tf.logical_or(tf.roll(right, shift=1, axis=0), left_and_single)
else:
right = tf.strings.regex_full_match(tokens, ".*%s" % subword_token)
left = tf.strings.regex_full_match(tokens, "%s.*" % subword_token)
subword = tf.logical_or(tf.roll(right, shift=1, axis=0), left)
- start = tf.logical_not(subword)
- start_indices = tf.squeeze(tf.where(start), -1)
+ word_start = tf.logical_not(subword)
+ start_indices = tf.squeeze(tf.where(word_start), -1)
return tf.RaggedTensor.from_row_starts(tokens, start_indices)
def alignment_matrix_from_pharaoh(alignment_line,
| {"golden_diff": "diff --git a/opennmt/data/text.py b/opennmt/data/text.py\n--- a/opennmt/data/text.py\n+++ b/opennmt/data/text.py\n@@ -40,13 +40,18 @@\n if is_spacer is None:\n is_spacer = subword_token == \"\u2581\"\n if is_spacer:\n- subword = tf.strings.regex_full_match(tokens, \"[^%s].*\" % subword_token)\n+ # First token implicitly starts with a spacer.\n+ left_and_single = tf.logical_or(\n+ tf.strings.regex_full_match(tokens, \"%s.*\" % subword_token),\n+ tf.one_hot(0, tf.shape(tokens)[0], on_value=True, off_value=False))\n+ right = tf.strings.regex_full_match(tokens, \".+%s\" % subword_token)\n+ word_start = tf.logical_or(tf.roll(right, shift=1, axis=0), left_and_single)\n else:\n right = tf.strings.regex_full_match(tokens, \".*%s\" % subword_token)\n left = tf.strings.regex_full_match(tokens, \"%s.*\" % subword_token)\n subword = tf.logical_or(tf.roll(right, shift=1, axis=0), left)\n- start = tf.logical_not(subword)\n- start_indices = tf.squeeze(tf.where(start), -1)\n+ word_start = tf.logical_not(subword)\n+ start_indices = tf.squeeze(tf.where(word_start), -1)\n return tf.RaggedTensor.from_row_starts(tokens, start_indices)\n \n def alignment_matrix_from_pharaoh(alignment_line,\n", "issue": "Subword tokenisation spacer can mark the beginning of word\nCertain sequence noising operations need to retrieve a list of words from the raw list of subword tokens. For example:\r\n\r\n* Decoding with word removal/reordering to produce noisy back-translations as in [Scaling BT paper](https://arxiv.org/abs/1808.09381)\r\n\r\n* Word omission to support the new contrastive learning feature as in the [contrastive learning paper](https://www.aclweb.org/anthology/P19-1623.pdf)\r\n\r\n* Presumably more features relying on word level noise might come up in the future\r\n\r\nIn these cases the user should specify some details for the sub-tokenisation process: \r\n1. What subword tokens was used? (`decoding_subword_token`)\r\n2. Was that token a joiner or a spacer? (`decoding_subword_token_is_spacer`)\r\n\r\nWhen the user specifies (explicitly or implicitly) a spacer, the framework assumes that the spacer symbol appears at the beginning of each word, similar to what SentencePiece does. However this does not have to be the case, the spacer could also appear at the end of each word - for example [this one does](https://github.com/kovalevfm/SubTokenizer). If that extra sub-tokenisation flexibility is desired, we can add this configuration parameter. A sample implementation could look like [this](https://github.com/steremma/OpenNMT-tf/commit/d109af49911431e424b28def575fb94f07bfec47).\r\n\r\nI realise that most user's rely on standard tools that are covered by the current implementation. 
If there is a user base for which the extra flexibility is desired, I can submit a PR that reads this option from the YAML.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Text manipulation.\"\"\"\n\nimport tensorflow as tf\n\n\ndef tokens_to_chars(tokens):\n \"\"\"Splits tokens into unicode characters.\n\n Args:\n tokens: A string ``tf.Tensor`` of shape :math:`[T]`.\n\n Returns:\n The characters as a 2D string ``tf.RaggedTensor``.\n \"\"\"\n return tf.strings.unicode_split(tokens, \"UTF-8\")\n\ndef tokens_to_words(tokens, subword_token=\"\uffed\", is_spacer=None):\n \"\"\"Converts a sequence of tokens to a sequence of words.\n\n For example, if a BPE tokenization produces this sequence:\n\n [\"He@@\", \"llo\", \"W@@\", \"orld\", \"@@!\"]\n\n this function will return the tensor:\n\n [[\"He@@\", \"llo\", \"\"], [\"W@@\", \"orld\", \"@@!\"]]\n\n Args:\n tokens: A 1D string ``tf.Tensor``.\n subword_token: The special token used by the subword tokenizer.\n is_spacer: Whether :obj:`subword_token` is used as a spacer (as in\n SentencePiece) or a joiner (as in BPE). If ``None``, will infer\n directly from :obj:`subword_token`.\n\n Returns:\n The words as a 2D string ``tf.RaggedTensor``.\n \"\"\"\n if is_spacer is None:\n is_spacer = subword_token == \"\u2581\"\n if is_spacer:\n subword = tf.strings.regex_full_match(tokens, \"[^%s].*\" % subword_token)\n else:\n right = tf.strings.regex_full_match(tokens, \".*%s\" % subword_token)\n left = tf.strings.regex_full_match(tokens, \"%s.*\" % subword_token)\n subword = tf.logical_or(tf.roll(right, shift=1, axis=0), left)\n start = tf.logical_not(subword)\n start_indices = tf.squeeze(tf.where(start), -1)\n return tf.RaggedTensor.from_row_starts(tokens, start_indices)\n\ndef alignment_matrix_from_pharaoh(alignment_line,\n source_length,\n target_length,\n dtype=tf.float32):\n \"\"\"Parse Pharaoh alignments into an alignment matrix.\n\n Args:\n alignment_line: A string ``tf.Tensor`` in the Pharaoh format.\n source_length: The length of the source sentence, without special symbols.\n target_length: The length of the target sentence, without special symbols.\n dtype: The output matrix dtype. Defaults to ``tf.float32`` for convenience\n when computing the guided alignment loss.\n\n Returns:\n The alignment matrix as a 2-D ``tf.Tensor`` of type :obj:`dtype` and shape\n ``[target_length, source_length]``, where ``[i, j] = 1`` if the ``i`` th\n target word is aligned with the ``j`` th source word.\n \"\"\"\n align_pairs_str = tf.strings.split([alignment_line]).values\n align_pairs_flat_str = tf.strings.split(align_pairs_str, sep=\"-\").values\n align_pairs_flat = tf.strings.to_number(align_pairs_flat_str, out_type=tf.int64)\n sparse_indices = tf.reshape(align_pairs_flat, [-1, 2])\n sparse_values = tf.ones([tf.shape(sparse_indices)[0]], dtype=dtype)\n source_length = tf.cast(source_length, tf.int64)\n target_length = tf.cast(target_length, tf.int64)\n alignment_matrix_sparse = tf.sparse.SparseTensor(\n sparse_indices, sparse_values, [source_length, target_length])\n alignment_matrix = tf.sparse.to_dense(alignment_matrix_sparse, validate_indices=False)\n return tf.transpose(alignment_matrix)\n", "path": "opennmt/data/text.py"}]} | 1,880 | 343 |
gh_patches_debug_35389 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-206 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Mycroft should only repeat the query in wolfram when short to speak
When the estimated time to speak "I am searching for blah blah blah blah" is longer than the estimated time to get the result from Wolfram, only the result should be said rather than the redundant "I am searching for ...".
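
A minimal sketch of such a heuristic; the speaking rate and the expected lookup latency below are assumptions for illustration, not values taken from Mycroft:

```python
SPOKEN_WORDS_PER_SECOND = 2.5   # assumed average speaking rate
EXPECTED_LOOKUP_SECONDS = 3.0   # assumed Wolfram|Alpha round-trip time

def should_announce_search(query):
    """Only speak the filler phrase when it is quicker than waiting silently."""
    filler = "I am searching for " + query
    speaking_time = len(filler.split()) / SPOKEN_WORDS_PER_SECOND
    return speaking_time < EXPECTED_LOOKUP_SECONDS
```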
</issue>
<code>
[start of mycroft/skills/wolfram_alpha/__init__.py]
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17
18
19 from StringIO import StringIO
20 from os.path import dirname
21
22 import re
23 import requests
24 import wolframalpha
25 from six.moves import urllib
26
27 from mycroft.identity import IdentityManager
28 from mycroft.skills.core import MycroftSkill
29 from mycroft.util import CerberusAccessDenied
30 from mycroft.util.log import getLogger
31 from mycroft.messagebus.message import Message
32
33 __author__ = 'seanfitz'
34
35 logger = getLogger(__name__)
36
37
38 class EnglishQuestionParser(object):
39 """
40 Poor-man's english question parser. Not even close to conclusive, but
41 appears to construct some decent w|a queries and responses.
42 """
43
44 def __init__(self):
45 self.regexes = [
46 re.compile(
47 ".*(?P<QuestionWord>who|what|when|where|why|which) "
48 "(?P<Query1>.*) (?P<QuestionVerb>is|are|was|were) "
49 "(?P<Query2>.*)"),
50 re.compile(
51 ".*(?P<QuestionWord>who|what|when|where|why|which) "
52 "(?P<QuestionVerb>\w+) (?P<Query>.*)")
53 ]
54
55 def _normalize(self, groupdict):
56 if 'Query' in groupdict:
57 return groupdict
58 elif 'Query1' and 'Query2' in groupdict:
59 return {
60 'QuestionWord': groupdict.get('QuestionWord'),
61 'QuestionVerb': groupdict.get('QuestionVerb'),
62 'Query': ' '.join([groupdict.get('Query1'), groupdict.get(
63 'Query2')])
64 }
65
66 def parse(self, utterance):
67 for regex in self.regexes:
68 match = regex.match(utterance)
69 if match:
70 return self._normalize(match.groupdict())
71 return None
72
73
74 class CerberusWolframAlphaClient(object):
75 """
76 Wolfram|Alpha v2.0 client
77 """
78
79 def query(self, query):
80 """
81 Query Wolfram|Alpha with query using the v2.0 API
82 """
83 identity = IdentityManager().get()
84 bearer_token = 'Bearer %s:%s' % (identity.device_id, identity.token)
85 query = urllib.parse.urlencode(dict(input=query))
86 url = 'https://cerberus.mycroft.ai/wolframalpha/v2/query?' + query
87 headers = {'Authorization': bearer_token}
88 response = requests.get(url, headers=headers)
89 if response.status_code == 401:
90 raise CerberusAccessDenied()
91 logger.debug(response.content)
92 return wolframalpha.Result(StringIO(response.content))
93
94
95 class WolframAlphaSkill(MycroftSkill):
96 def __init__(self):
97 MycroftSkill.__init__(self, name="WolframAlphaSkill")
98 self.__init_client()
99 self.question_parser = EnglishQuestionParser()
100
101 def __init_client(self):
102 key = self.config.get('api_key')
103 if key:
104 self.client = wolframalpha.Client(key)
105 else:
106 self.client = CerberusWolframAlphaClient()
107
108 def initialize(self):
109 self.init_dialog(dirname(__file__))
110 self.emitter.on('intent_failure', self.handle_fallback)
111
112 def get_result(self, res):
113 result = None
114 try:
115 result = next(res.results).text
116 return result
117 except:
118 try:
119 result = self.__find_pod_id(res.pods, 'Value')
120 if not result:
121 result = self.__find_pod_id(
122 res.pods, 'NotableFacts:PeopleData')
123 if not result:
124 result = self.__find_pod_id(
125 res.pods, 'BasicInformation:PeopleData')
126 if not result:
127 result = self.__find_pod_id(res.pods, 'Definition')
128 if not result:
129 result = self.__find_pod_id(
130 res.pods, 'DecimalApproximation')
131 if result:
132 result = result[:5]
133 else:
134 result = self.__find_num(
135 res.pods, '200')
136 return result
137 except:
138 return result
139
140 def handle_fallback(self, message):
141 logger.debug(
142 "Could not determine intent, falling back to WolframAlpha Skill!")
143 utterance = message.metadata.get('utterance')
144 parsed_question = self.question_parser.parse(utterance)
145
146 # biding some time
147 if parsed_question:
148 self.speak("I am searching for " + parsed_question.get('Query'))
149 else:
150 self.speak("I am searching for " + utterance)
151 query = utterance
152 if parsed_question:
153 query = "%s %s %s" % (parsed_question.get('QuestionWord'),
154 parsed_question.get('QuestionVerb'),
155 parsed_question.get('Query'))
156
157 try:
158 res = self.client.query(query)
159 result = self.get_result(res)
160 others = self._find_did_you_mean(res)
161 except CerberusAccessDenied as e:
162 self.speak_dialog('not.paired')
163 return
164 except Exception as e:
165 logger.exception(e)
166 self.speak_dialog("not.understood")
167 return
168
169 if result:
170 input_interpretation = self.__find_pod_id(res.pods, 'Input')
171 verb = "is"
172 structured_syntax_regex = re.compile(".*(\||\[|\\\\|\]).*")
173 if parsed_question:
174 if not input_interpretation or structured_syntax_regex.match(
175 input_interpretation):
176 input_interpretation = parsed_question.get('Query')
177 verb = parsed_question.get('QuestionVerb')
178
179 if "|" in result: # Assuming "|" indicates a list of items
180 verb = ":"
181
182 result = self.process_wolfram_string(result)
183 input_interpretation = \
184 self.process_wolfram_string(input_interpretation)
185 response = "%s %s %s" % (input_interpretation, verb, result)
186
187 self.speak(response)
188 else:
189 if len(others) > 0:
190 self.speak_dialog('search.again',
191 data={'utterance': utterance, 'alternative':
192 others[0]})
193 self.handle_fallback(Message('intent_failure',
194 metadata={'utterance':
195 others[0]}))
196 else:
197 self.speak_dialog("not.understood")
198
199 @staticmethod
200 def __find_pod_id(pods, pod_id):
201 for pod in pods:
202 if pod_id in pod.id:
203 return pod.text
204 return None
205
206 @staticmethod
207 def __find_num(pods, pod_num):
208 for pod in pods:
209 if pod.node.attrib['position'] == pod_num:
210 return pod.text
211 return None
212
213 @staticmethod
214 def _find_did_you_mean(res):
215 value = []
216 root = res.tree.find('didyoumeans')
217 if root is not None:
218 for result in root:
219 value.append(result.text)
220 return value
221
222 @staticmethod
223 def process_wolfram_string(text):
224 # Remove extra whitespace
225 text = re.sub(r" \s+", r" ", text)
226
227 # Convert | symbols to commas
228 text = re.sub(r" \| ", r", ", text)
229
230 # Convert newlines to commas
231 text = re.sub(r"\n", r", ", text)
232
233 # Convert !s to factorial
234 text = re.sub(r"!", r",factorial", text)
235 return text
236
237 def stop(self):
238 pass
239
240
241 def create_skill():
242 return WolframAlphaSkill()
243
[end of mycroft/skills/wolfram_alpha/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mycroft/skills/wolfram_alpha/__init__.py b/mycroft/skills/wolfram_alpha/__init__.py
--- a/mycroft/skills/wolfram_alpha/__init__.py
+++ b/mycroft/skills/wolfram_alpha/__init__.py
@@ -138,21 +138,22 @@
return result
def handle_fallback(self, message):
+ self.enclosure.mouth_think()
logger.debug(
"Could not determine intent, falling back to WolframAlpha Skill!")
utterance = message.metadata.get('utterance')
parsed_question = self.question_parser.parse(utterance)
- # biding some time
- if parsed_question:
- self.speak("I am searching for " + parsed_question.get('Query'))
- else:
- self.speak("I am searching for " + utterance)
query = utterance
if parsed_question:
- query = "%s %s %s" % (parsed_question.get('QuestionWord'),
- parsed_question.get('QuestionVerb'),
- parsed_question.get('Query'))
+ # Try to store pieces of utterance (None if not parsed_question)
+ utt_word = parsed_question.get('QuestionWord')
+ utt_verb = parsed_question.get('QuestionVerb')
+ utt_query = parsed_question.get('Query')
+ query = "%s %s %s" % (utt_word, utt_verb, utt_query)
+ phrase = "know %s %s %s" % (utt_word, utt_query, utt_verb)
+ else: # TODO: Localization
+ phrase = "understand the phrase " + utterance
try:
res = self.client.query(query)
@@ -163,7 +164,7 @@
return
except Exception as e:
logger.exception(e)
- self.speak_dialog("not.understood")
+ self.speak_dialog("not.understood", data={'phrase': phrase})
return
if result:
@@ -194,7 +195,7 @@
metadata={'utterance':
others[0]}))
else:
- self.speak_dialog("not.understood")
+ self.speak_dialog("not.understood", data={'phrase': phrase})
@staticmethod
def __find_pod_id(pods, pod_id):
| {"golden_diff": "diff --git a/mycroft/skills/wolfram_alpha/__init__.py b/mycroft/skills/wolfram_alpha/__init__.py\n--- a/mycroft/skills/wolfram_alpha/__init__.py\n+++ b/mycroft/skills/wolfram_alpha/__init__.py\n@@ -138,21 +138,22 @@\n return result\n \n def handle_fallback(self, message):\n+ self.enclosure.mouth_think()\n logger.debug(\n \"Could not determine intent, falling back to WolframAlpha Skill!\")\n utterance = message.metadata.get('utterance')\n parsed_question = self.question_parser.parse(utterance)\n \n- # biding some time\n- if parsed_question:\n- self.speak(\"I am searching for \" + parsed_question.get('Query'))\n- else:\n- self.speak(\"I am searching for \" + utterance)\n query = utterance\n if parsed_question:\n- query = \"%s %s %s\" % (parsed_question.get('QuestionWord'),\n- parsed_question.get('QuestionVerb'),\n- parsed_question.get('Query'))\n+ # Try to store pieces of utterance (None if not parsed_question)\n+ utt_word = parsed_question.get('QuestionWord')\n+ utt_verb = parsed_question.get('QuestionVerb')\n+ utt_query = parsed_question.get('Query')\n+ query = \"%s %s %s\" % (utt_word, utt_verb, utt_query)\n+ phrase = \"know %s %s %s\" % (utt_word, utt_query, utt_verb)\n+ else: # TODO: Localization\n+ phrase = \"understand the phrase \" + utterance\n \n try:\n res = self.client.query(query)\n@@ -163,7 +164,7 @@\n return\n except Exception as e:\n logger.exception(e)\n- self.speak_dialog(\"not.understood\")\n+ self.speak_dialog(\"not.understood\", data={'phrase': phrase})\n return\n \n if result:\n@@ -194,7 +195,7 @@\n metadata={'utterance':\n others[0]}))\n else:\n- self.speak_dialog(\"not.understood\")\n+ self.speak_dialog(\"not.understood\", data={'phrase': phrase})\n \n @staticmethod\n def __find_pod_id(pods, pod_id):\n", "issue": "Mycroft should only repeat the query in wolfram when short to speak\nWhen the estimated time to speak \"I am searching for blah blah blah blah\" will take longer than the estimated time to get the result from wolfram, only the result should be said rather than the redundant \"I am searching for ...\".\n\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.\n\n\nfrom StringIO import StringIO\nfrom os.path import dirname\n\nimport re\nimport requests\nimport wolframalpha\nfrom six.moves import urllib\n\nfrom mycroft.identity import IdentityManager\nfrom mycroft.skills.core import MycroftSkill\nfrom mycroft.util import CerberusAccessDenied\nfrom mycroft.util.log import getLogger\nfrom mycroft.messagebus.message import Message\n\n__author__ = 'seanfitz'\n\nlogger = getLogger(__name__)\n\n\nclass EnglishQuestionParser(object):\n \"\"\"\n Poor-man's english question parser. 
Not even close to conclusive, but\n appears to construct some decent w|a queries and responses.\n \"\"\"\n\n def __init__(self):\n self.regexes = [\n re.compile(\n \".*(?P<QuestionWord>who|what|when|where|why|which) \"\n \"(?P<Query1>.*) (?P<QuestionVerb>is|are|was|were) \"\n \"(?P<Query2>.*)\"),\n re.compile(\n \".*(?P<QuestionWord>who|what|when|where|why|which) \"\n \"(?P<QuestionVerb>\\w+) (?P<Query>.*)\")\n ]\n\n def _normalize(self, groupdict):\n if 'Query' in groupdict:\n return groupdict\n elif 'Query1' and 'Query2' in groupdict:\n return {\n 'QuestionWord': groupdict.get('QuestionWord'),\n 'QuestionVerb': groupdict.get('QuestionVerb'),\n 'Query': ' '.join([groupdict.get('Query1'), groupdict.get(\n 'Query2')])\n }\n\n def parse(self, utterance):\n for regex in self.regexes:\n match = regex.match(utterance)\n if match:\n return self._normalize(match.groupdict())\n return None\n\n\nclass CerberusWolframAlphaClient(object):\n \"\"\"\n Wolfram|Alpha v2.0 client\n \"\"\"\n\n def query(self, query):\n \"\"\"\n Query Wolfram|Alpha with query using the v2.0 API\n \"\"\"\n identity = IdentityManager().get()\n bearer_token = 'Bearer %s:%s' % (identity.device_id, identity.token)\n query = urllib.parse.urlencode(dict(input=query))\n url = 'https://cerberus.mycroft.ai/wolframalpha/v2/query?' + query\n headers = {'Authorization': bearer_token}\n response = requests.get(url, headers=headers)\n if response.status_code == 401:\n raise CerberusAccessDenied()\n logger.debug(response.content)\n return wolframalpha.Result(StringIO(response.content))\n\n\nclass WolframAlphaSkill(MycroftSkill):\n def __init__(self):\n MycroftSkill.__init__(self, name=\"WolframAlphaSkill\")\n self.__init_client()\n self.question_parser = EnglishQuestionParser()\n\n def __init_client(self):\n key = self.config.get('api_key')\n if key:\n self.client = wolframalpha.Client(key)\n else:\n self.client = CerberusWolframAlphaClient()\n\n def initialize(self):\n self.init_dialog(dirname(__file__))\n self.emitter.on('intent_failure', self.handle_fallback)\n\n def get_result(self, res):\n result = None\n try:\n result = next(res.results).text\n return result\n except:\n try:\n result = self.__find_pod_id(res.pods, 'Value')\n if not result:\n result = self.__find_pod_id(\n res.pods, 'NotableFacts:PeopleData')\n if not result:\n result = self.__find_pod_id(\n res.pods, 'BasicInformation:PeopleData')\n if not result:\n result = self.__find_pod_id(res.pods, 'Definition')\n if not result:\n result = self.__find_pod_id(\n res.pods, 'DecimalApproximation')\n if result:\n result = result[:5]\n else:\n result = self.__find_num(\n res.pods, '200')\n return result\n except:\n return result\n\n def handle_fallback(self, message):\n logger.debug(\n \"Could not determine intent, falling back to WolframAlpha Skill!\")\n utterance = message.metadata.get('utterance')\n parsed_question = self.question_parser.parse(utterance)\n\n # biding some time\n if parsed_question:\n self.speak(\"I am searching for \" + parsed_question.get('Query'))\n else:\n self.speak(\"I am searching for \" + utterance)\n query = utterance\n if parsed_question:\n query = \"%s %s %s\" % (parsed_question.get('QuestionWord'),\n parsed_question.get('QuestionVerb'),\n parsed_question.get('Query'))\n\n try:\n res = self.client.query(query)\n result = self.get_result(res)\n others = self._find_did_you_mean(res)\n except CerberusAccessDenied as e:\n self.speak_dialog('not.paired')\n return\n except Exception as e:\n logger.exception(e)\n self.speak_dialog(\"not.understood\")\n 
return\n\n if result:\n input_interpretation = self.__find_pod_id(res.pods, 'Input')\n verb = \"is\"\n structured_syntax_regex = re.compile(\".*(\\||\\[|\\\\\\\\|\\]).*\")\n if parsed_question:\n if not input_interpretation or structured_syntax_regex.match(\n input_interpretation):\n input_interpretation = parsed_question.get('Query')\n verb = parsed_question.get('QuestionVerb')\n\n if \"|\" in result: # Assuming \"|\" indicates a list of items\n verb = \":\"\n\n result = self.process_wolfram_string(result)\n input_interpretation = \\\n self.process_wolfram_string(input_interpretation)\n response = \"%s %s %s\" % (input_interpretation, verb, result)\n\n self.speak(response)\n else:\n if len(others) > 0:\n self.speak_dialog('search.again',\n data={'utterance': utterance, 'alternative':\n others[0]})\n self.handle_fallback(Message('intent_failure',\n metadata={'utterance':\n others[0]}))\n else:\n self.speak_dialog(\"not.understood\")\n\n @staticmethod\n def __find_pod_id(pods, pod_id):\n for pod in pods:\n if pod_id in pod.id:\n return pod.text\n return None\n\n @staticmethod\n def __find_num(pods, pod_num):\n for pod in pods:\n if pod.node.attrib['position'] == pod_num:\n return pod.text\n return None\n\n @staticmethod\n def _find_did_you_mean(res):\n value = []\n root = res.tree.find('didyoumeans')\n if root is not None:\n for result in root:\n value.append(result.text)\n return value\n\n @staticmethod\n def process_wolfram_string(text):\n # Remove extra whitespace\n text = re.sub(r\" \\s+\", r\" \", text)\n\n # Convert | symbols to commas\n text = re.sub(r\" \\| \", r\", \", text)\n\n # Convert newlines to commas\n text = re.sub(r\"\\n\", r\", \", text)\n\n # Convert !s to factorial\n text = re.sub(r\"!\", r\",factorial\", text)\n return text\n\n def stop(self):\n pass\n\n\ndef create_skill():\n return WolframAlphaSkill()\n", "path": "mycroft/skills/wolfram_alpha/__init__.py"}]} | 3,030 | 525 |
gh_patches_debug_29109 | rasdani/github-patches | git_diff | saleor__saleor-8874 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
management/commands errors
There are some queries that reference deleted fields in this module (examples below). I was wondering whether it matters if this module is updated, since it seems like this file hasn't been updated in a while, or whether there are other reasons that these queries still exist. Thanks.
https://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L45
https://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L51
https://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L52
https://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L53
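
For illustration, a hedged sketch (assuming Django's standard model `_meta` API; the helper is hypothetical) of skipping models whose `currency` field no longer exists before issuing the bulk updates:

```python
def models_with_currency(models):
    """Keep only models that still define a concrete `currency` field."""
    return [
        model for model in models
        if any(field.name == "currency" for field in model._meta.fields)
    ]
```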
</issue>
<code>
[start of saleor/core/management/commands/change_currency.py]
1 from babel.numbers import UnknownCurrencyError, validate_currency
2 from django.core.management.base import BaseCommand, CommandError
3
4 from ....checkout.models import Checkout
5 from ....discount.models import Voucher
6 from ....giftcard.models import GiftCard
7 from ....order.models import Order, OrderLine
8 from ....payment.models import Payment, Transaction
9 from ....product.models import Product, ProductVariant
10 from ....shipping.models import ShippingMethod
11
12
13 class Command(BaseCommand):
14 help = (
15 "Change currency in all models in the database. "
16 "Note, that this command only changes currency code "
17 "without doing any conversion. "
18 "Currency set by this command must match "
19 "with the value set in DEFAULT_CURRENCY environment variable."
20 )
21
22 def add_arguments(self, parser):
23 parser.add_argument("currency", type=str)
24
25 parser.add_argument(
26 "--force",
27 action="store_true",
28 help="Allows running command without validation.",
29 )
30
31 def handle(self, **options):
32 force = options.get("force", False)
33 currency = options["currency"]
34
35 if not force:
36 try:
37 validate_currency(currency)
38 except UnknownCurrencyError:
39 raise CommandError(
40 "Unknown currency. "
41 "Use `--force` flag to force migration currencies."
42 )
43
44 Checkout.objects.update(currency=currency)
45 Voucher.objects.update(currency=currency)
46 GiftCard.objects.update(currency=currency)
47 Order.objects.update(currency=currency)
48 OrderLine.objects.update(currency=currency)
49 Payment.objects.update(currency=currency)
50 Transaction.objects.update(currency=currency)
51 Product.objects.update(currency=currency)
52 ProductVariant.objects.update(currency=currency)
53 ShippingMethod.objects.update(currency=currency)
54
[end of saleor/core/management/commands/change_currency.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/core/management/commands/change_currency.py b/saleor/core/management/commands/change_currency.py
deleted file mode 100644
--- a/saleor/core/management/commands/change_currency.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from babel.numbers import UnknownCurrencyError, validate_currency
-from django.core.management.base import BaseCommand, CommandError
-
-from ....checkout.models import Checkout
-from ....discount.models import Voucher
-from ....giftcard.models import GiftCard
-from ....order.models import Order, OrderLine
-from ....payment.models import Payment, Transaction
-from ....product.models import Product, ProductVariant
-from ....shipping.models import ShippingMethod
-
-
-class Command(BaseCommand):
- help = (
- "Change currency in all models in the database. "
- "Note, that this command only changes currency code "
- "without doing any conversion. "
- "Currency set by this command must match "
- "with the value set in DEFAULT_CURRENCY environment variable."
- )
-
- def add_arguments(self, parser):
- parser.add_argument("currency", type=str)
-
- parser.add_argument(
- "--force",
- action="store_true",
- help="Allows running command without validation.",
- )
-
- def handle(self, **options):
- force = options.get("force", False)
- currency = options["currency"]
-
- if not force:
- try:
- validate_currency(currency)
- except UnknownCurrencyError:
- raise CommandError(
- "Unknown currency. "
- "Use `--force` flag to force migration currencies."
- )
-
- Checkout.objects.update(currency=currency)
- Voucher.objects.update(currency=currency)
- GiftCard.objects.update(currency=currency)
- Order.objects.update(currency=currency)
- OrderLine.objects.update(currency=currency)
- Payment.objects.update(currency=currency)
- Transaction.objects.update(currency=currency)
- Product.objects.update(currency=currency)
- ProductVariant.objects.update(currency=currency)
- ShippingMethod.objects.update(currency=currency)
| {"golden_diff": "diff --git a/saleor/core/management/commands/change_currency.py b/saleor/core/management/commands/change_currency.py\ndeleted file mode 100644\n--- a/saleor/core/management/commands/change_currency.py\n+++ /dev/null\n@@ -1,53 +0,0 @@\n-from babel.numbers import UnknownCurrencyError, validate_currency\n-from django.core.management.base import BaseCommand, CommandError\n-\n-from ....checkout.models import Checkout\n-from ....discount.models import Voucher\n-from ....giftcard.models import GiftCard\n-from ....order.models import Order, OrderLine\n-from ....payment.models import Payment, Transaction\n-from ....product.models import Product, ProductVariant\n-from ....shipping.models import ShippingMethod\n-\n-\n-class Command(BaseCommand):\n- help = (\n- \"Change currency in all models in the database. \"\n- \"Note, that this command only changes currency code \"\n- \"without doing any conversion. \"\n- \"Currency set by this command must match \"\n- \"with the value set in DEFAULT_CURRENCY environment variable.\"\n- )\n-\n- def add_arguments(self, parser):\n- parser.add_argument(\"currency\", type=str)\n-\n- parser.add_argument(\n- \"--force\",\n- action=\"store_true\",\n- help=\"Allows running command without validation.\",\n- )\n-\n- def handle(self, **options):\n- force = options.get(\"force\", False)\n- currency = options[\"currency\"]\n-\n- if not force:\n- try:\n- validate_currency(currency)\n- except UnknownCurrencyError:\n- raise CommandError(\n- \"Unknown currency. \"\n- \"Use `--force` flag to force migration currencies.\"\n- )\n-\n- Checkout.objects.update(currency=currency)\n- Voucher.objects.update(currency=currency)\n- GiftCard.objects.update(currency=currency)\n- Order.objects.update(currency=currency)\n- OrderLine.objects.update(currency=currency)\n- Payment.objects.update(currency=currency)\n- Transaction.objects.update(currency=currency)\n- Product.objects.update(currency=currency)\n- ProductVariant.objects.update(currency=currency)\n- ShippingMethod.objects.update(currency=currency)\n", "issue": "management/commands errors\nThere are some queries that reference deleted fields in this module (examples below). I was wondering does it matter if this module is updated because it seems like this file hasn't been updated in a while, or are there other reasons that these queries still exist? Thanks.\r\n\r\nhttps://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L45\r\nhttps://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L51\r\nhttps://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L52\r\nhttps://github.com/mirumee/saleor/blob/master/saleor/core/management/commands/change_currency.py#L53\n", "before_files": [{"content": "from babel.numbers import UnknownCurrencyError, validate_currency\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom ....checkout.models import Checkout\nfrom ....discount.models import Voucher\nfrom ....giftcard.models import GiftCard\nfrom ....order.models import Order, OrderLine\nfrom ....payment.models import Payment, Transaction\nfrom ....product.models import Product, ProductVariant\nfrom ....shipping.models import ShippingMethod\n\n\nclass Command(BaseCommand):\n help = (\n \"Change currency in all models in the database. \"\n \"Note, that this command only changes currency code \"\n \"without doing any conversion. 
\"\n \"Currency set by this command must match \"\n \"with the value set in DEFAULT_CURRENCY environment variable.\"\n )\n\n def add_arguments(self, parser):\n parser.add_argument(\"currency\", type=str)\n\n parser.add_argument(\n \"--force\",\n action=\"store_true\",\n help=\"Allows running command without validation.\",\n )\n\n def handle(self, **options):\n force = options.get(\"force\", False)\n currency = options[\"currency\"]\n\n if not force:\n try:\n validate_currency(currency)\n except UnknownCurrencyError:\n raise CommandError(\n \"Unknown currency. \"\n \"Use `--force` flag to force migration currencies.\"\n )\n\n Checkout.objects.update(currency=currency)\n Voucher.objects.update(currency=currency)\n GiftCard.objects.update(currency=currency)\n Order.objects.update(currency=currency)\n OrderLine.objects.update(currency=currency)\n Payment.objects.update(currency=currency)\n Transaction.objects.update(currency=currency)\n Product.objects.update(currency=currency)\n ProductVariant.objects.update(currency=currency)\n ShippingMethod.objects.update(currency=currency)\n", "path": "saleor/core/management/commands/change_currency.py"}]} | 1,176 | 462 |
gh_patches_debug_11801 | rasdani/github-patches | git_diff | getmoto__moto-399 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python 2.6 issues with wheels and dependencies
My Travis tests failed on Python 2.6: piskvorky/smart_open#15 .
After some digging around it appears this is because of `moto`. Moto apparently depends on some `ordereddict` package, but that package is not installed (nor mentioned anywhere in the docs, AFAICS).
Do you think you could make `ordereddict` a dependency for moto, when installing on Python 2.6?
In other words, after I successfully run `pip install moto`, I'd expect moto to work, even on Python 2.6.
And thanks for the great package!
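
A minimal sketch of declaring the dependency conditionally; setuptools accepts an environment-marker key in `extras_require`, and the version pins here are illustrative only:

```python
from setuptools import setup

setup(
    name="moto",
    install_requires=["Jinja2", "boto>=2.20.0"],
    extras_require={
        # Pulled in only when installing under Python 2.6.
        ':python_version=="2.6"': ["ordereddict"],
    },
)
```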
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 from __future__ import unicode_literals
3 from setuptools import setup, find_packages
4
5 install_requires = [
6 "Jinja2",
7 "boto>=2.20.0",
8 "flask",
9 "httpretty>=0.6.1",
10 "requests",
11 "xmltodict",
12 "six",
13 "werkzeug",
14 ]
15
16 import sys
17
18 if sys.version_info < (2, 7):
19 # No buildint OrderedDict before 2.7
20 install_requires.append('ordereddict')
21
22 setup(
23 name='moto',
24 version='0.4.10',
25 description='A library that allows your python tests to easily'
26 ' mock out the boto library',
27 author='Steve Pulec',
28 author_email='spulec@gmail',
29 url='https://github.com/spulec/moto',
30 entry_points={
31 'console_scripts': [
32 'moto_server = moto.server:main',
33 ],
34 },
35 packages=find_packages(exclude=("tests", "tests.*")),
36 install_requires=install_requires,
37 license="Apache",
38 test_suite="tests",
39 classifiers=[
40 "Programming Language :: Python :: 2",
41 "Programming Language :: Python :: 2.6",
42 "Programming Language :: Python :: 2.7",
43 "Programming Language :: Python :: 3",
44 "Programming Language :: Python :: 3.3",
45 "License :: OSI Approved :: Apache Software License",
46 "Topic :: Software Development :: Testing",
47 ],
48 )
49
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -13,11 +13,10 @@
"werkzeug",
]
-import sys
-
-if sys.version_info < (2, 7):
- # No buildint OrderedDict before 2.7
- install_requires.append('ordereddict')
+extras_require = {
+ # No builtin OrderedDict before 2.7
+ ':python_version=="2.6"': ['ordereddict'],
+}
setup(
name='moto',
@@ -34,6 +33,7 @@
},
packages=find_packages(exclude=("tests", "tests.*")),
install_requires=install_requires,
+ extras_require=extras_require,
license="Apache",
test_suite="tests",
classifiers=[
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -13,11 +13,10 @@\n \"werkzeug\",\n ]\n \n-import sys\n-\n-if sys.version_info < (2, 7):\n- # No buildint OrderedDict before 2.7\n- install_requires.append('ordereddict')\n+extras_require = {\n+ # No builtin OrderedDict before 2.7\n+ ':python_version==\"2.6\"': ['ordereddict'],\n+}\n \n setup(\n name='moto',\n@@ -34,6 +33,7 @@\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n+ extras_require=extras_require,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n", "issue": "Python 2.6 issues with wheels and dependencies\nMy Travis tests failed on Python 2.6: piskvorky/smart_open#15 .\n\nAfter some digging around it appears this is because of `moto`. Moto apparently depends on some `ordereddict` package, but that package is not installed (nor mentioned anywhere in the docs, AFAICS).\n\nDo you think you could make `ordereddict` a dependency for moto, when installing on Python 2.6?\n\nIn other words, after I successfully run `pip install moto`, I'd expect moto to work, even on Python 2.6.\n\nAnd thanks for the great package!\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import unicode_literals\nfrom setuptools import setup, find_packages\n\ninstall_requires = [\n \"Jinja2\",\n \"boto>=2.20.0\",\n \"flask\",\n \"httpretty>=0.6.1\",\n \"requests\",\n \"xmltodict\",\n \"six\",\n \"werkzeug\",\n]\n\nimport sys\n\nif sys.version_info < (2, 7):\n # No buildint OrderedDict before 2.7\n install_requires.append('ordereddict')\n\nsetup(\n name='moto',\n version='0.4.10',\n description='A library that allows your python tests to easily'\n ' mock out the boto library',\n author='Steve Pulec',\n author_email='spulec@gmail',\n url='https://github.com/spulec/moto',\n entry_points={\n 'console_scripts': [\n 'moto_server = moto.server:main',\n ],\n },\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n install_requires=install_requires,\n license=\"Apache\",\n test_suite=\"tests\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Software Development :: Testing\",\n ],\n)\n", "path": "setup.py"}]} | 1,080 | 177 |
gh_patches_debug_67224 | rasdani/github-patches | git_diff | svthalia__concrexit-1677 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: 'view site' from event detail admin forwards to wrong url
In GitLab by @JobDoesburg on Dec 4, 2019, 19:43
<!--
You want something new.
-->
### One-sentence description
Improve navigation between admin views for event admin
### Motivation
Currently it works terribly.
### Desired functionality
On save, go to the view that makes sense instead of very often going back to the event overview.
Also maybe provide buttons that take you to the event detail overview page, or to the frontend view, etc.
I don't yet have clear suggestions, but I think everyone can come up with their own ideas of what makes sense.
### Suggested implementation
<!--
If you have any notes on how we could achieve this feature,
share them here.
-->
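
One possible direction, as a hedged sketch; it reuses the `admin:events_event_details` URL name that already appears in this module, while the helper itself is hypothetical:

```python
from django.shortcuts import redirect

def redirect_to_event_admin(event_pk):
    # After saving, send the organiser back to the event's admin detail page
    # instead of the generic event changelist.
    return redirect("admin:events_event_details", event_pk)
```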
</issue>
<code>
[start of website/events/admin_views.py]
1 import csv
2
3 from django.conf import settings
4 from django.contrib import messages
5 from django.contrib.admin import helpers
6 from django.contrib.admin.views.decorators import staff_member_required
7 from django.contrib.auth.mixins import PermissionRequiredMixin
8 from django.http import HttpResponse
9 from django.shortcuts import get_object_or_404, redirect
10 from django.utils import timezone
11 from django.utils.decorators import method_decorator
12 from django.utils.text import slugify
13 from django.utils.translation import pgettext_lazy
14 from django.utils.translation import gettext_lazy as _
15 from django.views import View
16 from django.views.generic import DetailView, FormView
17
18 from events import services
19 from events.decorators import organiser_only
20 from events.exceptions import RegistrationError
21 from events.forms import FieldsForm, EventMessageForm
22 from payments.models import Payment
23 from pushnotifications.models import Message, Category
24 from .models import Event, EventRegistration
25
26
27 @method_decorator(staff_member_required, name="dispatch")
28 @method_decorator(organiser_only, name="dispatch")
29 class EventAdminDetails(DetailView, PermissionRequiredMixin):
30 """Render an overview of registrations for the specified event."""
31
32 template_name = "events/admin/details.html"
33 model = Event
34 context_object_name = "event"
35 permission_required = "events.change_event"
36
37 def get_context_data(self, **kwargs):
38 context = super().get_context_data(**kwargs)
39
40 context.update({"payment": Payment, "has_permission": True, "site_url": True})
41
42 return context
43
44
45 @method_decorator(staff_member_required, name="dispatch")
46 @method_decorator(organiser_only, name="dispatch")
47 class RegistrationAdminFields(FormView):
48 """Render a form that allows the user to change the details of their registration.
49
50 The user should be authenticated.
51 """
52
53 form_class = FieldsForm
54 template_name = "admin/change_form.html"
55 registration = None
56 admin = None
57
58 def get_context_data(self, **kwargs):
59 context = super().get_context_data(**kwargs)
60 context.update(
61 {
62 **self.admin.admin_site.each_context(self.request),
63 "add": False,
64 "change": True,
65 "has_view_permission": True,
66 "has_add_permission": False,
67 "has_change_permission": self.request.user.has_perms(
68 "events.change_registration"
69 ),
70 "has_delete_permission": False,
71 "has_editable_inline_admin_formsets": False,
72 "app_label": "events",
73 "opts": self.registration._meta,
74 "is_popup": False,
75 "save_as": False,
76 "save_on_top": False,
77 "original": self.registration,
78 "obj_id": self.registration.pk,
79 "title": _("Change registration fields"),
80 "adminform": helpers.AdminForm(
81 context["form"],
82 ((None, {"fields": context["form"].fields.keys()}),),
83 {},
84 ),
85 }
86 )
87 return context
88
89 def get_form_kwargs(self):
90 kwargs = super().get_form_kwargs()
91 kwargs["fields"] = services.registration_fields(
92 self.request, registration=self.registration
93 )
94 return kwargs
95
96 def form_valid(self, form):
97 values = form.field_values()
98 try:
99 services.update_registration(
100 registration=self.registration, field_values=values
101 )
102 messages.success(self.request, _("Registration successfully saved."))
103 if "_save" in self.request.POST:
104 return redirect(
105 "admin:events_eventregistration_change", self.registration.pk
106 )
107 except RegistrationError as e:
108 messages.error(self.request, e)
109 return self.render_to_response(self.get_context_data(form=form))
110
111 def dispatch(self, request, *args, **kwargs):
112 self.registration = get_object_or_404(
113 EventRegistration, pk=self.kwargs["registration"]
114 )
115 try:
116 if self.registration.event.has_fields:
117 return super().dispatch(request, *args, **kwargs)
118 except RegistrationError:
119 pass
120 return redirect("admin:events_eventregistration_change", self.registration.pk)
121
122
123 @method_decorator(staff_member_required, name="dispatch")
124 @method_decorator(organiser_only, name="dispatch")
125 class EventMessage(FormView):
126 """Renders a form that allows the user to create a push notification for all users registers to the event."""
127
128 form_class = EventMessageForm
129 template_name = "events/admin/message_form.html"
130 admin = None
131 event = None
132
133 def get_context_data(self, **kwargs):
134 context = super().get_context_data(**kwargs)
135 context.update(
136 {
137 **self.admin.admin_site.each_context(self.request),
138 "add": False,
139 "change": True,
140 "has_view_permission": True,
141 "has_add_permission": False,
142 "has_change_permission": self.request.user.has_perms(
143 "events.change_event"
144 ),
145 "has_delete_permission": False,
146 "has_editable_inline_admin_formsets": False,
147 "app_label": "events",
148 "opts": self.event._meta,
149 "is_popup": False,
150 "save_as": False,
151 "save_on_top": False,
152 "original": self.event,
153 "obj_id": self.event.pk,
154 "title": _("Send push notification"),
155 "adminform": helpers.AdminForm(
156 context["form"],
157 ((None, {"fields": context["form"].fields.keys()}),),
158 {},
159 ),
160 }
161 )
162 return context
163
164 def form_valid(self, form):
165 values = form.cleaned_data
166 if not values["url"]:
167 values["url"] = settings.BASE_URL + self.event.get_absolute_url()
168 message = Message(
169 title_en=values["title_en"],
170 body_en=values["body_en"],
171 url=values["url"],
172 category=Category.objects.get(key=Category.EVENT),
173 )
174 message.save()
175 message.users.set([r.member for r in self.event.participants if r.member])
176 message.send()
177
178 messages.success(self.request, _("Message sent successfully."))
179 if "_save" in self.request.POST:
180 return redirect("admin:events_event_details", self.event.pk)
181 return super().form_valid(form)
182
183 def dispatch(self, request, *args, **kwargs):
184 self.event = get_object_or_404(Event, pk=self.kwargs["pk"])
185 return super().dispatch(request, *args, **kwargs)
186
187
188 @method_decorator(staff_member_required, name="dispatch")
189 @method_decorator(organiser_only, name="dispatch")
190 class EventRegistrationsExport(View, PermissionRequiredMixin):
191 """View to export registrations."""
192
193 template_name = "events/admin/details.html"
194 permission_required = "events.change_event"
195
196 def get(self, request, pk):
197 """Export the registration of a specified event.
198
199 :param request: the request object
200 :param pk: the primary key of the event
201 :return: A CSV containing all registrations for the event
202 """
203 event = get_object_or_404(Event, pk=pk)
204 extra_fields = event.registrationinformationfield_set.all()
205 registrations = event.eventregistration_set.all()
206
207 header_fields = (
208 [
209 _("Name"),
210 _("Email"),
211 _("Paid"),
212 _("Present"),
213 _("Status"),
214 _("Phone number"),
215 ]
216 + [field.name for field in extra_fields]
217 + [_("Date"), _("Date cancelled")]
218 )
219
220 rows = []
221 if event.price == 0:
222 header_fields.remove(_("Paid"))
223 for registration in registrations:
224 if registration.member:
225 name = registration.member.get_full_name()
226 else:
227 name = registration.name
228 status = pgettext_lazy("registration status", "registered").capitalize()
229 cancelled = None
230 if registration.date_cancelled:
231
232 if registration.is_late_cancellation():
233 status = pgettext_lazy(
234 "registration status", "late cancellation"
235 ).capitalize()
236 else:
237 status = pgettext_lazy(
238 "registration status", "cancelled"
239 ).capitalize()
240 cancelled = timezone.localtime(registration.date_cancelled)
241
242 elif registration.queue_position:
243 status = pgettext_lazy("registration status", "waiting")
244 data = {
245 _("Name"): name,
246 _("Date"): timezone.localtime(registration.date),
247 _("Present"): _("Yes") if registration.present else "",
248 _("Phone number"): (
249 registration.member.profile.phone_number
250 if registration.member
251 else ""
252 ),
253 _("Email"): (registration.member.email if registration.member else ""),
254 _("Status"): status,
255 _("Date cancelled"): cancelled,
256 }
257 if event.price > 0:
258 if registration.is_paid():
259 data[_("Paid")] = registration.payment.get_type_display()
260 else:
261 data[_("Paid")] = _("No")
262
263 data.update(
264 {
265 field["field"].name: field["value"]
266 for field in registration.information_fields
267 }
268 )
269 rows.append(data)
270
271 response = HttpResponse(content_type="text/csv")
272 writer = csv.DictWriter(response, header_fields)
273 writer.writeheader()
274
275 rows = sorted(
276 rows,
277 key=lambda row: (
278 row[_("Status")]
279 == pgettext_lazy(
280 "registration status", "late cancellation"
281 ).capitalize(),
282 row[_("Date")],
283 ),
284 reverse=True,
285 )
286
287 for row in rows:
288 writer.writerow(row)
289
290 response["Content-Disposition"] = 'attachment; filename="{}.csv"'.format(
291 slugify(event.title)
292 )
293 return response
294
[end of website/events/admin_views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/events/admin_views.py b/website/events/admin_views.py
--- a/website/events/admin_views.py
+++ b/website/events/admin_views.py
@@ -37,7 +37,7 @@
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
- context.update({"payment": Payment, "has_permission": True, "site_url": True})
+ context.update({"payment": Payment, "has_permission": True, "site_url": "/"})
return context
| {"golden_diff": "diff --git a/website/events/admin_views.py b/website/events/admin_views.py\n--- a/website/events/admin_views.py\n+++ b/website/events/admin_views.py\n@@ -37,7 +37,7 @@\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n \n- context.update({\"payment\": Payment, \"has_permission\": True, \"site_url\": True})\n+ context.update({\"payment\": Payment, \"has_permission\": True, \"site_url\": \"/\"})\n \n return context\n", "issue": "Bug: 'view site' from event detail admin forwards to wrong url\nIn GitLab by @JobDoesburg on Dec 4, 2019, 19:43\n\n<!--\n You want something new.\n-->\n\n### One-sentence description\n\nImprove navigation between admin views for event admin\n\n### Motivation\n\nCurrently it works terrible. \n\n### Desired functionality\n\nOn save, go to the view that makes sense instead of very often go back to the event overview. \n\nAlso maybe provide buttons that take you to the event detail overview page, or to the frontend view, etc etc\n\nI don't yet have clear suggestions but I think everyone can think of their own what makes sense.\n\n\n### Suggested implementation\n\n<!--\n If you have any notes on how we could achieve this feature,\n share them here.\n-->\n", "before_files": [{"content": "import csv\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.admin import helpers\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.text import slugify\nfrom django.utils.translation import pgettext_lazy\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\nfrom django.views.generic import DetailView, FormView\n\nfrom events import services\nfrom events.decorators import organiser_only\nfrom events.exceptions import RegistrationError\nfrom events.forms import FieldsForm, EventMessageForm\nfrom payments.models import Payment\nfrom pushnotifications.models import Message, Category\nfrom .models import Event, EventRegistration\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventAdminDetails(DetailView, PermissionRequiredMixin):\n \"\"\"Render an overview of registrations for the specified event.\"\"\"\n\n template_name = \"events/admin/details.html\"\n model = Event\n context_object_name = \"event\"\n permission_required = \"events.change_event\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context.update({\"payment\": Payment, \"has_permission\": True, \"site_url\": True})\n\n return context\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass RegistrationAdminFields(FormView):\n \"\"\"Render a form that allows the user to change the details of their registration.\n\n The user should be authenticated.\n \"\"\"\n\n form_class = FieldsForm\n template_name = \"admin/change_form.html\"\n registration = None\n admin = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n **self.admin.admin_site.each_context(self.request),\n \"add\": False,\n \"change\": True,\n \"has_view_permission\": True,\n \"has_add_permission\": 
False,\n \"has_change_permission\": self.request.user.has_perms(\n \"events.change_registration\"\n ),\n \"has_delete_permission\": False,\n \"has_editable_inline_admin_formsets\": False,\n \"app_label\": \"events\",\n \"opts\": self.registration._meta,\n \"is_popup\": False,\n \"save_as\": False,\n \"save_on_top\": False,\n \"original\": self.registration,\n \"obj_id\": self.registration.pk,\n \"title\": _(\"Change registration fields\"),\n \"adminform\": helpers.AdminForm(\n context[\"form\"],\n ((None, {\"fields\": context[\"form\"].fields.keys()}),),\n {},\n ),\n }\n )\n return context\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs[\"fields\"] = services.registration_fields(\n self.request, registration=self.registration\n )\n return kwargs\n\n def form_valid(self, form):\n values = form.field_values()\n try:\n services.update_registration(\n registration=self.registration, field_values=values\n )\n messages.success(self.request, _(\"Registration successfully saved.\"))\n if \"_save\" in self.request.POST:\n return redirect(\n \"admin:events_eventregistration_change\", self.registration.pk\n )\n except RegistrationError as e:\n messages.error(self.request, e)\n return self.render_to_response(self.get_context_data(form=form))\n\n def dispatch(self, request, *args, **kwargs):\n self.registration = get_object_or_404(\n EventRegistration, pk=self.kwargs[\"registration\"]\n )\n try:\n if self.registration.event.has_fields:\n return super().dispatch(request, *args, **kwargs)\n except RegistrationError:\n pass\n return redirect(\"admin:events_eventregistration_change\", self.registration.pk)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventMessage(FormView):\n \"\"\"Renders a form that allows the user to create a push notification for all users registers to the event.\"\"\"\n\n form_class = EventMessageForm\n template_name = \"events/admin/message_form.html\"\n admin = None\n event = None\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\n **self.admin.admin_site.each_context(self.request),\n \"add\": False,\n \"change\": True,\n \"has_view_permission\": True,\n \"has_add_permission\": False,\n \"has_change_permission\": self.request.user.has_perms(\n \"events.change_event\"\n ),\n \"has_delete_permission\": False,\n \"has_editable_inline_admin_formsets\": False,\n \"app_label\": \"events\",\n \"opts\": self.event._meta,\n \"is_popup\": False,\n \"save_as\": False,\n \"save_on_top\": False,\n \"original\": self.event,\n \"obj_id\": self.event.pk,\n \"title\": _(\"Send push notification\"),\n \"adminform\": helpers.AdminForm(\n context[\"form\"],\n ((None, {\"fields\": context[\"form\"].fields.keys()}),),\n {},\n ),\n }\n )\n return context\n\n def form_valid(self, form):\n values = form.cleaned_data\n if not values[\"url\"]:\n values[\"url\"] = settings.BASE_URL + self.event.get_absolute_url()\n message = Message(\n title_en=values[\"title_en\"],\n body_en=values[\"body_en\"],\n url=values[\"url\"],\n category=Category.objects.get(key=Category.EVENT),\n )\n message.save()\n message.users.set([r.member for r in self.event.participants if r.member])\n message.send()\n\n messages.success(self.request, _(\"Message sent successfully.\"))\n if \"_save\" in self.request.POST:\n return redirect(\"admin:events_event_details\", self.event.pk)\n return super().form_valid(form)\n\n def dispatch(self, request, *args, **kwargs):\n 
self.event = get_object_or_404(Event, pk=self.kwargs[\"pk\"])\n return super().dispatch(request, *args, **kwargs)\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(organiser_only, name=\"dispatch\")\nclass EventRegistrationsExport(View, PermissionRequiredMixin):\n \"\"\"View to export registrations.\"\"\"\n\n template_name = \"events/admin/details.html\"\n permission_required = \"events.change_event\"\n\n def get(self, request, pk):\n \"\"\"Export the registration of a specified event.\n\n :param request: the request object\n :param pk: the primary key of the event\n :return: A CSV containing all registrations for the event\n \"\"\"\n event = get_object_or_404(Event, pk=pk)\n extra_fields = event.registrationinformationfield_set.all()\n registrations = event.eventregistration_set.all()\n\n header_fields = (\n [\n _(\"Name\"),\n _(\"Email\"),\n _(\"Paid\"),\n _(\"Present\"),\n _(\"Status\"),\n _(\"Phone number\"),\n ]\n + [field.name for field in extra_fields]\n + [_(\"Date\"), _(\"Date cancelled\")]\n )\n\n rows = []\n if event.price == 0:\n header_fields.remove(_(\"Paid\"))\n for registration in registrations:\n if registration.member:\n name = registration.member.get_full_name()\n else:\n name = registration.name\n status = pgettext_lazy(\"registration status\", \"registered\").capitalize()\n cancelled = None\n if registration.date_cancelled:\n\n if registration.is_late_cancellation():\n status = pgettext_lazy(\n \"registration status\", \"late cancellation\"\n ).capitalize()\n else:\n status = pgettext_lazy(\n \"registration status\", \"cancelled\"\n ).capitalize()\n cancelled = timezone.localtime(registration.date_cancelled)\n\n elif registration.queue_position:\n status = pgettext_lazy(\"registration status\", \"waiting\")\n data = {\n _(\"Name\"): name,\n _(\"Date\"): timezone.localtime(registration.date),\n _(\"Present\"): _(\"Yes\") if registration.present else \"\",\n _(\"Phone number\"): (\n registration.member.profile.phone_number\n if registration.member\n else \"\"\n ),\n _(\"Email\"): (registration.member.email if registration.member else \"\"),\n _(\"Status\"): status,\n _(\"Date cancelled\"): cancelled,\n }\n if event.price > 0:\n if registration.is_paid():\n data[_(\"Paid\")] = registration.payment.get_type_display()\n else:\n data[_(\"Paid\")] = _(\"No\")\n\n data.update(\n {\n field[\"field\"].name: field[\"value\"]\n for field in registration.information_fields\n }\n )\n rows.append(data)\n\n response = HttpResponse(content_type=\"text/csv\")\n writer = csv.DictWriter(response, header_fields)\n writer.writeheader()\n\n rows = sorted(\n rows,\n key=lambda row: (\n row[_(\"Status\")]\n == pgettext_lazy(\n \"registration status\", \"late cancellation\"\n ).capitalize(),\n row[_(\"Date\")],\n ),\n reverse=True,\n )\n\n for row in rows:\n writer.writerow(row)\n\n response[\"Content-Disposition\"] = 'attachment; filename=\"{}.csv\"'.format(\n slugify(event.title)\n )\n return response\n", "path": "website/events/admin_views.py"}]} | 3,471 | 117 |
gh_patches_debug_2230 | rasdani/github-patches | git_diff | getsentry__sentry-18644 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BufferError: Local: Queue full
I am receiving this error once every 2-4 days and I need to restart Sentry to fix it. This started after moving to the Docker version of Sentry.
I never noticed this being an issue on 9.1.2, which also had Clickhouse and Snuba running, but no Kafka.
> https://observ.app/share/issue/4e4f208a500d48cc898770930706959a/
I am not sure where to look or what to monitor to see the queue being spoken of, or how I can flush or enlarge it if needed.
`sentry queues list` showed all zeros, so it does not look like there is a massive backlog of events.
Any help is appreciated!
</issue>
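"Local: Queue full" is raised by the Kafka client when its local producer buffer fills up, which is separate from the task queues that `sentry queues list` reports on. As a rough illustration of the underlying pattern — assuming the confluent_kafka client and an illustrative broker address, not Sentry's actual wiring — an asynchronous producer has to be polled so delivery reports are served and that buffer can drain:

```
from confluent_kafka import Producer

# Illustrative configuration; the broker address is an assumption.
producer = Producer({"bootstrap.servers": "localhost:9092"})


def publish(topic, value, key=None):
    try:
        producer.produce(topic=topic, value=value, key=key)
    except BufferError:
        # The local queue is full: serve pending delivery reports, then retry once.
        producer.poll(1)
        producer.produce(topic=topic, value=value, key=key)
    # Serve delivery callbacks without blocking so the local queue keeps draining.
    producer.poll(0)
```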
<code>
[start of src/sentry/utils/pubsub.py]
1 from __future__ import absolute_import
2
3 import redis
4 import logging
5
6 from threading import Thread
7 from six.moves.queue import Queue, Full
8
9
10 class QueuedPublisherService(object):
11 """
12 A publisher that queues items locally and publishes them to a
13 remote pubsub service on a background thread.
14
15 Maintains a lossy internal queue for posting, will discard the
16 value if the queue is full or not immediately available. Will also
17 drop items if the publish operation to the remote service fails.
18 """
19
20 def __init__(self, publisher):
21 self._started = False
22 self.publisher = publisher
23
24 def _start(self):
25 if self._started:
26 return True
27
28 self.q = q = Queue(maxsize=100)
29
30 def worker():
31 while True:
32 (channel, key, value) = q.get()
33 try:
34 self.publisher.publish(channel, key=key, value=value)
35 except Exception as e:
36 logger = logging.getLogger("sentry.errors")
37 logger.debug("could not submit event to pubsub: %s" % e)
38 finally:
39 q.task_done()
40
41 t = Thread(target=worker)
42 t.setDaemon(True)
43 t.start()
44
45 self._started = True
46 return True
47
48 def publish(self, channel, value, key=None):
49 if not self._start():
50 return
51
52 try:
53 self.q.put((channel, key, value), block=False)
54 except Full:
55 return
56
57
58 class RedisPublisher(object):
59 def __init__(self, connection):
60 self.rds = None if connection is None else redis.StrictRedis(**connection)
61
62 def publish(self, channel, value, key=None):
63 if self.rds is not None:
64 self.rds.publish(channel, value)
65
66
67 class KafkaPublisher(object):
68 def __init__(self, connection, asynchronous=True):
69 from confluent_kafka import Producer
70
71 self.producer = Producer(connection or {})
72 self.asynchronous = asynchronous
73
74 def publish(self, channel, value, key=None):
75 self.producer.produce(topic=channel, value=value, key=key)
76 if not self.asynchronous:
77 self.producer.flush()
78
[end of src/sentry/utils/pubsub.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/utils/pubsub.py b/src/sentry/utils/pubsub.py
--- a/src/sentry/utils/pubsub.py
+++ b/src/sentry/utils/pubsub.py
@@ -73,5 +73,7 @@
def publish(self, channel, value, key=None):
self.producer.produce(topic=channel, value=value, key=key)
- if not self.asynchronous:
+ if self.asynchronous:
+ self.producer.poll(0)
+ else:
self.producer.flush()
| {"golden_diff": "diff --git a/src/sentry/utils/pubsub.py b/src/sentry/utils/pubsub.py\n--- a/src/sentry/utils/pubsub.py\n+++ b/src/sentry/utils/pubsub.py\n@@ -73,5 +73,7 @@\n \n def publish(self, channel, value, key=None):\n self.producer.produce(topic=channel, value=value, key=key)\n- if not self.asynchronous:\n+ if self.asynchronous:\n+ self.producer.poll(0)\n+ else:\n self.producer.flush()\n", "issue": "BufferError: Local: Queue full\nI am receiving this error once every 2-4 days and I need to restart Sentry to fix it. This started after moving to the Docker version of Sentry.\r\n\r\nI never noticed this being an issue on 9.1.2 also with Clickhouse and Snuba running, but without Kafka.\r\n\r\n> https://observ.app/share/issue/4e4f208a500d48cc898770930706959a/\r\n\r\nI am not sure where to look / poke / monitor to see this queue that is being spoken of and how I can flush it / enlarge it if needed.\r\n\r\n`sentry queues list` showed all 0's so it's not looking like there is a massive backlog of events.\r\n\r\nAny help is appreciated!\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport redis\nimport logging\n\nfrom threading import Thread\nfrom six.moves.queue import Queue, Full\n\n\nclass QueuedPublisherService(object):\n \"\"\"\n A publisher that queues items locally and publishes them to a\n remote pubsub service on a background thread.\n\n Maintains a lossy internal queue for posting, will discard the\n value if the queue is full or not immediately available. Will also\n drop items if the publish operation to the remote service fails.\n \"\"\"\n\n def __init__(self, publisher):\n self._started = False\n self.publisher = publisher\n\n def _start(self):\n if self._started:\n return True\n\n self.q = q = Queue(maxsize=100)\n\n def worker():\n while True:\n (channel, key, value) = q.get()\n try:\n self.publisher.publish(channel, key=key, value=value)\n except Exception as e:\n logger = logging.getLogger(\"sentry.errors\")\n logger.debug(\"could not submit event to pubsub: %s\" % e)\n finally:\n q.task_done()\n\n t = Thread(target=worker)\n t.setDaemon(True)\n t.start()\n\n self._started = True\n return True\n\n def publish(self, channel, value, key=None):\n if not self._start():\n return\n\n try:\n self.q.put((channel, key, value), block=False)\n except Full:\n return\n\n\nclass RedisPublisher(object):\n def __init__(self, connection):\n self.rds = None if connection is None else redis.StrictRedis(**connection)\n\n def publish(self, channel, value, key=None):\n if self.rds is not None:\n self.rds.publish(channel, value)\n\n\nclass KafkaPublisher(object):\n def __init__(self, connection, asynchronous=True):\n from confluent_kafka import Producer\n\n self.producer = Producer(connection or {})\n self.asynchronous = asynchronous\n\n def publish(self, channel, value, key=None):\n self.producer.produce(topic=channel, value=value, key=key)\n if not self.asynchronous:\n self.producer.flush()\n", "path": "src/sentry/utils/pubsub.py"}]} | 1,329 | 115 |
gh_patches_debug_28661 | rasdani/github-patches | git_diff | Kinto__kinto-696 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Inconsistency with 404 response on empty collections
- Set `read_only` to true
- Give read-only access to the PostgreSQL user
- Give `read` permission to everyone on a bucket `foo`
- Going to `/buckets/foo/collections/unknown` gives 404
- Going to `/buckets/foo/collections/unknown/records` gives 503
Listing the records of an unknown collection should definitely give 404, except with the `default` bucket plugin.
Very related to https://github.com/Kinto/kinto/issues/558
</issue>
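One way to get the behaviour described above is to resolve the parent collection first and turn a missing parent into a 404 before any record query is attempted. The sketch below is framework-agnostic and uses made-up names (`Storage`, `HTTPError`) rather than Kinto's real API:

```
class HTTPError(Exception):
    def __init__(self, status):
        super().__init__(status)
        self.status = status


class Storage:
    def __init__(self):
        self.collections = {"/buckets/foo/collections/articles": {"id": "articles"}}
        self.records = {}

    def get_collection(self, uri):
        # Unknown parent -> 404, rather than a 503 from a deeper failing query.
        if uri not in self.collections:
            raise HTTPError(404)
        return self.collections[uri]

    def list_records(self, collection_uri):
        self.get_collection(collection_uri)  # parent existence check happens first
        return self.records.get(collection_uri, [])


store = Storage()
print(store.list_records("/buckets/foo/collections/articles"))  # []
try:
    store.list_records("/buckets/foo/collections/unknown")
except HTTPError as e:
    print(e.status)  # 404
```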
<code>
[start of kinto/views/records.py]
1 import copy
2
3 import jsonschema
4 from kinto.core import resource
5 from kinto.core.errors import raise_invalid
6 from jsonschema import exceptions as jsonschema_exceptions
7 from pyramid.security import Authenticated
8 from pyramid.settings import asbool
9
10 from kinto.views import RelaxedUUID, object_exists_or_404
11
12
13 class RecordSchema(resource.ResourceSchema):
14 class Options:
15 preserve_unknown = True
16
17
18 _parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'
19
20
21 @resource.register(name='record',
22 collection_path=_parent_path + '/records',
23 record_path=_parent_path + '/records/{{id}}')
24 class Record(resource.ShareableResource):
25
26 mapping = RecordSchema()
27 schema_field = 'schema'
28
29 def __init__(self, *args, **kwargs):
30 super(Record, self).__init__(*args, **kwargs)
31
32 self.model.id_generator = RelaxedUUID()
33
34 # Check if already fetched before (in batch).
35 collections = self.request.bound_data.setdefault('collections', {})
36 collection_uri = self.get_parent_id(self.request)
37 if collection_uri not in collections:
38 # Unknown yet, fetch from storage.
39 collection_parent_id = '/buckets/%s' % self.bucket_id
40 collection = object_exists_or_404(self.request,
41 collection_id='collection',
42 parent_id=collection_parent_id,
43 object_id=self.collection_id)
44 collections[collection_uri] = collection
45
46 self._collection = collections[collection_uri]
47
48 def get_parent_id(self, request):
49 self.bucket_id = request.matchdict['bucket_id']
50 self.collection_id = request.matchdict['collection_id']
51 return '/buckets/%s/collections/%s' % (self.bucket_id,
52 self.collection_id)
53
54 def is_known_field(self, field_name):
55 """Without schema, any field is considered as known."""
56 return True
57
58 def process_record(self, new, old=None):
59 """Validate records against collection schema, if any."""
60 new = super(Record, self).process_record(new, old)
61
62 schema = self._collection.get('schema')
63 settings = self.request.registry.settings
64 schema_validation = 'experimental_collection_schema_validation'
65 if not schema or not asbool(settings.get(schema_validation)):
66 return new
67
68 collection_timestamp = self._collection[self.model.modified_field]
69
70 try:
71 stripped = copy.deepcopy(new)
72 stripped.pop(self.model.id_field, None)
73 stripped.pop(self.model.modified_field, None)
74 stripped.pop(self.model.permissions_field, None)
75 stripped.pop(self.schema_field, None)
76 jsonschema.validate(stripped, schema)
77 except jsonschema_exceptions.ValidationError as e:
78 try:
79 field = e.path.pop() if e.path else e.validator_value.pop()
80 except AttributeError:
81 field = None
82 raise_invalid(self.request, name=field, description=e.message)
83
84 new[self.schema_field] = collection_timestamp
85 return new
86
87 def collection_get(self):
88 result = super(Record, self).collection_get()
89 self._handle_cache_expires(self.request.response)
90 return result
91
92 def get(self):
93 result = super(Record, self).get()
94 self._handle_cache_expires(self.request.response)
95 return result
96
97 def _handle_cache_expires(self, response):
98 """If the parent collection defines a ``cache_expires`` attribute,
99 then cache-control response headers are sent.
100
101 .. note::
102
103 Those headers are also sent if the
104 ``kinto.record_cache_expires_seconds`` setting is defined.
105 """
106 is_anonymous = Authenticated not in self.request.effective_principals
107 if not is_anonymous:
108 return
109
110 cache_expires = self._collection.get('cache_expires')
111 if cache_expires is None:
112 by_bucket = '%s_record_cache_expires_seconds' % (self.bucket_id)
113 by_collection = '%s_%s_record_cache_expires_seconds' % (
114 self.bucket_id, self.collection_id)
115 settings = self.request.registry.settings
116 cache_expires = settings.get(by_collection,
117 settings.get(by_bucket))
118
119 if cache_expires is not None:
120 response.cache_expires(seconds=int(cache_expires))
121
[end of kinto/views/records.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/views/records.py b/kinto/views/records.py
--- a/kinto/views/records.py
+++ b/kinto/views/records.py
@@ -26,23 +26,22 @@
mapping = RecordSchema()
schema_field = 'schema'
- def __init__(self, *args, **kwargs):
- super(Record, self).__init__(*args, **kwargs)
-
- self.model.id_generator = RelaxedUUID()
-
+ def __init__(self, request, **kwargs):
+ # Before all, first check that the parent collection exists.
# Check if already fetched before (in batch).
- collections = self.request.bound_data.setdefault('collections', {})
- collection_uri = self.get_parent_id(self.request)
+ collections = request.bound_data.setdefault('collections', {})
+ collection_uri = self.get_parent_id(request)
if collection_uri not in collections:
# Unknown yet, fetch from storage.
collection_parent_id = '/buckets/%s' % self.bucket_id
- collection = object_exists_or_404(self.request,
+ collection = object_exists_or_404(request,
collection_id='collection',
parent_id=collection_parent_id,
object_id=self.collection_id)
collections[collection_uri] = collection
+ super(Record, self).__init__(request, **kwargs)
+ self.model.id_generator = RelaxedUUID()
self._collection = collections[collection_uri]
def get_parent_id(self, request):
| {"golden_diff": "diff --git a/kinto/views/records.py b/kinto/views/records.py\n--- a/kinto/views/records.py\n+++ b/kinto/views/records.py\n@@ -26,23 +26,22 @@\n mapping = RecordSchema()\n schema_field = 'schema'\n \n- def __init__(self, *args, **kwargs):\n- super(Record, self).__init__(*args, **kwargs)\n-\n- self.model.id_generator = RelaxedUUID()\n-\n+ def __init__(self, request, **kwargs):\n+ # Before all, first check that the parent collection exists.\n # Check if already fetched before (in batch).\n- collections = self.request.bound_data.setdefault('collections', {})\n- collection_uri = self.get_parent_id(self.request)\n+ collections = request.bound_data.setdefault('collections', {})\n+ collection_uri = self.get_parent_id(request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = '/buckets/%s' % self.bucket_id\n- collection = object_exists_or_404(self.request,\n+ collection = object_exists_or_404(request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n \n+ super(Record, self).__init__(request, **kwargs)\n+ self.model.id_generator = RelaxedUUID()\n self._collection = collections[collection_uri]\n \n def get_parent_id(self, request):\n", "issue": "Inconsistency with 404 response on empty collections\n- Set `read_only` to true\n- Give readonly access to the postgresql user\n- Give `read` permission to everyone on a bucket `foo`\n- Going to `/buckets/foo/collections/unknown` gives 404\n- Going to `/buckets/foo/collections/unknown/records` gives 503\n\nListing the records of an unknown collection should definitely give 404, except with the `default` bucket plugin.\n\nVery related to https://github.com/Kinto/kinto/issues/558\n\n", "before_files": [{"content": "import copy\n\nimport jsonschema\nfrom kinto.core import resource\nfrom kinto.core.errors import raise_invalid\nfrom jsonschema import exceptions as jsonschema_exceptions\nfrom pyramid.security import Authenticated\nfrom pyramid.settings import asbool\n\nfrom kinto.views import RelaxedUUID, object_exists_or_404\n\n\nclass RecordSchema(resource.ResourceSchema):\n class Options:\n preserve_unknown = True\n\n\n_parent_path = '/buckets/{{bucket_id}}/collections/{{collection_id}}'\n\n\[email protected](name='record',\n collection_path=_parent_path + '/records',\n record_path=_parent_path + '/records/{{id}}')\nclass Record(resource.ShareableResource):\n\n mapping = RecordSchema()\n schema_field = 'schema'\n\n def __init__(self, *args, **kwargs):\n super(Record, self).__init__(*args, **kwargs)\n\n self.model.id_generator = RelaxedUUID()\n\n # Check if already fetched before (in batch).\n collections = self.request.bound_data.setdefault('collections', {})\n collection_uri = self.get_parent_id(self.request)\n if collection_uri not in collections:\n # Unknown yet, fetch from storage.\n collection_parent_id = '/buckets/%s' % self.bucket_id\n collection = object_exists_or_404(self.request,\n collection_id='collection',\n parent_id=collection_parent_id,\n object_id=self.collection_id)\n collections[collection_uri] = collection\n\n self._collection = collections[collection_uri]\n\n def get_parent_id(self, request):\n self.bucket_id = request.matchdict['bucket_id']\n self.collection_id = request.matchdict['collection_id']\n return '/buckets/%s/collections/%s' % (self.bucket_id,\n self.collection_id)\n\n def is_known_field(self, field_name):\n \"\"\"Without schema, any field is considered as known.\"\"\"\n 
return True\n\n def process_record(self, new, old=None):\n \"\"\"Validate records against collection schema, if any.\"\"\"\n new = super(Record, self).process_record(new, old)\n\n schema = self._collection.get('schema')\n settings = self.request.registry.settings\n schema_validation = 'experimental_collection_schema_validation'\n if not schema or not asbool(settings.get(schema_validation)):\n return new\n\n collection_timestamp = self._collection[self.model.modified_field]\n\n try:\n stripped = copy.deepcopy(new)\n stripped.pop(self.model.id_field, None)\n stripped.pop(self.model.modified_field, None)\n stripped.pop(self.model.permissions_field, None)\n stripped.pop(self.schema_field, None)\n jsonschema.validate(stripped, schema)\n except jsonschema_exceptions.ValidationError as e:\n try:\n field = e.path.pop() if e.path else e.validator_value.pop()\n except AttributeError:\n field = None\n raise_invalid(self.request, name=field, description=e.message)\n\n new[self.schema_field] = collection_timestamp\n return new\n\n def collection_get(self):\n result = super(Record, self).collection_get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def get(self):\n result = super(Record, self).get()\n self._handle_cache_expires(self.request.response)\n return result\n\n def _handle_cache_expires(self, response):\n \"\"\"If the parent collection defines a ``cache_expires`` attribute,\n then cache-control response headers are sent.\n\n .. note::\n\n Those headers are also sent if the\n ``kinto.record_cache_expires_seconds`` setting is defined.\n \"\"\"\n is_anonymous = Authenticated not in self.request.effective_principals\n if not is_anonymous:\n return\n\n cache_expires = self._collection.get('cache_expires')\n if cache_expires is None:\n by_bucket = '%s_record_cache_expires_seconds' % (self.bucket_id)\n by_collection = '%s_%s_record_cache_expires_seconds' % (\n self.bucket_id, self.collection_id)\n settings = self.request.registry.settings\n cache_expires = settings.get(by_collection,\n settings.get(by_bucket))\n\n if cache_expires is not None:\n response.cache_expires(seconds=int(cache_expires))\n", "path": "kinto/views/records.py"}]} | 1,811 | 329 |
gh_patches_debug_39616 | rasdani/github-patches | git_diff | wagtail__wagtail-1516 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tabbed interface in admin doesn't work with i18n

I can't toggle tabs in the page editor when Wagtail is configured with USE_I18N = True and the language isn't English (Russian, in my case). It seems that the tabbed_interface template uses the slugify template tag to produce ids for elements; this works fine unless a language other than English is used. In that case slugify produces empty output, and the JS bindings do not work correctly.
</issue>
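The root cause is that Django's `slugify` deletes all non-ASCII letters, so a Russian heading slugifies to an empty string and every tab ends up with the same empty id. Below is a standalone sketch (not Wagtail's actual helper) of a more cautious slug function that escapes non-ASCII letters instead of dropping them:

```
import re
import unicodedata


def cautious_slugify(value):
    # Decompose accents so their base Latin letters survive the strip below.
    value = unicodedata.normalize('NFKD', value)
    # Drop punctuation but keep word characters (including Cyrillic), spaces, hyphens.
    value = re.sub(r'[^\w\s-]', '', value)
    # Escape any remaining non-ASCII letters instead of deleting them.
    value = value.encode('ascii', 'backslashreplace').decode('ascii')
    # Remove the escape backslashes, collapse whitespace to hyphens, lowercase.
    value = re.sub(r'[^\w\s-]', '', value)
    return re.sub(r'[-\s]+', '-', value).strip('-_').lower()


print(cautious_slugify('Body content'))  # body-content
print(cautious_slugify('Страница'))      # u0421u0442u0440u0430u043du0438u0446u0430
```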
<code>
[start of wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py]
1 from __future__ import unicode_literals
2
3 from django.conf import settings
4 from django import template
5 from django.contrib.humanize.templatetags.humanize import intcomma
6
7 from wagtail.wagtailcore import hooks
8 from wagtail.wagtailcore.models import get_navigation_menu_items, UserPagePermissionsProxy, PageViewRestriction
9 from wagtail.wagtailcore.utils import camelcase_to_underscore, escape_script
10 from wagtail.wagtailadmin.menu import admin_menu
11
12
13 register = template.Library()
14
15 register.filter('intcomma', intcomma)
16
17 @register.inclusion_tag('wagtailadmin/shared/explorer_nav.html')
18 def explorer_nav():
19 return {
20 'nodes': get_navigation_menu_items()
21 }
22
23
24 @register.inclusion_tag('wagtailadmin/shared/explorer_nav_child.html')
25 def explorer_subnav(nodes):
26 return {
27 'nodes': nodes
28 }
29
30
31 @register.inclusion_tag('wagtailadmin/shared/main_nav.html', takes_context=True)
32 def main_nav(context):
33 request = context['request']
34
35 return {
36 'menu_html': admin_menu.render_html(request),
37 'request': request,
38 }
39
40 @register.simple_tag
41 def main_nav_js():
42 return admin_menu.media['js']
43
44
45 @register.filter("ellipsistrim")
46 def ellipsistrim(value, max_length):
47 if len(value) > max_length:
48 truncd_val = value[:max_length]
49 if not len(value) == (max_length + 1) and value[max_length + 1] != " ":
50 truncd_val = truncd_val[:truncd_val.rfind(" ")]
51 return truncd_val + "..."
52 return value
53
54
55 @register.filter
56 def fieldtype(bound_field):
57 try:
58 return camelcase_to_underscore(bound_field.field.__class__.__name__)
59 except AttributeError:
60 try:
61 return camelcase_to_underscore(bound_field.__class__.__name__)
62 except AttributeError:
63 return ""
64
65
66 @register.filter
67 def widgettype(bound_field):
68 try:
69 return camelcase_to_underscore(bound_field.field.widget.__class__.__name__)
70 except AttributeError:
71 try:
72 return camelcase_to_underscore(bound_field.widget.__class__.__name__)
73 except AttributeError:
74 return ""
75
76
77
78 @register.filter
79 def meta_description(model):
80 try:
81 return model.model_class()._meta.description
82 except:
83 return ""
84
85
86 @register.assignment_tag(takes_context=True)
87 def page_permissions(context, page):
88 """
89 Usage: {% page_permissions page as page_perms %}
90 Sets the variable 'page_perms' to a PagePermissionTester object that can be queried to find out
91 what actions the current logged-in user can perform on the given page.
92 """
93 # Create a UserPagePermissionsProxy object to represent the user's global permissions, and
94 # cache it in the context for the duration of the page request, if one does not exist already
95 if 'user_page_permissions' not in context:
96 context['user_page_permissions'] = UserPagePermissionsProxy(context['request'].user)
97
98 # Now retrieve a PagePermissionTester from it, specific to the given page
99 return context['user_page_permissions'].for_page(page)
100
101
102 @register.assignment_tag(takes_context=True)
103 def test_page_is_public(context, page):
104 """
105 Usage: {% test_page_is_public page as is_public %}
106 Sets 'is_public' to True iff there are no page view restrictions in place on
107 this page.
108 Caches the list of page view restrictions in the context, to avoid repeated
109 DB queries on repeated calls.
110 """
111 if 'all_page_view_restriction_paths' not in context:
112 context['all_page_view_restriction_paths'] = PageViewRestriction.objects.select_related('page').values_list('page__path', flat=True)
113
114 is_private = any([
115 page.path.startswith(restricted_path)
116 for restricted_path in context['all_page_view_restriction_paths']
117 ])
118
119 return not is_private
120
121
122 @register.simple_tag
123 def hook_output(hook_name):
124 """
125 Example: {% hook_output 'insert_editor_css' %}
126 Whenever we have a hook whose functions take no parameters and return a string, this tag can be used
127 to output the concatenation of all of those return values onto the page.
128 Note that the output is not escaped - it is the hook function's responsibility to escape unsafe content.
129 """
130 snippets = [fn() for fn in hooks.get_hooks(hook_name)]
131 return ''.join(snippets)
132
133
134 @register.assignment_tag
135 def usage_count_enabled():
136 return getattr(settings, 'WAGTAIL_USAGE_COUNT_ENABLED', False)
137
138
139 @register.assignment_tag
140 def base_url_setting():
141 return getattr(settings, 'BASE_URL', None)
142
143
144 class EscapeScriptNode(template.Node):
145 TAG_NAME = 'escapescript'
146
147 def __init__(self, nodelist):
148 super(EscapeScriptNode, self).__init__()
149 self.nodelist = nodelist
150
151 def render(self, context):
152 out = self.nodelist.render(context)
153 return escape_script(out)
154
155 @classmethod
156 def handle(cls, parser, token):
157 nodelist = parser.parse(('end' + EscapeScriptNode.TAG_NAME,))
158 parser.delete_first_token()
159 return cls(nodelist)
160
161 register.tag(EscapeScriptNode.TAG_NAME, EscapeScriptNode.handle)
162
163
164 # Helpers for Widget.render_with_errors, our extension to the Django widget API that allows widgets to
165 # take on the responsibility of rendering their own error messages
166 @register.filter
167 def render_with_errors(bound_field):
168 """
169 Usage: {{ field|render_with_errors }} as opposed to {{ field }}.
170 If the field (a BoundField instance) has errors on it, and the associated widget implements
171 a render_with_errors method, call that; otherwise, call the regular widget rendering mechanism.
172 """
173 widget = bound_field.field.widget
174 if bound_field.errors and hasattr(widget, 'render_with_errors'):
175 return widget.render_with_errors(bound_field.html_name, bound_field.value(), attrs={'id': bound_field.auto_id}, errors=bound_field.errors)
176 else:
177 return bound_field.as_widget()
178
179 @register.filter
180 def has_unrendered_errors(bound_field):
181 """
182 Return true if this field has errors that were not accounted for by render_with_errors, because
183 the widget does not support the render_with_errors method
184 """
185 return bound_field.errors and not hasattr(bound_field.field.widget, 'render_with_errors')
186
[end of wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py]
[start of wagtail/wagtailcore/utils.py]
1 import re
2 from six import string_types
3
4 from django.db.models import Model
5 from django.apps import apps
6
7
8 def camelcase_to_underscore(str):
9 # http://djangosnippets.org/snippets/585/
10 return re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\1', str).lower().strip('_')
11
12
13 def resolve_model_string(model_string, default_app=None):
14 """
15 Resolve an 'app_label.model_name' string into an actual model class.
16 If a model class is passed in, just return that.
17 """
18 if isinstance(model_string, string_types):
19 try:
20 app_label, model_name = model_string.split(".")
21 except ValueError:
22 if default_app is not None:
23 # If we can't split, assume a model in current app
24 app_label = default_app
25 model_name = model_string
26 else:
27 raise ValueError("Can not resolve {0!r} into a model. Model names "
28 "should be in the form app_label.model_name".format(
29 model_string), model_string)
30
31 return apps.get_model(app_label, model_name)
32
33 elif isinstance(model_string, type) and issubclass(model_string, Model):
34 return model_string
35
36 else:
37 raise LookupError("Can not resolve {0!r} into a model".format(model_string), model_string)
38
39
40 SCRIPT_RE = re.compile(r'<(-*)/script>')
41 def escape_script(text):
42 """
43 Escape `</script>` tags in 'text' so that it can be placed within a `<script>` block without
44 accidentally closing it. A '-' character will be inserted for each time it is escaped:
45 `<-/script>`, `<--/script>` etc.
46 """
47 return SCRIPT_RE.sub(r'<-\1/script>', text)
48
[end of wagtail/wagtailcore/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py b/wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py
--- a/wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py
+++ b/wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py
@@ -3,10 +3,12 @@
from django.conf import settings
from django import template
from django.contrib.humanize.templatetags.humanize import intcomma
+from django.template.defaultfilters import stringfilter
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.models import get_navigation_menu_items, UserPagePermissionsProxy, PageViewRestriction
from wagtail.wagtailcore.utils import camelcase_to_underscore, escape_script
+from wagtail.wagtailcore.utils import cautious_slugify as _cautious_slugify
from wagtail.wagtailadmin.menu import admin_menu
@@ -183,3 +185,9 @@
the widget does not support the render_with_errors method
"""
return bound_field.errors and not hasattr(bound_field.field.widget, 'render_with_errors')
+
+
[email protected](is_safe=True)
+@stringfilter
+def cautious_slugify(value):
+ return _cautious_slugify(value)
diff --git a/wagtail/wagtailcore/utils.py b/wagtail/wagtailcore/utils.py
--- a/wagtail/wagtailcore/utils.py
+++ b/wagtail/wagtailcore/utils.py
@@ -1,8 +1,11 @@
import re
+import unicodedata
from six import string_types
from django.db.models import Model
from django.apps import apps
+from django.utils.encoding import force_text
+from django.utils.text import slugify
def camelcase_to_underscore(str):
@@ -45,3 +48,38 @@
`<-/script>`, `<--/script>` etc.
"""
return SCRIPT_RE.sub(r'<-\1/script>', text)
+
+
+SLUGIFY_RE = re.compile(r'[^\w\s-]', re.UNICODE)
+
+
+def cautious_slugify(value):
+ """
+ Convert a string to ASCII exactly as Django's slugify does, with the exception
+ that any non-ASCII alphanumeric characters (that cannot be ASCIIfied under Unicode
+ normalisation) are escaped into codes like 'u0421' instead of being deleted entirely.
+
+ This ensures that the result of slugifying e.g. Cyrillic text will not be an empty
+ string, and can thus be safely used as an identifier (albeit not a human-readable one).
+ """
+ value = force_text(value)
+
+ # Normalize the string to decomposed unicode form. This causes accented Latin
+ # characters to be split into 'base character' + 'accent modifier'; the latter will
+ # be stripped out by the regexp, resulting in an ASCII-clean character that doesn't
+ # need to be escaped
+ value = unicodedata.normalize('NFKD', value)
+
+ # Strip out characters that aren't letterlike, underscores or hyphens,
+ # using the same regexp that slugify uses. This ensures that non-ASCII non-letters
+ # (e.g. accent modifiers, fancy punctuation) get stripped rather than escaped
+ value = SLUGIFY_RE.sub('', value)
+
+ # Encode as ASCII, escaping non-ASCII characters with backslashreplace, then convert
+ # back to a unicode string (which is what slugify expects)
+ value = value.encode('ascii', 'backslashreplace').decode('ascii')
+
+ # Pass to slugify to perform final conversion (whitespace stripping, applying
+ # mark_safe); this will also strip out the backslashes from the 'backslashreplace'
+ # conversion
+ return slugify(value)
| {"golden_diff": "diff --git a/wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py b/wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py\n--- a/wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py\n+++ b/wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py\n@@ -3,10 +3,12 @@\n from django.conf import settings\n from django import template\n from django.contrib.humanize.templatetags.humanize import intcomma\n+from django.template.defaultfilters import stringfilter\n \n from wagtail.wagtailcore import hooks\n from wagtail.wagtailcore.models import get_navigation_menu_items, UserPagePermissionsProxy, PageViewRestriction\n from wagtail.wagtailcore.utils import camelcase_to_underscore, escape_script\n+from wagtail.wagtailcore.utils import cautious_slugify as _cautious_slugify\n from wagtail.wagtailadmin.menu import admin_menu\n \n \n@@ -183,3 +185,9 @@\n the widget does not support the render_with_errors method\n \"\"\"\n return bound_field.errors and not hasattr(bound_field.field.widget, 'render_with_errors')\n+\n+\[email protected](is_safe=True)\n+@stringfilter\n+def cautious_slugify(value):\n+ return _cautious_slugify(value)\ndiff --git a/wagtail/wagtailcore/utils.py b/wagtail/wagtailcore/utils.py\n--- a/wagtail/wagtailcore/utils.py\n+++ b/wagtail/wagtailcore/utils.py\n@@ -1,8 +1,11 @@\n import re\n+import unicodedata\n from six import string_types\n \n from django.db.models import Model\n from django.apps import apps\n+from django.utils.encoding import force_text\n+from django.utils.text import slugify\n \n \n def camelcase_to_underscore(str):\n@@ -45,3 +48,38 @@\n `<-/script>`, `<--/script>` etc.\n \"\"\"\n return SCRIPT_RE.sub(r'<-\\1/script>', text)\n+\n+\n+SLUGIFY_RE = re.compile(r'[^\\w\\s-]', re.UNICODE)\n+\n+\n+def cautious_slugify(value):\n+ \"\"\"\n+ Convert a string to ASCII exactly as Django's slugify does, with the exception\n+ that any non-ASCII alphanumeric characters (that cannot be ASCIIfied under Unicode\n+ normalisation) are escaped into codes like 'u0421' instead of being deleted entirely.\n+\n+ This ensures that the result of slugifying e.g. Cyrillic text will not be an empty\n+ string, and can thus be safely used as an identifier (albeit not a human-readable one).\n+ \"\"\"\n+ value = force_text(value)\n+\n+ # Normalize the string to decomposed unicode form. This causes accented Latin\n+ # characters to be split into 'base character' + 'accent modifier'; the latter will\n+ # be stripped out by the regexp, resulting in an ASCII-clean character that doesn't\n+ # need to be escaped\n+ value = unicodedata.normalize('NFKD', value)\n+\n+ # Strip out characters that aren't letterlike, underscores or hyphens,\n+ # using the same regexp that slugify uses. This ensures that non-ASCII non-letters\n+ # (e.g. accent modifiers, fancy punctuation) get stripped rather than escaped\n+ value = SLUGIFY_RE.sub('', value)\n+\n+ # Encode as ASCII, escaping non-ASCII characters with backslashreplace, then convert\n+ # back to a unicode string (which is what slugify expects)\n+ value = value.encode('ascii', 'backslashreplace').decode('ascii')\n+\n+ # Pass to slugify to perform final conversion (whitespace stripping, applying\n+ # mark_safe); this will also strip out the backslashes from the 'backslashreplace'\n+ # conversion\n+ return slugify(value)\n", "issue": "Tabbed interface in admin doesn't work with i18n\n\nI can't toggle tabs in the page editor when wagtail is configured with USE_I18N = True and language isn't english (russian, in my case). 
It seems that tabbed_interface template uses slugify template tag to produce ids to elements, it works fine unless other than english language is used. In that case slugify produces empty output, and js bindings do not work correctly.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django import template\nfrom django.contrib.humanize.templatetags.humanize import intcomma\n\nfrom wagtail.wagtailcore import hooks\nfrom wagtail.wagtailcore.models import get_navigation_menu_items, UserPagePermissionsProxy, PageViewRestriction\nfrom wagtail.wagtailcore.utils import camelcase_to_underscore, escape_script\nfrom wagtail.wagtailadmin.menu import admin_menu\n\n\nregister = template.Library()\n\nregister.filter('intcomma', intcomma)\n\[email protected]_tag('wagtailadmin/shared/explorer_nav.html')\ndef explorer_nav():\n return {\n 'nodes': get_navigation_menu_items()\n }\n\n\[email protected]_tag('wagtailadmin/shared/explorer_nav_child.html')\ndef explorer_subnav(nodes):\n return {\n 'nodes': nodes\n }\n\n\[email protected]_tag('wagtailadmin/shared/main_nav.html', takes_context=True)\ndef main_nav(context):\n request = context['request']\n\n return {\n 'menu_html': admin_menu.render_html(request),\n 'request': request,\n }\n\[email protected]_tag\ndef main_nav_js():\n return admin_menu.media['js']\n\n\[email protected](\"ellipsistrim\")\ndef ellipsistrim(value, max_length):\n if len(value) > max_length:\n truncd_val = value[:max_length]\n if not len(value) == (max_length + 1) and value[max_length + 1] != \" \":\n truncd_val = truncd_val[:truncd_val.rfind(\" \")]\n return truncd_val + \"...\"\n return value\n\n\[email protected]\ndef fieldtype(bound_field):\n try:\n return camelcase_to_underscore(bound_field.field.__class__.__name__)\n except AttributeError:\n try:\n return camelcase_to_underscore(bound_field.__class__.__name__)\n except AttributeError:\n return \"\"\n\n\[email protected]\ndef widgettype(bound_field):\n try:\n return camelcase_to_underscore(bound_field.field.widget.__class__.__name__)\n except AttributeError:\n try:\n return camelcase_to_underscore(bound_field.widget.__class__.__name__)\n except AttributeError:\n return \"\"\n\n\n\[email protected]\ndef meta_description(model):\n try:\n return model.model_class()._meta.description\n except:\n return \"\"\n\n\[email protected]_tag(takes_context=True)\ndef page_permissions(context, page):\n \"\"\"\n Usage: {% page_permissions page as page_perms %}\n Sets the variable 'page_perms' to a PagePermissionTester object that can be queried to find out\n what actions the current logged-in user can perform on the given page.\n \"\"\"\n # Create a UserPagePermissionsProxy object to represent the user's global permissions, and\n # cache it in the context for the duration of the page request, if one does not exist already\n if 'user_page_permissions' not in context:\n context['user_page_permissions'] = UserPagePermissionsProxy(context['request'].user)\n\n # Now retrieve a PagePermissionTester from it, specific to the given page\n return context['user_page_permissions'].for_page(page)\n\n\[email protected]_tag(takes_context=True)\ndef test_page_is_public(context, page):\n \"\"\"\n Usage: {% test_page_is_public page as is_public %}\n Sets 'is_public' to True iff there are no page view restrictions in place on\n this page.\n Caches the list of page view restrictions in the context, to avoid repeated\n DB queries on repeated calls.\n \"\"\"\n if 'all_page_view_restriction_paths' not in 
context:\n context['all_page_view_restriction_paths'] = PageViewRestriction.objects.select_related('page').values_list('page__path', flat=True)\n\n is_private = any([\n page.path.startswith(restricted_path)\n for restricted_path in context['all_page_view_restriction_paths']\n ])\n\n return not is_private\n\n\[email protected]_tag\ndef hook_output(hook_name):\n \"\"\"\n Example: {% hook_output 'insert_editor_css' %}\n Whenever we have a hook whose functions take no parameters and return a string, this tag can be used\n to output the concatenation of all of those return values onto the page.\n Note that the output is not escaped - it is the hook function's responsibility to escape unsafe content.\n \"\"\"\n snippets = [fn() for fn in hooks.get_hooks(hook_name)]\n return ''.join(snippets)\n\n\[email protected]_tag\ndef usage_count_enabled():\n return getattr(settings, 'WAGTAIL_USAGE_COUNT_ENABLED', False)\n\n\[email protected]_tag\ndef base_url_setting():\n return getattr(settings, 'BASE_URL', None)\n\n\nclass EscapeScriptNode(template.Node):\n TAG_NAME = 'escapescript'\n\n def __init__(self, nodelist):\n super(EscapeScriptNode, self).__init__()\n self.nodelist = nodelist\n\n def render(self, context):\n out = self.nodelist.render(context)\n return escape_script(out)\n\n @classmethod\n def handle(cls, parser, token):\n nodelist = parser.parse(('end' + EscapeScriptNode.TAG_NAME,))\n parser.delete_first_token()\n return cls(nodelist)\n\nregister.tag(EscapeScriptNode.TAG_NAME, EscapeScriptNode.handle)\n\n\n# Helpers for Widget.render_with_errors, our extension to the Django widget API that allows widgets to\n# take on the responsibility of rendering their own error messages\[email protected]\ndef render_with_errors(bound_field):\n \"\"\"\n Usage: {{ field|render_with_errors }} as opposed to {{ field }}.\n If the field (a BoundField instance) has errors on it, and the associated widget implements\n a render_with_errors method, call that; otherwise, call the regular widget rendering mechanism.\n \"\"\"\n widget = bound_field.field.widget\n if bound_field.errors and hasattr(widget, 'render_with_errors'):\n return widget.render_with_errors(bound_field.html_name, bound_field.value(), attrs={'id': bound_field.auto_id}, errors=bound_field.errors)\n else:\n return bound_field.as_widget()\n\[email protected]\ndef has_unrendered_errors(bound_field):\n \"\"\"\n Return true if this field has errors that were not accounted for by render_with_errors, because\n the widget does not support the render_with_errors method\n \"\"\"\n return bound_field.errors and not hasattr(bound_field.field.widget, 'render_with_errors')\n", "path": "wagtail/wagtailadmin/templatetags/wagtailadmin_tags.py"}, {"content": "import re\nfrom six import string_types\n\nfrom django.db.models import Model\nfrom django.apps import apps\n\n\ndef camelcase_to_underscore(str):\n # http://djangosnippets.org/snippets/585/\n return re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\\\1', str).lower().strip('_')\n\n\ndef resolve_model_string(model_string, default_app=None):\n \"\"\"\n Resolve an 'app_label.model_name' string into an actual model class.\n If a model class is passed in, just return that.\n \"\"\"\n if isinstance(model_string, string_types):\n try:\n app_label, model_name = model_string.split(\".\")\n except ValueError:\n if default_app is not None:\n # If we can't split, assume a model in current app\n app_label = default_app\n model_name = model_string\n else:\n raise ValueError(\"Can not resolve {0!r} into a model. 
Model names \"\n \"should be in the form app_label.model_name\".format(\n model_string), model_string)\n\n return apps.get_model(app_label, model_name)\n\n elif isinstance(model_string, type) and issubclass(model_string, Model):\n return model_string\n\n else:\n raise LookupError(\"Can not resolve {0!r} into a model\".format(model_string), model_string)\n\n\nSCRIPT_RE = re.compile(r'<(-*)/script>')\ndef escape_script(text):\n \"\"\"\n Escape `</script>` tags in 'text' so that it can be placed within a `<script>` block without\n accidentally closing it. A '-' character will be inserted for each time it is escaped:\n `<-/script>`, `<--/script>` etc.\n \"\"\"\n return SCRIPT_RE.sub(r'<-\\1/script>', text)\n", "path": "wagtail/wagtailcore/utils.py"}]} | 3,072 | 869 |
gh_patches_debug_50453 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-3837 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Maybe a bug about module checking
### Bug description
If I use conda to install only jupyterhub and python (conda install -c conda-forge python=3.9 jupyterhub), the following message showed when someone tried to log in:
```
Failed to set groups [Errno 1] Operation not permitted
Traceback (most recent call last):
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/bin/jupyterhub-singleuser", line 7, in <module>
from jupyterhub.singleuser import main
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/__init__.py", line 5, in <module>
from .app import main
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py", line 38, in <module>
raise _import_error
TypeError: exceptions must derive from BaseException
```
I think the problem is in lines 32 to 36 of jupyterhub/singleuser/app.py: because `continue` runs before `_import_error` is assigned, the error stays `None`, and `raise None` is what produces the confusing `TypeError`.
```
except ImportError as e:
continue
if _import_error is None:
_import_error = e
else:
break
```
I changed that with:
```
except ImportError as e:
if _import_error is None:
_import_error = e
else:
break
continue
```
then a more helpful message was shown:
```
Failed to set groups [Errno 1] Operation not permitted
Traceback (most recent call last):
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/bin/jupyterhub-singleuser", line 7, in <module>
from jupyterhub.singleuser import main
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/__init__.py", line 5, in <module>
from .app import main
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py", line 38, in <module>
raise _import_error
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py", line 30, in <module>
App = import_item(JUPYTERHUB_SINGLEUSER_APP)
File "/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/traitlets/utils/importstring.py", line 30, in import_item
module = __import__(package, fromlist=[obj])
ModuleNotFoundError: No module named 'jupyter_server'
```
The above message let me know that I have to install jupyter_server.
This issue can be closed anytime.
Any suggestion is welcome.
</issue>
<code>
[start of jupyterhub/singleuser/app.py]
1 """Make a single-user app based on the environment:
2
3 - $JUPYTERHUB_SINGLEUSER_APP, the base Application class, to be wrapped in JupyterHub authentication.
4 default: jupyter_server.serverapp.ServerApp
5
6 .. versionchanged:: 2.0
7
8 Default app changed to launch `jupyter labhub`.
9 Use JUPYTERHUB_SINGLEUSER_APP=notebook.notebookapp.NotebookApp for the legacy 'classic' notebook server.
10 """
11 import os
12
13 from traitlets import import_item
14
15 from .mixins import make_singleuser_app
16
17 JUPYTERHUB_SINGLEUSER_APP = os.environ.get("JUPYTERHUB_SINGLEUSER_APP")
18
19
20 if JUPYTERHUB_SINGLEUSER_APP:
21 App = import_item(JUPYTERHUB_SINGLEUSER_APP)
22 else:
23 App = None
24 _import_error = None
25 for JUPYTERHUB_SINGLEUSER_APP in (
26 "jupyter_server.serverapp.ServerApp",
27 "notebook.notebookapp.NotebookApp",
28 ):
29 try:
30 App = import_item(JUPYTERHUB_SINGLEUSER_APP)
31 except ImportError as e:
32 continue
33 if _import_error is None:
34 _import_error = e
35 else:
36 break
37 if App is None:
38 raise _import_error
39
40
41 SingleUserNotebookApp = make_singleuser_app(App)
42
43
44 def main():
45 """Launch a jupyterhub single-user server"""
46 if not os.environ.get("JUPYTERHUB_SINGLEUSER_APP"):
47 # app not specified, launch jupyter-labhub by default,
48 # if jupyterlab is recent enough (3.1).
49 # This is a minimally extended ServerApp that does:
50 # 1. ensure lab extension is enabled, and
51 # 2. set default URL to `/lab`
52 import re
53
54 _version_pat = re.compile(r"(\d+)\.(\d+)")
55 try:
56 import jupyterlab
57 from jupyterlab.labhubapp import SingleUserLabApp
58
59 m = _version_pat.match(jupyterlab.__version__)
60 except Exception:
61 m = None
62
63 if m is not None:
64 version_tuple = tuple(int(v) for v in m.groups())
65 if version_tuple >= (3, 1):
66 return SingleUserLabApp.launch_instance()
67
68 return SingleUserNotebookApp.launch_instance()
69
[end of jupyterhub/singleuser/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/jupyterhub/singleuser/app.py b/jupyterhub/singleuser/app.py
--- a/jupyterhub/singleuser/app.py
+++ b/jupyterhub/singleuser/app.py
@@ -29,9 +29,9 @@
try:
App = import_item(JUPYTERHUB_SINGLEUSER_APP)
except ImportError as e:
- continue
if _import_error is None:
_import_error = e
+ continue
else:
break
if App is None:
| {"golden_diff": "diff --git a/jupyterhub/singleuser/app.py b/jupyterhub/singleuser/app.py\n--- a/jupyterhub/singleuser/app.py\n+++ b/jupyterhub/singleuser/app.py\n@@ -29,9 +29,9 @@\n try:\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\n except ImportError as e:\n- continue\n if _import_error is None:\n _import_error = e\n+ continue\n else:\n break\n if App is None:\n", "issue": "Maybe a bug about module checking\n### Bug description\r\n<!-- Use this section to clearly and concisely describe the bug. -->\r\nIf I use conda to install only jupyterhub and python (conda install -c conda-forge python=3.9 jupyterhub), the following message showed as someone try to login:\r\n\r\n```\r\nFailed to set groups [Errno 1] Operation not permitted\r\nTraceback (most recent call last):\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/bin/jupyterhub-singleuser\", line 7, in <module>\r\n from jupyterhub.singleuser import main\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/__init__.py\", line 5, in <module>\r\n from .app import main\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py\", line 38, in <module>\r\n raise _import_error\r\nTypeError: exceptions must derive from BaseException\r\n```\r\nI think the problem is the lines from 32 to 36 in jupyterhub/singleuser/app.py\r\n```\r\n except ImportError as e:\r\n continue\r\n if _import_error is None:\r\n _import_error = e\r\n else:\r\n break\r\n```\r\n\r\nI changed that with:\r\n```\r\n except ImportError as e:\r\n if _import_error is None:\r\n _import_error = e\r\n else:\r\n break\r\n continue\r\n```\r\nthen the better message showed:\r\n```\r\nFailed to set groups [Errno 1] Operation not permitted\r\nTraceback (most recent call last):\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/bin/jupyterhub-singleuser\", line 7, in <module>\r\n from jupyterhub.singleuser import main\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/__init__.py\", line 5, in <module>\r\n from .app import main\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py\", line 38, in <module>\r\n raise _import_error\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/jupyterhub/singleuser/app.py\", line 30, in <module>\r\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\r\n File \"/home/someone/bin/anaconda3/envs/py39jupyterhub222/lib/python3.9/site-packages/traitlets/utils/importstring.py\", line 30, in import_item\r\n module = __import__(package, fromlist=[obj])\r\nModuleNotFoundError: No module named 'jupyter_server'\r\n```\r\nThe above message let me know that I have to install jupyter_server.\r\nThis issue can be closed anytime.\r\nAny suggestion is welcome.\r\n\n", "before_files": [{"content": "\"\"\"Make a single-user app based on the environment:\n\n- $JUPYTERHUB_SINGLEUSER_APP, the base Application class, to be wrapped in JupyterHub authentication.\n default: jupyter_server.serverapp.ServerApp\n\n.. 
versionchanged:: 2.0\n\n Default app changed to launch `jupyter labhub`.\n Use JUPYTERHUB_SINGLEUSER_APP=notebook.notebookapp.NotebookApp for the legacy 'classic' notebook server.\n\"\"\"\nimport os\n\nfrom traitlets import import_item\n\nfrom .mixins import make_singleuser_app\n\nJUPYTERHUB_SINGLEUSER_APP = os.environ.get(\"JUPYTERHUB_SINGLEUSER_APP\")\n\n\nif JUPYTERHUB_SINGLEUSER_APP:\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\nelse:\n App = None\n _import_error = None\n for JUPYTERHUB_SINGLEUSER_APP in (\n \"jupyter_server.serverapp.ServerApp\",\n \"notebook.notebookapp.NotebookApp\",\n ):\n try:\n App = import_item(JUPYTERHUB_SINGLEUSER_APP)\n except ImportError as e:\n continue\n if _import_error is None:\n _import_error = e\n else:\n break\n if App is None:\n raise _import_error\n\n\nSingleUserNotebookApp = make_singleuser_app(App)\n\n\ndef main():\n \"\"\"Launch a jupyterhub single-user server\"\"\"\n if not os.environ.get(\"JUPYTERHUB_SINGLEUSER_APP\"):\n # app not specified, launch jupyter-labhub by default,\n # if jupyterlab is recent enough (3.1).\n # This is a minimally extended ServerApp that does:\n # 1. ensure lab extension is enabled, and\n # 2. set default URL to `/lab`\n import re\n\n _version_pat = re.compile(r\"(\\d+)\\.(\\d+)\")\n try:\n import jupyterlab\n from jupyterlab.labhubapp import SingleUserLabApp\n\n m = _version_pat.match(jupyterlab.__version__)\n except Exception:\n m = None\n\n if m is not None:\n version_tuple = tuple(int(v) for v in m.groups())\n if version_tuple >= (3, 1):\n return SingleUserLabApp.launch_instance()\n\n return SingleUserNotebookApp.launch_instance()\n", "path": "jupyterhub/singleuser/app.py"}]} | 1,897 | 111 |
gh_patches_debug_39273 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3126 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider jimmy-johns is broken
During the global build at 2021-09-29-14-42-48, spider **jimmy-johns** failed with **0 features** and **1544 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/logs/jimmy-johns.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/output/jimmy-johns.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/output/jimmy-johns.geojson))
</issue>
<code>
[start of locations/spiders/jimmy_johns.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4
5 from locations.items import GeojsonPointItem
6
7 STATES = ["AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA",
8 "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
9 "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
10 "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
11 "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"]
12 HEADERS = { 'Content-Type': 'application/json' }
13 JJBASE = 'https://www.jimmyjohns.com/webservices/Location/LocationServiceHandler.asmx/{}'
14 CITIES = JJBASE.format('GetCitiesByStateNameAbbreviation')
15 STORES = JJBASE.format('GetStoreAddressesByCityAndState')
16
17 class JimmyJohnsSpider(scrapy.Spider):
18 name = "jimmy-johns"
19 item_attributes = { 'brand': "Jimmy John's", 'brand_wikidata': "Q1689380" }
20 allowed_domains = ["www.jimmyjohns.com"]
21 download_delay = 0.2
22
23 def start_requests(self):
24 for state in STATES:
25 current_state = json.dumps({ 'state': state })
26 request = scrapy.Request(
27 CITIES,
28 method='POST',
29 body=current_state,
30 headers=HEADERS,
31 callback=self.parse_cities
32 )
33 request.meta['state'] = state
34 yield request
35
36 def parse_cities(self, response):
37 cities = json.loads(response.body)
38 for city in cities['d']:
39 current_city = json.dumps({ 'state': response.meta['state'], 'city': city })
40 request = scrapy.Request(
41 STORES,
42 method='POST',
43 body=current_city,
44 headers=HEADERS,
45 callback=self.parse
46 )
47 yield request
48
49 def parse(self, response):
50 stores = json.loads(response.body)
51 for store in stores['d']:
52 full = '{}, {}, {} {}'.format(store['address'], store['city'], store['state'], store['postalcode'])
53 yield GeojsonPointItem(
54 name=store['storename'],
55 addr_full=full,
56 opening_hours=store['hours'],
57 phone=store['telephone'],
58 ref=store['storeid'],
59 lon=float(store['lng']),
60 lat=float(store['lat']),
61 )
62
[end of locations/spiders/jimmy_johns.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/jimmy_johns.py b/locations/spiders/jimmy_johns.py
--- a/locations/spiders/jimmy_johns.py
+++ b/locations/spiders/jimmy_johns.py
@@ -1,61 +1,36 @@
# -*- coding: utf-8 -*-
+from os import stat
import scrapy
+from urllib import parse
import json
-
from locations.items import GeojsonPointItem
-STATES = ["AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA",
- "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
- "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
- "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
- "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"]
-HEADERS = { 'Content-Type': 'application/json' }
-JJBASE = 'https://www.jimmyjohns.com/webservices/Location/LocationServiceHandler.asmx/{}'
-CITIES = JJBASE.format('GetCitiesByStateNameAbbreviation')
-STORES = JJBASE.format('GetStoreAddressesByCityAndState')
+class TemplateSpider(scrapy.Spider):
+ name = "jimmy_johns"
+ allowed_domains = ["locations.jimmyjohns.com"]
+ start_urls = (
+ 'https://locations.jimmyjohns.com/sitemap.xml',
+ )
-class JimmyJohnsSpider(scrapy.Spider):
- name = "jimmy-johns"
- item_attributes = { 'brand': "Jimmy John's", 'brand_wikidata': "Q1689380" }
- allowed_domains = ["www.jimmyjohns.com"]
- download_delay = 0.2
+ def parse(self, response):
+ stores = response.xpath('//url/loc[contains(text(),"sandwiches")]/text()').extract()
+ for store in stores:
+ yield scrapy.Request(response.urljoin(store), callback=self.parse_store)
- def start_requests(self):
- for state in STATES:
- current_state = json.dumps({ 'state': state })
- request = scrapy.Request(
- CITIES,
- method='POST',
- body=current_state,
- headers=HEADERS,
- callback=self.parse_cities
- )
- request.meta['state'] = state
- yield request
+ def parse_store(self, response):
+ data = json.loads(response.xpath('//script[@type="application/ld+json"]//text()').extract_first())
- def parse_cities(self, response):
- cities = json.loads(response.body)
- for city in cities['d']:
- current_city = json.dumps({ 'state': response.meta['state'], 'city': city })
- request = scrapy.Request(
- STORES,
- method='POST',
- body=current_city,
- headers=HEADERS,
- callback=self.parse
- )
- yield request
+ properties = {
+ 'ref': data[0]['url'],
+ 'addr_full': data[0]['address']['streetAddress'],
+ 'city': data[0]['address']['addressLocality'],
+ 'state': data[0]['address']['addressRegion'],
+ 'postcode': data[0]['address']['postalCode'],
+ 'website': response.url,
+ 'lat': data[0]['geo']['latitude'],
+ 'lon': data[0]['geo']['longitude'],
+ }
+ if data[0]['address']['telephone']:
+ properties['phone'] = data[0]['address']['telephone']
- def parse(self, response):
- stores = json.loads(response.body)
- for store in stores['d']:
- full = '{}, {}, {} {}'.format(store['address'], store['city'], store['state'], store['postalcode'])
- yield GeojsonPointItem(
- name=store['storename'],
- addr_full=full,
- opening_hours=store['hours'],
- phone=store['telephone'],
- ref=store['storeid'],
- lon=float(store['lng']),
- lat=float(store['lat']),
- )
+ yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/jimmy_johns.py b/locations/spiders/jimmy_johns.py\n--- a/locations/spiders/jimmy_johns.py\n+++ b/locations/spiders/jimmy_johns.py\n@@ -1,61 +1,36 @@\n # -*- coding: utf-8 -*-\n+from os import stat\n import scrapy\n+from urllib import parse\n import json\n-\n from locations.items import GeojsonPointItem\n \n-STATES = [\"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DC\", \"DE\", \"FL\", \"GA\",\n- \"HI\", \"ID\", \"IL\", \"IN\", \"IA\", \"KS\", \"KY\", \"LA\", \"ME\", \"MD\",\n- \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\", \"NJ\",\n- \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\",\n- \"SD\", \"TN\", \"TX\", \"UT\", \"VT\", \"VA\", \"WA\", \"WV\", \"WI\", \"WY\"]\n-HEADERS = { 'Content-Type': 'application/json' }\n-JJBASE = 'https://www.jimmyjohns.com/webservices/Location/LocationServiceHandler.asmx/{}'\n-CITIES = JJBASE.format('GetCitiesByStateNameAbbreviation')\n-STORES = JJBASE.format('GetStoreAddressesByCityAndState')\n+class TemplateSpider(scrapy.Spider):\n+ name = \"jimmy_johns\"\n+ allowed_domains = [\"locations.jimmyjohns.com\"]\n+ start_urls = (\n+ 'https://locations.jimmyjohns.com/sitemap.xml',\n+ )\n \n-class JimmyJohnsSpider(scrapy.Spider):\n- name = \"jimmy-johns\"\n- item_attributes = { 'brand': \"Jimmy John's\", 'brand_wikidata': \"Q1689380\" }\n- allowed_domains = [\"www.jimmyjohns.com\"]\n- download_delay = 0.2\n+ def parse(self, response):\n+ stores = response.xpath('//url/loc[contains(text(),\"sandwiches\")]/text()').extract()\n+ for store in stores:\n+ yield scrapy.Request(response.urljoin(store), callback=self.parse_store)\n \n- def start_requests(self):\n- for state in STATES:\n- current_state = json.dumps({ 'state': state })\n- request = scrapy.Request(\n- CITIES,\n- method='POST',\n- body=current_state,\n- headers=HEADERS,\n- callback=self.parse_cities\n- )\n- request.meta['state'] = state\n- yield request\n+ def parse_store(self, response):\n+ data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]//text()').extract_first())\n \n- def parse_cities(self, response):\n- cities = json.loads(response.body)\n- for city in cities['d']:\n- current_city = json.dumps({ 'state': response.meta['state'], 'city': city })\n- request = scrapy.Request(\n- STORES,\n- method='POST',\n- body=current_city,\n- headers=HEADERS,\n- callback=self.parse\n- )\n- yield request\n+ properties = {\n+ 'ref': data[0]['url'],\n+ 'addr_full': data[0]['address']['streetAddress'],\n+ 'city': data[0]['address']['addressLocality'],\n+ 'state': data[0]['address']['addressRegion'],\n+ 'postcode': data[0]['address']['postalCode'],\n+ 'website': response.url,\n+ 'lat': data[0]['geo']['latitude'],\n+ 'lon': data[0]['geo']['longitude'],\n+ }\n+ if data[0]['address']['telephone']:\n+ properties['phone'] = data[0]['address']['telephone']\n \n- def parse(self, response):\n- stores = json.loads(response.body)\n- for store in stores['d']:\n- full = '{}, {}, {} {}'.format(store['address'], store['city'], store['state'], store['postalcode'])\n- yield GeojsonPointItem(\n- name=store['storename'],\n- addr_full=full,\n- opening_hours=store['hours'],\n- phone=store['telephone'],\n- ref=store['storeid'],\n- lon=float(store['lng']),\n- lat=float(store['lat']),\n- )\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider jimmy-johns is broken\nDuring the global build at 2021-09-29-14-42-48, spider **jimmy-johns** failed with **0 features** and **1544 errors**.\n\nHere's [the 
log](https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/logs/jimmy-johns.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/output/jimmy-johns.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-29-14-42-48/output/jimmy-johns.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\n\nSTATES = [\"AL\", \"AK\", \"AZ\", \"AR\", \"CA\", \"CO\", \"CT\", \"DC\", \"DE\", \"FL\", \"GA\",\n \"HI\", \"ID\", \"IL\", \"IN\", \"IA\", \"KS\", \"KY\", \"LA\", \"ME\", \"MD\",\n \"MA\", \"MI\", \"MN\", \"MS\", \"MO\", \"MT\", \"NE\", \"NV\", \"NH\", \"NJ\",\n \"NM\", \"NY\", \"NC\", \"ND\", \"OH\", \"OK\", \"OR\", \"PA\", \"RI\", \"SC\",\n \"SD\", \"TN\", \"TX\", \"UT\", \"VT\", \"VA\", \"WA\", \"WV\", \"WI\", \"WY\"]\nHEADERS = { 'Content-Type': 'application/json' }\nJJBASE = 'https://www.jimmyjohns.com/webservices/Location/LocationServiceHandler.asmx/{}'\nCITIES = JJBASE.format('GetCitiesByStateNameAbbreviation')\nSTORES = JJBASE.format('GetStoreAddressesByCityAndState')\n\nclass JimmyJohnsSpider(scrapy.Spider):\n name = \"jimmy-johns\"\n item_attributes = { 'brand': \"Jimmy John's\", 'brand_wikidata': \"Q1689380\" }\n allowed_domains = [\"www.jimmyjohns.com\"]\n download_delay = 0.2\n\n def start_requests(self):\n for state in STATES:\n current_state = json.dumps({ 'state': state })\n request = scrapy.Request(\n CITIES,\n method='POST',\n body=current_state,\n headers=HEADERS,\n callback=self.parse_cities\n )\n request.meta['state'] = state\n yield request\n\n def parse_cities(self, response):\n cities = json.loads(response.body)\n for city in cities['d']:\n current_city = json.dumps({ 'state': response.meta['state'], 'city': city })\n request = scrapy.Request(\n STORES,\n method='POST',\n body=current_city,\n headers=HEADERS,\n callback=self.parse\n )\n yield request\n\n def parse(self, response):\n stores = json.loads(response.body)\n for store in stores['d']:\n full = '{}, {}, {} {}'.format(store['address'], store['city'], store['state'], store['postalcode'])\n yield GeojsonPointItem(\n name=store['storename'],\n addr_full=full,\n opening_hours=store['hours'],\n phone=store['telephone'],\n ref=store['storeid'],\n lon=float(store['lng']),\n lat=float(store['lat']),\n )\n", "path": "locations/spiders/jimmy_johns.py"}]} | 1,458 | 1,020 |
gh_patches_debug_28457 | rasdani/github-patches | git_diff | aws__aws-cli-329 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't shell out to rst2man.py
We've seen that shelling out to `rst2man.py` can be problematic.
In the "ideal" case (installing from a completely brand new python env or a new virtualenv) everything works.
The issue is that some distro packages will rename the executable scripts provided by a package (sometimes to `rst2man` or `rst2man-2.7.py`, for example). `docutils` specifies this script as `rst2man.py`, which is what you get if you use `pip` to install _everything_. The problem is that even if you `pip install awscli`, when you install it into the system site-packages pip will notice that `docutils` is already installed (for example via "sudo apt-get install python-docutils" or via the distro's package manager) and skip it.
If pip says docutils is installed then `import docutils` will work, but both the location of the `rst2man.py` script and what it's named can vary. The script may not even be on the PATH by default (homebrew puts things in /usr/local/share/python, for example).
The code for `rst2man` is simple enough that we can directly use the code via `docutils` rather than piping to `rst2man`.
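As a minimal sketch of that idea (the helper name here is only illustrative), docutils' own `publish_string` plus its manpage writer produce the same troff source that `rst2man.py` would, without ever leaving the process:

```
# Minimal sketch: render reST to man-page (troff) source in-process instead of
# piping to an external rst2man script. Both publish_string and the manpage
# writer ship with docutils, so only `import docutils` has to succeed.
from docutils.core import publish_string
from docutils.writers import manpage

def rst_to_man(rst_contents):
    # Returns troff source as bytes, ready to pipe into groff or a pager.
    return publish_string(rst_contents, writer=manpage.Writer())
```

The rendered bytes can then be fed to `groff -man -T ascii` through a pipe, so nothing depends on the script's name or location anymore.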
</issue>
<code>
[start of awscli/help.py]
1 # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import sys
14 import logging
15 import os
16 import platform
17 from subprocess import Popen, PIPE
18
19 from docutils.core import publish_string
20 import bcdoc
21 from bcdoc.clidocs import ReSTDocument
22 from bcdoc.clidocs import ProviderDocumentEventHandler
23 from bcdoc.clidocs import ServiceDocumentEventHandler
24 from bcdoc.clidocs import OperationDocumentEventHandler
25 import bcdoc.clidocevents
26 from bcdoc.textwriter import TextWriter
27
28 from awscli.argprocess import ParamShorthand
29
30
31 LOG = logging.getLogger('awscli.help')
32
33
34 class ExecutableNotFoundError(Exception):
35 def __init__(self, executable_name):
36 super(ExecutableNotFoundError, self).__init__(
37 'Could not find executable named "%s"' % executable_name)
38
39
40 def get_renderer():
41 """
42 Return the appropriate HelpRenderer implementation for the
43 current platform.
44 """
45 if platform.system() == 'Windows':
46 return WindowsHelpRenderer()
47 else:
48 return PosixHelpRenderer()
49
50
51 class HelpRenderer(object):
52 """
53 Interface for a help renderer.
54
55 The renderer is responsible for displaying the help content on
56 a particular platform.
57 """
58
59 def render(self, contents):
60 """
61 Each implementation of HelpRenderer must implement this
62 render method.
63 """
64 pass
65
66
67 class PosixHelpRenderer(HelpRenderer):
68 """
69 Render help content on a Posix-like system. This includes
70 Linux and MacOS X.
71 """
72
73 PAGER = 'less'
74
75 def get_pager_cmdline(self):
76 pager = self.PAGER
77 if 'MANPAGER' in os.environ:
78 pager = os.environ['MANPAGER']
79 elif 'PAGER' in os.environ:
80 pager = os.environ['PAGER']
81 return pager.split()
82
83 def render(self, contents):
84 rst2man = self._get_rst2man_name()
85 cmdline = [rst2man]
86 LOG.debug("Running command: %s", cmdline)
87 p2 = self._popen(cmdline, stdin=PIPE, stdout=PIPE)
88 p2.stdin.write(contents)
89 p2.stdin.close()
90 if not self._exists_on_path('groff'):
91 raise ExecutableNotFoundError('groff')
92 cmdline = ['groff', '-man', '-T', 'ascii']
93 LOG.debug("Running command: %s", cmdline)
94 p3 = self._popen(cmdline, stdin=p2.stdout, stdout=PIPE)
95 cmdline = self.get_pager_cmdline()
96 LOG.debug("Running command: %s", cmdline)
97 p4 = self._popen(cmdline, stdin=p3.stdout)
98 p4.communicate()
99 sys.exit(1)
100
101 def _get_rst2man_name(self):
102 if self._exists_on_path('rst2man.py'):
103 return 'rst2man.py'
104 elif self._exists_on_path('rst2man'):
105 # Some distros like ubuntu will rename rst2man.py to rst2man
106 # if you install their version (i.e. "apt-get install
107 # python-docutils"). Though they could technically rename
108 # this to anything we'll support it renamed to 'rst2man' by
109 # explicitly checking for this case ourself.
110 return 'rst2man'
111 else:
112 # Give them the original name as set from docutils.
113 raise ExecutableNotFoundError('rst2man.py')
114
115 def _exists_on_path(self, name):
116 # Since we're only dealing with POSIX systems, we can
117 # ignore things like PATHEXT.
118 return any([os.path.exists(os.path.join(p, name))
119 for p in os.environ.get('PATH', []).split(os.pathsep)])
120
121 def _popen(self, *args, **kwargs):
122 return Popen(*args, **kwargs)
123
124
125 class WindowsHelpRenderer(HelpRenderer):
126 """
127 Render help content on a Windows platform.
128 """
129
130 def render(self, contents):
131 text_output = publish_string(contents,
132 writer=TextWriter())
133 sys.stdout.write(text_output.decode('utf-8'))
134 sys.exit(1)
135
136
137 class RawRenderer(HelpRenderer):
138 """
139 Render help as the raw ReST document.
140 """
141
142 def render(self, contents):
143 sys.stdout.write(contents)
144 sys.exit(1)
145
146
147 class HelpCommand(object):
148 """
149 HelpCommand Interface
150 ---------------------
151 A HelpCommand object acts as the interface between objects in the
152 CLI (e.g. Providers, Services, Operations, etc.) and the documentation
153 system (bcdoc).
154
155 A HelpCommand object wraps the object from the CLI space and provides
156 a consistent interface to critical information needed by the
157 documentation pipeline such as the object's name, description, etc.
158
159 The HelpCommand object is passed to the component of the
160 documentation pipeline that fires documentation events. It is
161 then passed on to each document event handler that has registered
162 for the events.
163
164 All HelpCommand objects contain the following attributes:
165
166 + ``session`` - A ``botocore`` ``Session`` object.
167 + ``obj`` - The object that is being documented.
168 + ``command_table`` - A dict mapping command names to
169 callable objects.
170 + ``arg_table`` - A dict mapping argument names to callable objects.
171 + ``doc`` - A ``Document`` object that is used to collect the
172 generated documentation.
173
174 In addition, please note the `properties` defined below which are
175 required to allow the object to be used in the document pipeline.
176
177 Implementations of HelpCommand are provided here for Provider,
178 Service and Operation objects. Other implementations for other
179 types of objects might be needed for customization in plugins.
180 As long as the implementations conform to this basic interface
181 it should be possible to pass them to the documentation system
182 and generate interactive and static help files.
183 """
184
185 EventHandlerClass = None
186 """
187 Each subclass should define this class variable to point to the
188 EventHandler class used by this HelpCommand.
189 """
190
191 def __init__(self, session, obj, command_table, arg_table):
192 self.session = session
193 self.obj = obj
194 self.command_table = command_table
195 self.arg_table = arg_table
196 self.renderer = get_renderer()
197 self.doc = ReSTDocument(target='man')
198
199 @property
200 def event_class(self):
201 """
202 Return the ``event_class`` for this object.
203
204 The ``event_class`` is used by the documentation pipeline
205 when generating documentation events. For the event below::
206
207 doc-title.<event_class>.<name>
208
209 The document pipeline would use this property to determine
210 the ``event_class`` value.
211 """
212 pass
213
214 @property
215 def name(self):
216 """
217 Return the name of the wrapped object.
218
219 This would be called by the document pipeline to determine
220 the ``name`` to be inserted into the event, as shown above.
221 """
222 pass
223
224 def __call__(self, args, parsed_globals):
225 # Create an event handler for a Provider Document
226 instance = self.EventHandlerClass(self)
227 # Now generate all of the events for a Provider document.
228 # We pass ourselves along so that we can, in turn, get passed
229 # to all event handlers.
230 bcdoc.clidocevents.generate_events(self.session, self)
231 self.renderer.render(self.doc.getvalue())
232 instance.unregister()
233
234
235 class ProviderHelpCommand(HelpCommand):
236 """Implements top level help command.
237
238 This is what is called when ``aws help`` is run.
239
240 """
241 EventHandlerClass = ProviderDocumentEventHandler
242
243 def __init__(self, session, command_table, arg_table,
244 description, synopsis, usage):
245 HelpCommand.__init__(self, session, session.provider,
246 command_table, arg_table)
247 self.description = description
248 self.synopsis = synopsis
249 self.help_usage = usage
250
251 @property
252 def event_class(self):
253 return 'Provider'
254
255 @property
256 def name(self):
257 return self.obj.name
258
259
260 class ServiceHelpCommand(HelpCommand):
261 """Implements service level help.
262
263 This is the object invoked whenever a service command
264 help is implemented, e.g. ``aws ec2 help``.
265
266 """
267
268 EventHandlerClass = ServiceDocumentEventHandler
269
270 def __init__(self, session, obj, command_table, arg_table, name,
271 event_class):
272 super(ServiceHelpCommand, self).__init__(session, obj, command_table,
273 arg_table)
274 self._name = name
275 self._event_class = event_class
276
277 @property
278 def event_class(self):
279 return self._event_class
280
281 @property
282 def name(self):
283 return self._name
284
285
286 class OperationHelpCommand(HelpCommand):
287 """Implements operation level help.
288
289 This is the object invoked whenever help for a service is requested,
290 e.g. ``aws ec2 describe-instances help``.
291
292 """
293 EventHandlerClass = OperationDocumentEventHandler
294
295 def __init__(self, session, service, operation, arg_table, name,
296 event_class):
297 HelpCommand.__init__(self, session, operation, None, arg_table)
298 self.service = service
299 self.param_shorthand = ParamShorthand()
300 self._name = name
301 self._event_class = event_class
302
303 @property
304 def event_class(self):
305 return self._event_class
306
307 @property
308 def name(self):
309 return self._name
310
[end of awscli/help.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/help.py b/awscli/help.py
--- a/awscli/help.py
+++ b/awscli/help.py
@@ -17,6 +17,7 @@
from subprocess import Popen, PIPE
from docutils.core import publish_string
+from docutils.writers import manpage
import bcdoc
from bcdoc.clidocs import ReSTDocument
from bcdoc.clidocs import ProviderDocumentEventHandler
@@ -81,21 +82,17 @@
return pager.split()
def render(self, contents):
- rst2man = self._get_rst2man_name()
- cmdline = [rst2man]
- LOG.debug("Running command: %s", cmdline)
- p2 = self._popen(cmdline, stdin=PIPE, stdout=PIPE)
- p2.stdin.write(contents)
- p2.stdin.close()
+ man_contents = publish_string(contents, writer=manpage.Writer())
if not self._exists_on_path('groff'):
raise ExecutableNotFoundError('groff')
cmdline = ['groff', '-man', '-T', 'ascii']
LOG.debug("Running command: %s", cmdline)
- p3 = self._popen(cmdline, stdin=p2.stdout, stdout=PIPE)
+ p3 = self._popen(cmdline, stdin=PIPE, stdout=PIPE)
+ groff_output = p3.communicate(input=man_contents)[0]
cmdline = self.get_pager_cmdline()
LOG.debug("Running command: %s", cmdline)
- p4 = self._popen(cmdline, stdin=p3.stdout)
- p4.communicate()
+ p4 = self._popen(cmdline, stdin=PIPE)
+ p4.communicate(input=groff_output)
sys.exit(1)
def _get_rst2man_name(self):
| {"golden_diff": "diff --git a/awscli/help.py b/awscli/help.py\n--- a/awscli/help.py\n+++ b/awscli/help.py\n@@ -17,6 +17,7 @@\n from subprocess import Popen, PIPE\n \n from docutils.core import publish_string\n+from docutils.writers import manpage\n import bcdoc\n from bcdoc.clidocs import ReSTDocument\n from bcdoc.clidocs import ProviderDocumentEventHandler\n@@ -81,21 +82,17 @@\n return pager.split()\n \n def render(self, contents):\n- rst2man = self._get_rst2man_name()\n- cmdline = [rst2man]\n- LOG.debug(\"Running command: %s\", cmdline)\n- p2 = self._popen(cmdline, stdin=PIPE, stdout=PIPE)\n- p2.stdin.write(contents)\n- p2.stdin.close()\n+ man_contents = publish_string(contents, writer=manpage.Writer())\n if not self._exists_on_path('groff'):\n raise ExecutableNotFoundError('groff')\n cmdline = ['groff', '-man', '-T', 'ascii']\n LOG.debug(\"Running command: %s\", cmdline)\n- p3 = self._popen(cmdline, stdin=p2.stdout, stdout=PIPE)\n+ p3 = self._popen(cmdline, stdin=PIPE, stdout=PIPE)\n+ groff_output = p3.communicate(input=man_contents)[0]\n cmdline = self.get_pager_cmdline()\n LOG.debug(\"Running command: %s\", cmdline)\n- p4 = self._popen(cmdline, stdin=p3.stdout)\n- p4.communicate()\n+ p4 = self._popen(cmdline, stdin=PIPE)\n+ p4.communicate(input=groff_output)\n sys.exit(1)\n \n def _get_rst2man_name(self):\n", "issue": "Don't shell out to rst2man.py\nWe've seen that shelling out to `rst2man.py` can be problematic.\n\nIn the \"ideal\" case (installing from a completely brand new python env or a new virtualenv) everything works.\n\nThe issue is some distro packages will rename the executable scripts provided by a package (sometimes `rst2man` or `rst2man-2.7.py` for example). `docutils` specifies this script as `rst2man.py` which is what you get if you use `pip` to install _everything_. The problem is that even if you `pip install awscli` if you install this into the system site-packages, pip will notice that `docutils` is already installed (for example via \"sudo apt-get install python-docutils\" or via the distro's package manager) and skip it.\n\nIf pip says docutils is installed then `import docutils` will work, but both the location of the `rst2man.py` script as well as what it's named can vary. This example may not even be on the PATH by default (homebrew puts things in /usr/local/share/python for example).\n\nThe code for `rst2man` is simple enough that we can directly use the code via `docutils` rather than piping to `rst2man`.\n\n", "before_files": [{"content": "# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nimport sys\nimport logging\nimport os\nimport platform\nfrom subprocess import Popen, PIPE\n\nfrom docutils.core import publish_string\nimport bcdoc\nfrom bcdoc.clidocs import ReSTDocument\nfrom bcdoc.clidocs import ProviderDocumentEventHandler\nfrom bcdoc.clidocs import ServiceDocumentEventHandler\nfrom bcdoc.clidocs import OperationDocumentEventHandler\nimport bcdoc.clidocevents\nfrom bcdoc.textwriter import TextWriter\n\nfrom awscli.argprocess import ParamShorthand\n\n\nLOG = logging.getLogger('awscli.help')\n\n\nclass ExecutableNotFoundError(Exception):\n def __init__(self, executable_name):\n super(ExecutableNotFoundError, self).__init__(\n 'Could not find executable named \"%s\"' % executable_name)\n\n\ndef get_renderer():\n \"\"\"\n Return the appropriate HelpRenderer implementation for the\n current platform.\n \"\"\"\n if platform.system() == 'Windows':\n return WindowsHelpRenderer()\n else:\n return PosixHelpRenderer()\n\n\nclass HelpRenderer(object):\n \"\"\"\n Interface for a help renderer.\n\n The renderer is responsible for displaying the help content on\n a particular platform.\n \"\"\"\n\n def render(self, contents):\n \"\"\"\n Each implementation of HelpRenderer must implement this\n render method.\n \"\"\"\n pass\n\n\nclass PosixHelpRenderer(HelpRenderer):\n \"\"\"\n Render help content on a Posix-like system. This includes\n Linux and MacOS X.\n \"\"\"\n\n PAGER = 'less'\n\n def get_pager_cmdline(self):\n pager = self.PAGER\n if 'MANPAGER' in os.environ:\n pager = os.environ['MANPAGER']\n elif 'PAGER' in os.environ:\n pager = os.environ['PAGER']\n return pager.split()\n\n def render(self, contents):\n rst2man = self._get_rst2man_name()\n cmdline = [rst2man]\n LOG.debug(\"Running command: %s\", cmdline)\n p2 = self._popen(cmdline, stdin=PIPE, stdout=PIPE)\n p2.stdin.write(contents)\n p2.stdin.close()\n if not self._exists_on_path('groff'):\n raise ExecutableNotFoundError('groff')\n cmdline = ['groff', '-man', '-T', 'ascii']\n LOG.debug(\"Running command: %s\", cmdline)\n p3 = self._popen(cmdline, stdin=p2.stdout, stdout=PIPE)\n cmdline = self.get_pager_cmdline()\n LOG.debug(\"Running command: %s\", cmdline)\n p4 = self._popen(cmdline, stdin=p3.stdout)\n p4.communicate()\n sys.exit(1)\n\n def _get_rst2man_name(self):\n if self._exists_on_path('rst2man.py'):\n return 'rst2man.py'\n elif self._exists_on_path('rst2man'):\n # Some distros like ubuntu will rename rst2man.py to rst2man\n # if you install their version (i.e. \"apt-get install\n # python-docutils\"). 
Though they could technically rename\n # this to anything we'll support it renamed to 'rst2man' by\n # explicitly checking for this case ourself.\n return 'rst2man'\n else:\n # Give them the original name as set from docutils.\n raise ExecutableNotFoundError('rst2man.py')\n\n def _exists_on_path(self, name):\n # Since we're only dealing with POSIX systems, we can\n # ignore things like PATHEXT.\n return any([os.path.exists(os.path.join(p, name))\n for p in os.environ.get('PATH', []).split(os.pathsep)])\n\n def _popen(self, *args, **kwargs):\n return Popen(*args, **kwargs)\n\n\nclass WindowsHelpRenderer(HelpRenderer):\n \"\"\"\n Render help content on a Windows platform.\n \"\"\"\n\n def render(self, contents):\n text_output = publish_string(contents,\n writer=TextWriter())\n sys.stdout.write(text_output.decode('utf-8'))\n sys.exit(1)\n\n\nclass RawRenderer(HelpRenderer):\n \"\"\"\n Render help as the raw ReST document.\n \"\"\"\n\n def render(self, contents):\n sys.stdout.write(contents)\n sys.exit(1)\n\n\nclass HelpCommand(object):\n \"\"\"\n HelpCommand Interface\n ---------------------\n A HelpCommand object acts as the interface between objects in the\n CLI (e.g. Providers, Services, Operations, etc.) and the documentation\n system (bcdoc).\n\n A HelpCommand object wraps the object from the CLI space and provides\n a consistent interface to critical information needed by the\n documentation pipeline such as the object's name, description, etc.\n\n The HelpCommand object is passed to the component of the\n documentation pipeline that fires documentation events. It is\n then passed on to each document event handler that has registered\n for the events.\n\n All HelpCommand objects contain the following attributes:\n\n + ``session`` - A ``botocore`` ``Session`` object.\n + ``obj`` - The object that is being documented.\n + ``command_table`` - A dict mapping command names to\n callable objects.\n + ``arg_table`` - A dict mapping argument names to callable objects.\n + ``doc`` - A ``Document`` object that is used to collect the\n generated documentation.\n\n In addition, please note the `properties` defined below which are\n required to allow the object to be used in the document pipeline.\n\n Implementations of HelpCommand are provided here for Provider,\n Service and Operation objects. Other implementations for other\n types of objects might be needed for customization in plugins.\n As long as the implementations conform to this basic interface\n it should be possible to pass them to the documentation system\n and generate interactive and static help files.\n \"\"\"\n\n EventHandlerClass = None\n \"\"\"\n Each subclass should define this class variable to point to the\n EventHandler class used by this HelpCommand.\n \"\"\"\n\n def __init__(self, session, obj, command_table, arg_table):\n self.session = session\n self.obj = obj\n self.command_table = command_table\n self.arg_table = arg_table\n self.renderer = get_renderer()\n self.doc = ReSTDocument(target='man')\n\n @property\n def event_class(self):\n \"\"\"\n Return the ``event_class`` for this object.\n\n The ``event_class`` is used by the documentation pipeline\n when generating documentation events. 
For the event below::\n\n doc-title.<event_class>.<name>\n\n The document pipeline would use this property to determine\n the ``event_class`` value.\n \"\"\"\n pass\n\n @property\n def name(self):\n \"\"\"\n Return the name of the wrapped object.\n\n This would be called by the document pipeline to determine\n the ``name`` to be inserted into the event, as shown above.\n \"\"\"\n pass\n\n def __call__(self, args, parsed_globals):\n # Create an event handler for a Provider Document\n instance = self.EventHandlerClass(self)\n # Now generate all of the events for a Provider document.\n # We pass ourselves along so that we can, in turn, get passed\n # to all event handlers.\n bcdoc.clidocevents.generate_events(self.session, self)\n self.renderer.render(self.doc.getvalue())\n instance.unregister()\n\n\nclass ProviderHelpCommand(HelpCommand):\n \"\"\"Implements top level help command.\n\n This is what is called when ``aws help`` is run.\n\n \"\"\"\n EventHandlerClass = ProviderDocumentEventHandler\n\n def __init__(self, session, command_table, arg_table,\n description, synopsis, usage):\n HelpCommand.__init__(self, session, session.provider,\n command_table, arg_table)\n self.description = description\n self.synopsis = synopsis\n self.help_usage = usage\n\n @property\n def event_class(self):\n return 'Provider'\n\n @property\n def name(self):\n return self.obj.name\n\n\nclass ServiceHelpCommand(HelpCommand):\n \"\"\"Implements service level help.\n\n This is the object invoked whenever a service command\n help is implemented, e.g. ``aws ec2 help``.\n\n \"\"\"\n\n EventHandlerClass = ServiceDocumentEventHandler\n\n def __init__(self, session, obj, command_table, arg_table, name,\n event_class):\n super(ServiceHelpCommand, self).__init__(session, obj, command_table,\n arg_table)\n self._name = name\n self._event_class = event_class\n\n @property\n def event_class(self):\n return self._event_class\n\n @property\n def name(self):\n return self._name\n\n\nclass OperationHelpCommand(HelpCommand):\n \"\"\"Implements operation level help.\n\n This is the object invoked whenever help for a service is requested,\n e.g. ``aws ec2 describe-instances help``.\n\n \"\"\"\n EventHandlerClass = OperationDocumentEventHandler\n\n def __init__(self, session, service, operation, arg_table, name,\n event_class):\n HelpCommand.__init__(self, session, operation, None, arg_table)\n self.service = service\n self.param_shorthand = ParamShorthand()\n self._name = name\n self._event_class = event_class\n\n @property\n def event_class(self):\n return self._event_class\n\n @property\n def name(self):\n return self._name\n", "path": "awscli/help.py"}]} | 3,832 | 408 |
gh_patches_debug_28170 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1019 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"Run algorithm" page is missing the breadcrumb bar
Maybe this is on purpose, but I find it confusing that the page that allows users to upload images and run an algorithm does not show the breadcrumb bar to get back to the algorithm overview page.
Example: https://grand-challenge.org/algorithms/vertebra-segmentation/run/
To be consistent with the job list, the title of that page could also read "Run this algorithm" instead of "Run an algorithm".
</issue>
<code>
[start of app/grandchallenge/algorithms/views.py]
1 import logging
2
3 from dal import autocomplete
4 from django.conf import settings
5 from django.contrib.auth import get_user_model
6 from django.contrib.auth.mixins import (
7 PermissionRequiredMixin,
8 UserPassesTestMixin,
9 )
10 from django.contrib.messages.views import SuccessMessageMixin
11 from django.http import Http404
12 from django.views.generic import (
13 CreateView,
14 DetailView,
15 FormView,
16 ListView,
17 UpdateView,
18 )
19 from guardian.mixins import (
20 LoginRequiredMixin,
21 PermissionListMixin,
22 PermissionRequiredMixin as ObjectPermissionRequiredMixin,
23 )
24 from rest_framework.permissions import DjangoObjectPermissions
25 from rest_framework.viewsets import ReadOnlyModelViewSet
26 from rest_framework_guardian.filters import ObjectPermissionsFilter
27
28 from grandchallenge.algorithms.forms import (
29 AlgorithmForm,
30 AlgorithmImageForm,
31 AlgorithmImageUpdateForm,
32 EditorsForm,
33 UsersForm,
34 )
35 from grandchallenge.algorithms.models import (
36 Algorithm,
37 AlgorithmImage,
38 Job,
39 Result,
40 )
41 from grandchallenge.algorithms.serializers import (
42 AlgorithmImageSerializer,
43 AlgorithmSerializer,
44 JobSerializer,
45 ResultSerializer,
46 )
47 from grandchallenge.cases.forms import UploadRawImagesForm
48 from grandchallenge.cases.models import RawImageUploadSession
49 from grandchallenge.subdomains.utils import reverse
50
51 logger = logging.getLogger(__name__)
52
53
54 class AlgorithmCreate(LoginRequiredMixin, PermissionRequiredMixin, CreateView):
55 model = Algorithm
56 form_class = AlgorithmForm
57 permission_required = (
58 f"{Algorithm._meta.app_label}.add_{Algorithm._meta.model_name}"
59 )
60
61 def form_valid(self, form):
62 response = super().form_valid(form=form)
63 self.object.add_editor(self.request.user)
64 return response
65
66 def get_form_kwargs(self):
67 kwargs = super().get_form_kwargs()
68 kwargs.update({"user": self.request.user})
69 return kwargs
70
71
72 class AlgorithmList(PermissionListMixin, ListView):
73 model = Algorithm
74 permission_required = {
75 f"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}"
76 }
77
78 def get_queryset(self, *args, **kwargs):
79 # Add algorithms that are publicly visible
80 qs = super().get_queryset(*args, **kwargs)
81 qs |= Algorithm.objects.filter(visible_to_public=True)
82
83 return qs
84
85
86 class AlgorithmDetail(
87 LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView
88 ):
89 model = Algorithm
90 permission_required = (
91 f"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}"
92 )
93 raise_exception = True
94
95
96 class AlgorithmUpdate(
97 LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView
98 ):
99 model = Algorithm
100 form_class = AlgorithmForm
101 permission_required = (
102 f"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}"
103 )
104 raise_exception = True
105
106 def get_form_kwargs(self):
107 kwargs = super().get_form_kwargs()
108 kwargs.update({"user": self.request.user})
109 return kwargs
110
111
112 class AlgorithmUserAutocomplete(
113 LoginRequiredMixin, UserPassesTestMixin, autocomplete.Select2QuerySetView
114 ):
115 def test_func(self):
116 group_pks = (
117 Algorithm.objects.all()
118 .select_related("editors_group")
119 .values_list("editors_group__pk", flat=True)
120 )
121 return (
122 self.request.user.is_superuser
123 or self.request.user.groups.filter(pk__in=group_pks).exists()
124 )
125
126 def get_queryset(self):
127 qs = (
128 get_user_model()
129 .objects.all()
130 .order_by("username")
131 .exclude(username=settings.ANONYMOUS_USER_NAME)
132 )
133
134 if self.q:
135 qs = qs.filter(username__istartswith=self.q)
136
137 return qs
138
139
140 class AlgorithmUserGroupUpdateMixin(
141 LoginRequiredMixin,
142 ObjectPermissionRequiredMixin,
143 SuccessMessageMixin,
144 FormView,
145 ):
146 template_name = "algorithms/algorithm_user_groups_form.html"
147 permission_required = (
148 f"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}"
149 )
150 raise_exception = True
151
152 def get_permission_object(self):
153 return self.algorithm
154
155 @property
156 def algorithm(self):
157 return Algorithm.objects.get(slug=self.kwargs["slug"])
158
159 def get_context_data(self, **kwargs):
160 context = super().get_context_data(**kwargs)
161 context.update(
162 {"object": self.algorithm, "role": self.get_form().role}
163 )
164 return context
165
166 def get_success_url(self):
167 return self.algorithm.get_absolute_url()
168
169 def form_valid(self, form):
170 form.add_or_remove_user(algorithm=self.algorithm)
171 return super().form_valid(form)
172
173
174 class EditorsUpdate(AlgorithmUserGroupUpdateMixin):
175 form_class = EditorsForm
176 success_message = "Editors successfully updated"
177
178
179 class UsersUpdate(AlgorithmUserGroupUpdateMixin):
180 form_class = UsersForm
181 success_message = "Users successfully updated"
182
183
184 class AlgorithmImageCreate(
185 LoginRequiredMixin, ObjectPermissionRequiredMixin, CreateView
186 ):
187 model = AlgorithmImage
188 form_class = AlgorithmImageForm
189 permission_required = (
190 f"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}"
191 )
192 raise_exception = True
193
194 def get_form_kwargs(self):
195 kwargs = super().get_form_kwargs()
196 kwargs.update({"user": self.request.user})
197 return kwargs
198
199 @property
200 def algorithm(self):
201 return Algorithm.objects.get(slug=self.kwargs["slug"])
202
203 def get_permission_object(self):
204 return self.algorithm
205
206 def form_valid(self, form):
207 form.instance.creator = self.request.user
208 form.instance.algorithm = self.algorithm
209
210 uploaded_file = form.cleaned_data["chunked_upload"][0]
211 form.instance.staged_image_uuid = uploaded_file.uuid
212
213 return super().form_valid(form)
214
215
216 class AlgorithmImageDetail(
217 LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView
218 ):
219 model = AlgorithmImage
220 permission_required = f"{AlgorithmImage._meta.app_label}.view_{AlgorithmImage._meta.model_name}"
221 raise_exception = True
222
223
224 class AlgorithmImageUpdate(
225 LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView
226 ):
227 model = AlgorithmImage
228 form_class = AlgorithmImageUpdateForm
229 permission_required = f"{AlgorithmImage._meta.app_label}.change_{AlgorithmImage._meta.model_name}"
230 raise_exception = True
231
232
233 class AlgorithmExecutionSessionCreate(
234 LoginRequiredMixin,
235 ObjectPermissionRequiredMixin,
236 SuccessMessageMixin,
237 CreateView,
238 ):
239 model = RawImageUploadSession
240 form_class = UploadRawImagesForm
241 template_name = "algorithms/algorithm_execution_session_create.html"
242 success_message = (
243 "Your images have been uploaded, "
244 "please check back here to see the processing status."
245 )
246 permission_required = (
247 f"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}"
248 )
249 raise_exception = True
250
251 @property
252 def algorithm(self) -> Algorithm:
253 return Algorithm.objects.get(slug=self.kwargs["slug"])
254
255 def get_permission_object(self):
256 return self.algorithm
257
258 def get_initial(self):
259 if self.algorithm.latest_ready_image is None:
260 raise Http404()
261 return super().get_initial()
262
263 def get_form_kwargs(self):
264 kwargs = super().get_form_kwargs()
265 kwargs.update({"user": self.request.user})
266 return kwargs
267
268 def form_valid(self, form):
269 form.instance.creator = self.request.user
270 form.instance.algorithm_image = self.algorithm.latest_ready_image
271 return super().form_valid(form)
272
273 def get_success_url(self):
274 return reverse(
275 "algorithms:jobs-list", kwargs={"slug": self.kwargs["slug"]}
276 )
277
278
279 class AlgorithmJobsList(LoginRequiredMixin, PermissionListMixin, ListView):
280 model = Job
281 permission_required = f"{Job._meta.app_label}.view_{Job._meta.model_name}"
282
283 @property
284 def algorithm(self) -> Algorithm:
285 return Algorithm.objects.get(slug=self.kwargs["slug"])
286
287 def get_context_data(self, *args, **kwargs):
288 context = super().get_context_data(*args, **kwargs)
289 context.update({"algorithm": self.algorithm})
290 return context
291
292 def get_queryset(self, *args, **kwargs):
293 """Filter the jobs for this algorithm."""
294 qs = super().get_queryset(*args, **kwargs)
295 return qs.filter(algorithm_image__algorithm=self.algorithm)
296
297
298 class AlgorithmViewSet(ReadOnlyModelViewSet):
299 queryset = Algorithm.objects.all()
300 serializer_class = AlgorithmSerializer
301 permission_classes = [DjangoObjectPermissions]
302 filter_backends = [ObjectPermissionsFilter]
303
304
305 class AlgorithmImageViewSet(ReadOnlyModelViewSet):
306 queryset = AlgorithmImage.objects.all()
307 serializer_class = AlgorithmImageSerializer
308 permission_classes = [DjangoObjectPermissions]
309 filter_backends = [ObjectPermissionsFilter]
310
311
312 class JobViewSet(ReadOnlyModelViewSet):
313 queryset = Job.objects.all()
314 serializer_class = JobSerializer
315 permission_classes = [DjangoObjectPermissions]
316 filter_backends = [ObjectPermissionsFilter]
317
318
319 class ResultViewSet(ReadOnlyModelViewSet):
320 queryset = Result.objects.all()
321 serializer_class = ResultSerializer
322 permission_classes = [DjangoObjectPermissions]
323 filter_backends = [ObjectPermissionsFilter]
324
[end of app/grandchallenge/algorithms/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/grandchallenge/algorithms/views.py b/app/grandchallenge/algorithms/views.py
--- a/app/grandchallenge/algorithms/views.py
+++ b/app/grandchallenge/algorithms/views.py
@@ -212,6 +212,11 @@
return super().form_valid(form)
+ def get_context_data(self, *args, **kwargs):
+ context = super().get_context_data(*args, **kwargs)
+ context.update({"algorithm": self.algorithm})
+ return context
+
class AlgorithmImageDetail(
LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView
@@ -229,6 +234,11 @@
permission_required = f"{AlgorithmImage._meta.app_label}.change_{AlgorithmImage._meta.model_name}"
raise_exception = True
+ def get_context_data(self, *args, **kwargs):
+ context = super().get_context_data(*args, **kwargs)
+ context.update({"algorithm": self.object.algorithm})
+ return context
+
class AlgorithmExecutionSessionCreate(
LoginRequiredMixin,
@@ -270,6 +280,11 @@
form.instance.algorithm_image = self.algorithm.latest_ready_image
return super().form_valid(form)
+ def get_context_data(self, *args, **kwargs):
+ context = super().get_context_data(*args, **kwargs)
+ context.update({"algorithm": self.algorithm})
+ return context
+
def get_success_url(self):
return reverse(
"algorithms:jobs-list", kwargs={"slug": self.kwargs["slug"]}
| {"golden_diff": "diff --git a/app/grandchallenge/algorithms/views.py b/app/grandchallenge/algorithms/views.py\n--- a/app/grandchallenge/algorithms/views.py\n+++ b/app/grandchallenge/algorithms/views.py\n@@ -212,6 +212,11 @@\n \n return super().form_valid(form)\n \n+ def get_context_data(self, *args, **kwargs):\n+ context = super().get_context_data(*args, **kwargs)\n+ context.update({\"algorithm\": self.algorithm})\n+ return context\n+\n \n class AlgorithmImageDetail(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView\n@@ -229,6 +234,11 @@\n permission_required = f\"{AlgorithmImage._meta.app_label}.change_{AlgorithmImage._meta.model_name}\"\n raise_exception = True\n \n+ def get_context_data(self, *args, **kwargs):\n+ context = super().get_context_data(*args, **kwargs)\n+ context.update({\"algorithm\": self.object.algorithm})\n+ return context\n+\n \n class AlgorithmExecutionSessionCreate(\n LoginRequiredMixin,\n@@ -270,6 +280,11 @@\n form.instance.algorithm_image = self.algorithm.latest_ready_image\n return super().form_valid(form)\n \n+ def get_context_data(self, *args, **kwargs):\n+ context = super().get_context_data(*args, **kwargs)\n+ context.update({\"algorithm\": self.algorithm})\n+ return context\n+\n def get_success_url(self):\n return reverse(\n \"algorithms:jobs-list\", kwargs={\"slug\": self.kwargs[\"slug\"]}\n", "issue": "\"Run algorithm\" page is missing the breadcrumb bar\nMaybe this is on purpose, but I find it confusing that the page that allows users to upload images and run an algorithm does not show the breadcrumb bar to get back to the algorithm overview page.\r\n\r\nExample: https://grand-challenge.org/algorithms/vertebra-segmentation/run/\r\n\r\nTo be consistent with the job list the title of that page could also read \"Run this algorithm\" instead \"Run an algorithm\"\n", "before_files": [{"content": "import logging\n\nfrom dal import autocomplete\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.mixins import (\n PermissionRequiredMixin,\n UserPassesTestMixin,\n)\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.http import Http404\nfrom django.views.generic import (\n CreateView,\n DetailView,\n FormView,\n ListView,\n UpdateView,\n)\nfrom guardian.mixins import (\n LoginRequiredMixin,\n PermissionListMixin,\n PermissionRequiredMixin as ObjectPermissionRequiredMixin,\n)\nfrom rest_framework.permissions import DjangoObjectPermissions\nfrom rest_framework.viewsets import ReadOnlyModelViewSet\nfrom rest_framework_guardian.filters import ObjectPermissionsFilter\n\nfrom grandchallenge.algorithms.forms import (\n AlgorithmForm,\n AlgorithmImageForm,\n AlgorithmImageUpdateForm,\n EditorsForm,\n UsersForm,\n)\nfrom grandchallenge.algorithms.models import (\n Algorithm,\n AlgorithmImage,\n Job,\n Result,\n)\nfrom grandchallenge.algorithms.serializers import (\n AlgorithmImageSerializer,\n AlgorithmSerializer,\n JobSerializer,\n ResultSerializer,\n)\nfrom grandchallenge.cases.forms import UploadRawImagesForm\nfrom grandchallenge.cases.models import RawImageUploadSession\nfrom grandchallenge.subdomains.utils import reverse\n\nlogger = logging.getLogger(__name__)\n\n\nclass AlgorithmCreate(LoginRequiredMixin, PermissionRequiredMixin, CreateView):\n model = Algorithm\n form_class = AlgorithmForm\n permission_required = (\n f\"{Algorithm._meta.app_label}.add_{Algorithm._meta.model_name}\"\n )\n\n def form_valid(self, form):\n response = super().form_valid(form=form)\n 
self.object.add_editor(self.request.user)\n return response\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n\nclass AlgorithmList(PermissionListMixin, ListView):\n model = Algorithm\n permission_required = {\n f\"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}\"\n }\n\n def get_queryset(self, *args, **kwargs):\n # Add algorithms that are publicly visible\n qs = super().get_queryset(*args, **kwargs)\n qs |= Algorithm.objects.filter(visible_to_public=True)\n\n return qs\n\n\nclass AlgorithmDetail(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView\n):\n model = Algorithm\n permission_required = (\n f\"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}\"\n )\n raise_exception = True\n\n\nclass AlgorithmUpdate(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView\n):\n model = Algorithm\n form_class = AlgorithmForm\n permission_required = (\n f\"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}\"\n )\n raise_exception = True\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n\nclass AlgorithmUserAutocomplete(\n LoginRequiredMixin, UserPassesTestMixin, autocomplete.Select2QuerySetView\n):\n def test_func(self):\n group_pks = (\n Algorithm.objects.all()\n .select_related(\"editors_group\")\n .values_list(\"editors_group__pk\", flat=True)\n )\n return (\n self.request.user.is_superuser\n or self.request.user.groups.filter(pk__in=group_pks).exists()\n )\n\n def get_queryset(self):\n qs = (\n get_user_model()\n .objects.all()\n .order_by(\"username\")\n .exclude(username=settings.ANONYMOUS_USER_NAME)\n )\n\n if self.q:\n qs = qs.filter(username__istartswith=self.q)\n\n return qs\n\n\nclass AlgorithmUserGroupUpdateMixin(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n SuccessMessageMixin,\n FormView,\n):\n template_name = \"algorithms/algorithm_user_groups_form.html\"\n permission_required = (\n f\"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}\"\n )\n raise_exception = True\n\n def get_permission_object(self):\n return self.algorithm\n\n @property\n def algorithm(self):\n return Algorithm.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\"object\": self.algorithm, \"role\": self.get_form().role}\n )\n return context\n\n def get_success_url(self):\n return self.algorithm.get_absolute_url()\n\n def form_valid(self, form):\n form.add_or_remove_user(algorithm=self.algorithm)\n return super().form_valid(form)\n\n\nclass EditorsUpdate(AlgorithmUserGroupUpdateMixin):\n form_class = EditorsForm\n success_message = \"Editors successfully updated\"\n\n\nclass UsersUpdate(AlgorithmUserGroupUpdateMixin):\n form_class = UsersForm\n success_message = \"Users successfully updated\"\n\n\nclass AlgorithmImageCreate(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, CreateView\n):\n model = AlgorithmImage\n form_class = AlgorithmImageForm\n permission_required = (\n f\"{Algorithm._meta.app_label}.change_{Algorithm._meta.model_name}\"\n )\n raise_exception = True\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n @property\n def algorithm(self):\n return Algorithm.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_permission_object(self):\n return self.algorithm\n\n def 
form_valid(self, form):\n form.instance.creator = self.request.user\n form.instance.algorithm = self.algorithm\n\n uploaded_file = form.cleaned_data[\"chunked_upload\"][0]\n form.instance.staged_image_uuid = uploaded_file.uuid\n\n return super().form_valid(form)\n\n\nclass AlgorithmImageDetail(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView\n):\n model = AlgorithmImage\n permission_required = f\"{AlgorithmImage._meta.app_label}.view_{AlgorithmImage._meta.model_name}\"\n raise_exception = True\n\n\nclass AlgorithmImageUpdate(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView\n):\n model = AlgorithmImage\n form_class = AlgorithmImageUpdateForm\n permission_required = f\"{AlgorithmImage._meta.app_label}.change_{AlgorithmImage._meta.model_name}\"\n raise_exception = True\n\n\nclass AlgorithmExecutionSessionCreate(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n SuccessMessageMixin,\n CreateView,\n):\n model = RawImageUploadSession\n form_class = UploadRawImagesForm\n template_name = \"algorithms/algorithm_execution_session_create.html\"\n success_message = (\n \"Your images have been uploaded, \"\n \"please check back here to see the processing status.\"\n )\n permission_required = (\n f\"{Algorithm._meta.app_label}.view_{Algorithm._meta.model_name}\"\n )\n raise_exception = True\n\n @property\n def algorithm(self) -> Algorithm:\n return Algorithm.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_permission_object(self):\n return self.algorithm\n\n def get_initial(self):\n if self.algorithm.latest_ready_image is None:\n raise Http404()\n return super().get_initial()\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n form.instance.algorithm_image = self.algorithm.latest_ready_image\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse(\n \"algorithms:jobs-list\", kwargs={\"slug\": self.kwargs[\"slug\"]}\n )\n\n\nclass AlgorithmJobsList(LoginRequiredMixin, PermissionListMixin, ListView):\n model = Job\n permission_required = f\"{Job._meta.app_label}.view_{Job._meta.model_name}\"\n\n @property\n def algorithm(self) -> Algorithm:\n return Algorithm.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context.update({\"algorithm\": self.algorithm})\n return context\n\n def get_queryset(self, *args, **kwargs):\n \"\"\"Filter the jobs for this algorithm.\"\"\"\n qs = super().get_queryset(*args, **kwargs)\n return qs.filter(algorithm_image__algorithm=self.algorithm)\n\n\nclass AlgorithmViewSet(ReadOnlyModelViewSet):\n queryset = Algorithm.objects.all()\n serializer_class = AlgorithmSerializer\n permission_classes = [DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n\n\nclass AlgorithmImageViewSet(ReadOnlyModelViewSet):\n queryset = AlgorithmImage.objects.all()\n serializer_class = AlgorithmImageSerializer\n permission_classes = [DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n\n\nclass JobViewSet(ReadOnlyModelViewSet):\n queryset = Job.objects.all()\n serializer_class = JobSerializer\n permission_classes = [DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n\n\nclass ResultViewSet(ReadOnlyModelViewSet):\n queryset = Result.objects.all()\n serializer_class = ResultSerializer\n permission_classes = 
[DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n", "path": "app/grandchallenge/algorithms/views.py"}]} | 3,452 | 340 |
gh_patches_debug_4071 | rasdani/github-patches | git_diff | mlcommons__GaNDLF-173 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug during saving probabilities
**Describe the bug**
A small bug occurs when saving probabilities in classification tasks. It is caused by the file existence check: the code should check whether the file exists instead of checking whether the directory exists.
</issue>
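For readers unfamiliar with the distinction, here is a minimal illustrative sketch (not code from the repository; the path is a stand-in) of why the current check never fires for a CSV file:

```python
import os

logits_path = os.path.join("0", "logits.csv")  # stand-in for a fold output path

# os.path.isdir() is False for a regular file, so gating the probability
# aggregation on it means the block is skipped even after logits.csv exists.
# os.path.isfile() expresses the intended "does this file exist?" check.
print(os.path.isdir(logits_path))   # False, even when the CSV exists
print(os.path.isfile(logits_path))  # True once the CSV has been written
```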
<code>
[start of GANDLF/inference_manager.py]
1 from GANDLF.inference_loop import inference_loop
2 import os
3 import numpy as np
4 import torch
5 import torch.nn.functional as F
6
7
8 def InferenceManager(dataframe, outputDir, parameters, device):
9 """
10 This function takes in a dataframe, with some other parameters and performs the inference
11 """
12 # get the indeces for kfold splitting
13 inferenceData_full = dataframe
14
15 # # initialize parameters for inference
16 if not ("weights" in parameters):
17 parameters["weights"] = None # no need for loss weights for inference
18 if not ("class_weights" in parameters):
19 parameters["class_weights"] = None # no need for class weights for inference
20
21 n_folds = parameters["nested_training"]["validation"]
22
23 fold_dirs = []
24 if n_folds > 1:
25 directories = sorted(os.listdir(outputDir))
26 for d in directories:
27 if d.isdigit():
28 fold_dirs.append(os.path.join(outputDir, d, ""))
29 else:
30 fold_dirs = [outputDir]
31
32 probs_list = []
33
34 is_classification = parameters["problem_type"] == "classification"
35
36 for fold_dir in fold_dirs:
37 parameters["current_fold_dir"] = fold_dir
38 inference_loop(
39 inferenceDataFromPickle=inferenceData_full,
40 outputDir=fold_dir,
41 device=device,
42 parameters=parameters,
43 )
44
45 logits_dir = os.path.join(fold_dir, "logits.csv")
46 is_logits_dir_exist = os.path.isdir(logits_dir)
47
48 if is_classification and is_logits_dir_exist:
49 fold_logits = np.genfromtxt(logits_dir, delimiter=",")
50 fold_logits = torch.from_numpy(fold_logits)
51 fold_probs = F.softmax(fold_logits, dim=1)
52 probs_list.append(fold_probs)
53
54 if probs_list and is_classification:
55 probs_list = torch.stack(probs_list)
56 averaged_probs = torch.mean(probs_list, 0).numpy()
57 np.savetxt(
58 os.path.join(outputDir, "averaged_probabilities.csv"),
59 averaged_probs,
60 delimiter=",",
61 )
62
63
[end of GANDLF/inference_manager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/GANDLF/inference_manager.py b/GANDLF/inference_manager.py
--- a/GANDLF/inference_manager.py
+++ b/GANDLF/inference_manager.py
@@ -43,7 +43,7 @@
)
logits_dir = os.path.join(fold_dir, "logits.csv")
- is_logits_dir_exist = os.path.isdir(logits_dir)
+ is_logits_dir_exist = os.path.isfile(logits_dir)
if is_classification and is_logits_dir_exist:
fold_logits = np.genfromtxt(logits_dir, delimiter=",")
| {"golden_diff": "diff --git a/GANDLF/inference_manager.py b/GANDLF/inference_manager.py\n--- a/GANDLF/inference_manager.py\n+++ b/GANDLF/inference_manager.py\n@@ -43,7 +43,7 @@\n )\n \n logits_dir = os.path.join(fold_dir, \"logits.csv\")\n- is_logits_dir_exist = os.path.isdir(logits_dir)\n+ is_logits_dir_exist = os.path.isfile(logits_dir)\n \n if is_classification and is_logits_dir_exist:\n fold_logits = np.genfromtxt(logits_dir, delimiter=\",\")\n", "issue": "Bug during saving probabilities\n**Describe the bug**\r\nSmall bug is occurring during saving probabilities in classification tasks. This is due to file existence check. It should check if the file exists instead of checking if the directory exists.\r\n\n", "before_files": [{"content": "from GANDLF.inference_loop import inference_loop\nimport os\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\n\ndef InferenceManager(dataframe, outputDir, parameters, device):\n \"\"\"\n This function takes in a dataframe, with some other parameters and performs the inference\n \"\"\"\n # get the indeces for kfold splitting\n inferenceData_full = dataframe\n\n # # initialize parameters for inference\n if not (\"weights\" in parameters):\n parameters[\"weights\"] = None # no need for loss weights for inference\n if not (\"class_weights\" in parameters):\n parameters[\"class_weights\"] = None # no need for class weights for inference\n\n n_folds = parameters[\"nested_training\"][\"validation\"]\n\n fold_dirs = []\n if n_folds > 1:\n directories = sorted(os.listdir(outputDir))\n for d in directories:\n if d.isdigit():\n fold_dirs.append(os.path.join(outputDir, d, \"\"))\n else:\n fold_dirs = [outputDir]\n\n probs_list = []\n\n is_classification = parameters[\"problem_type\"] == \"classification\"\n\n for fold_dir in fold_dirs:\n parameters[\"current_fold_dir\"] = fold_dir\n inference_loop(\n inferenceDataFromPickle=inferenceData_full,\n outputDir=fold_dir,\n device=device,\n parameters=parameters,\n )\n\n logits_dir = os.path.join(fold_dir, \"logits.csv\")\n is_logits_dir_exist = os.path.isdir(logits_dir)\n\n if is_classification and is_logits_dir_exist:\n fold_logits = np.genfromtxt(logits_dir, delimiter=\",\")\n fold_logits = torch.from_numpy(fold_logits)\n fold_probs = F.softmax(fold_logits, dim=1)\n probs_list.append(fold_probs)\n\n if probs_list and is_classification:\n probs_list = torch.stack(probs_list)\n averaged_probs = torch.mean(probs_list, 0).numpy()\n np.savetxt(\n os.path.join(outputDir, \"averaged_probabilities.csv\"),\n averaged_probs,\n delimiter=\",\",\n )\n\n", "path": "GANDLF/inference_manager.py"}]} | 1,144 | 122 |
gh_patches_debug_3037 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-4360 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Windows: Cannot bundle with debug if pkg_resources is a dependency
This issue happens when I try to bundle my project, in the Analysis.assemble phase, and only when I try to do it with debug enabled. PyInstaller tries to compile a module that is part of an executable (pyinstaller.exe in this case), which fails because it cannot read the module.
This is with Windows 10, Python 3.6.6 (official from python.org) and PyInstaller 3.5.dev0+51429f8fc (which should be the latest develop version as of today).
Here is the traceback:
```
Traceback (most recent call last):
File "c:\python36-32\Lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\python36-32\Lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Users\RMYROY~1\VIRTUA~1\CDDA-G~3\Scripts\pyinstaller.exe\__main__.py", line 9, in <module>
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\__main__.py", line 111, in run
run_build(pyi_config, spec_file, **vars(args))
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\__main__.py", line 63, in run_build
PyInstaller.building.build_main.main(pyi_config, spec_file, **kwargs)
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\building\build_main.py", line 846, in main
build(specfile, kw.get('distpath'), kw.get('workpath'), kw.get('clean_build'))
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\building\build_main.py", line 793, in build
exec(code, spec_namespace)
File "launcher.spec", line 17, in <module>
noarchive=True)
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\building\build_main.py", line 243, in __init__
self.__postinit__()
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\building\datastruct.py", line 158, in __postinit__
self.assemble()
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\building\build_main.py", line 599, in assemble
for name, path, typecode in compile_py_files(new_toc, CONF['workpath']):
File "c:\users\rmyroy~1\virtua~1\cdda-g~3\lib\site-packages\PyInstaller\utils\misc.py", line 150, in compile_py_files
with open(obj_fnm, 'rb') as fh:
FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\RMYROY~1\\VIRTUA~1\\CDDA-G~3\\Scripts\\pyinstaller.exe\\__main__.pyo'
```
For some reason, the following entry is added in Analysis.pure
```python
('__main__.pyc', 'C:\\Users\\RMYROY~1\\VIRTUA~1\\CDDA-G~3\\Scripts\\pyinstaller.exe\\__main__.py', 'PYMODULE')
```
**That entry is incorrect in that it shouldn't have been added to pure, or it shouldn't be compiled in assemble; this is the source of the issue.**
Here is my spec file:
```python
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['cddagl\\launcher.py'],
pathex=['C:\\Program Files (x86)\\Windows Kits\\10\\Redist\\ucrt\\DLLs\\x86\\', 'C:\\Users\\Rémy Roy\\Projects\\CDDA-Game-Launcher'],
binaries=[],
datas=[('alembic', 'alembic'), ('data', 'data'), ('cddagl/resources', 'cddagl/resources'), ('cddagl/VERSION', 'cddagl'), ('C:\\Users\\Rémy Roy\\VirtualEnvs\\CDDA-Game-Launcher\\Scripts\\UnRAR.exe', '.'), ('cddagl/locale/en/LC_MESSAGES/cddagl.mo', 'cddagl/locale/en/LC_MESSAGES'), ('cddagl/locale/fr/LC_MESSAGES/cddagl.mo', 'cddagl/locale/fr/LC_MESSAGES'), ('cddagl/locale/it/LC_MESSAGES/cddagl.mo', 'cddagl/locale/it/LC_MESSAGES'), ('cddagl/locale/ja/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ja/LC_MESSAGES'), ('cddagl/locale/ru/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ru/LC_MESSAGES')],
hiddenimports=['lxml.cssselect', 'babel.numbers'],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=True)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
[('v', None, 'OPTION')],
exclude_binaries=True,
name='launcher',
debug=True,
bootloader_ignore_signals=False,
strip=False,
upx=False,
console=True , icon='cddagl\\resources\\launcher.ico')
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=False,
upx_exclude=[],
name='launcher')
```
You can probably reproduce this issue easily by cloning [my project](https://github.com/remyroy/CDDA-Game-Launcher) and issuing the following command:
```
python setup.py freeze --debug=1
```
Here is the full pyinstaller log output: https://gist.github.com/remyroy/37f7f0a912d5d714a947cddfb78769d4
I'll investigate how that entry is added in Analysis to give more context to this issue.
</issue>
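As background for the fix, PyInstaller hooks can not only add hidden imports but also exclude modules from dependency analysis. The sketch below is illustrative rather than repository code, but it uses only documented hook globals (`hiddenimports`, `excludedimports`) and matches the approach taken in the accompanying patch:

```python
# Illustrative hook sketch: keep pkg_resources' vendored submodules, but stop
# the analysis from following a '__main__' import, which is what drags the
# console script's __main__.py (inside pyinstaller.exe) into Analysis.pure.
from PyInstaller.utils.hooks import collect_submodules

hiddenimports = collect_submodules('pkg_resources._vendor')
excludedimports = ['__main__']
```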
<code>
[start of PyInstaller/hooks/hook-pkg_resources.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2019, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9 from PyInstaller.utils.hooks import collect_submodules
10
11 # pkg_resources keeps vendored modules in its _vendor subpackage, and does
12 # sys.meta_path based import magic to expose them as pkg_resources.extern.*
13 hiddenimports = collect_submodules('pkg_resources._vendor')
14
[end of PyInstaller/hooks/hook-pkg_resources.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py
--- a/PyInstaller/hooks/hook-pkg_resources.py
+++ b/PyInstaller/hooks/hook-pkg_resources.py
@@ -11,3 +11,5 @@
# pkg_resources keeps vendored modules in its _vendor subpackage, and does
# sys.meta_path based import magic to expose them as pkg_resources.extern.*
hiddenimports = collect_submodules('pkg_resources._vendor')
+
+excludedimports = ['__main__']
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-pkg_resources.py b/PyInstaller/hooks/hook-pkg_resources.py\n--- a/PyInstaller/hooks/hook-pkg_resources.py\n+++ b/PyInstaller/hooks/hook-pkg_resources.py\n@@ -11,3 +11,5 @@\n # pkg_resources keeps vendored modules in its _vendor subpackage, and does\n # sys.meta_path based import magic to expose them as pkg_resources.extern.*\n hiddenimports = collect_submodules('pkg_resources._vendor')\n+\n+excludedimports = ['__main__']\n", "issue": "Windows: Cannot bundle with debug if pkg_resources is a dependency\nThis issue happens when I try to bundle my project, in the Analysis.assemble phase and only when I try to do it with debug enabled. PyInstaller tries to compile a module that is part of an executable (pyinstaller.exe in this case) which fails because it cannot read the module.\r\n\r\nThis is with Windows 10, Python 3.6.6 (official from python.org) and PyInstaller 3.5.dev0+51429f8fc (which should be the latest develop version as of today).\r\n\r\nHere is the traceback:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"c:\\python36-32\\Lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"c:\\python36-32\\Lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\Users\\RMYROY~1\\VIRTUA~1\\CDDA-G~3\\Scripts\\pyinstaller.exe\\__main__.py\", line 9, in <module>\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\__main__.py\", line 111, in run\r\n run_build(pyi_config, spec_file, **vars(args))\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\__main__.py\", line 63, in run_build\r\n PyInstaller.building.build_main.main(pyi_config, spec_file, **kwargs)\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\building\\build_main.py\", line 846, in main\r\n build(specfile, kw.get('distpath'), kw.get('workpath'), kw.get('clean_build'))\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\building\\build_main.py\", line 793, in build\r\n exec(code, spec_namespace)\r\n File \"launcher.spec\", line 17, in <module>\r\n noarchive=True)\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\building\\build_main.py\", line 243, in __init__\r\n self.__postinit__()\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\building\\datastruct.py\", line 158, in __postinit__\r\n self.assemble()\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\building\\build_main.py\", line 599, in assemble\r\n for name, path, typecode in compile_py_files(new_toc, CONF['workpath']):\r\n File \"c:\\users\\rmyroy~1\\virtua~1\\cdda-g~3\\lib\\site-packages\\PyInstaller\\utils\\misc.py\", line 150, in compile_py_files\r\n with open(obj_fnm, 'rb') as fh:\r\nFileNotFoundError: [Errno 2] No such file or directory: 'C:\\\\Users\\\\RMYROY~1\\\\VIRTUA~1\\\\CDDA-G~3\\\\Scripts\\\\pyinstaller.exe\\\\__main__.pyo'\r\n```\r\n\r\nFor some reason, the following entry is added in Analysis.pure\r\n\r\n```python\r\n('__main__.pyc', 'C:\\\\Users\\\\RMYROY~1\\\\VIRTUA~1\\\\CDDA-G~3\\\\Scripts\\\\pyinstaller.exe\\\\__main__.py', 'PYMODULE')\r\n```\r\n\r\n**That entry is incorrect in that it shouldn't have been added in pure or it shouldn't be compiled in assemble which is the source of this issue.**\r\n\r\nHere is my spec file:\r\n\r\n```python\r\n# -*- mode: python ; coding: utf-8 -*-\r\n\r\nblock_cipher = 
None\r\n\r\n\r\na = Analysis(['cddagl\\\\launcher.py'],\r\n pathex=['C:\\\\Program Files (x86)\\\\Windows Kits\\\\10\\\\Redist\\\\ucrt\\\\DLLs\\\\x86\\\\', 'C:\\\\Users\\\\R\u00e9my Roy\\\\Projects\\\\CDDA-Game-Launcher'],\r\n binaries=[],\r\n datas=[('alembic', 'alembic'), ('data', 'data'), ('cddagl/resources', 'cddagl/resources'), ('cddagl/VERSION', 'cddagl'), ('C:\\\\Users\\\\R\u00e9my Roy\\\\VirtualEnvs\\\\CDDA-Game-Launcher\\\\Scripts\\\\UnRAR.exe', '.'), ('cddagl/locale/en/LC_MESSAGES/cddagl.mo', 'cddagl/locale/en/LC_MESSAGES'), ('cddagl/locale/fr/LC_MESSAGES/cddagl.mo', 'cddagl/locale/fr/LC_MESSAGES'), ('cddagl/locale/it/LC_MESSAGES/cddagl.mo', 'cddagl/locale/it/LC_MESSAGES'), ('cddagl/locale/ja/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ja/LC_MESSAGES'), ('cddagl/locale/ru/LC_MESSAGES/cddagl.mo', 'cddagl/locale/ru/LC_MESSAGES')],\r\n hiddenimports=['lxml.cssselect', 'babel.numbers'],\r\n hookspath=[],\r\n runtime_hooks=[],\r\n excludes=[],\r\n win_no_prefer_redirects=False,\r\n win_private_assemblies=False,\r\n cipher=block_cipher,\r\n noarchive=True)\r\npyz = PYZ(a.pure, a.zipped_data,\r\n cipher=block_cipher)\r\nexe = EXE(pyz,\r\n a.scripts,\r\n [('v', None, 'OPTION')],\r\n exclude_binaries=True,\r\n name='launcher',\r\n debug=True,\r\n bootloader_ignore_signals=False,\r\n strip=False,\r\n upx=False,\r\n console=True , icon='cddagl\\\\resources\\\\launcher.ico')\r\ncoll = COLLECT(exe,\r\n a.binaries,\r\n a.zipfiles,\r\n a.datas,\r\n strip=False,\r\n upx=False,\r\n upx_exclude=[],\r\n name='launcher')\r\n```\r\n\r\nYou can probably reproduce this issue easily by cloning [my project](https://github.com/remyroy/CDDA-Game-Launcher) and issuing the following command:\r\n\r\n```\r\npython setup.py freeze --debug=1\r\n```\r\n\r\nHere is the full pyinstaller log output: https://gist.github.com/remyroy/37f7f0a912d5d714a947cddfb78769d4\r\n\r\nI'll investigate how that entry is added in Analysis to give more context to this issue.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2019, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\nfrom PyInstaller.utils.hooks import collect_submodules\n\n# pkg_resources keeps vendored modules in its _vendor subpackage, and does\n# sys.meta_path based import magic to expose them as pkg_resources.extern.*\nhiddenimports = collect_submodules('pkg_resources._vendor')\n", "path": "PyInstaller/hooks/hook-pkg_resources.py"}]} | 2,200 | 119 |
gh_patches_debug_878 | rasdani/github-patches | git_diff | privacyidea__privacyidea-1746 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix typo in registration token
The example of the registration token contains a typo.
The tokentype, of course, is a "registration" token, not a "register".
</issue>
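For reference, `get_class_type()` in the file below returns `"registration"`, which is why the example should read `type=registration`. A hypothetical client-side sketch (endpoint, parameters, and response field taken from the docstring below; the host and the admin `Authorization` header value are assumptions):

```python
import requests

BASE_URL = "https://privacyidea.example.com"  # assumed host
ADMIN_TOKEN = "<admin auth token>"            # assumed admin credential

resp = requests.post(
    f"{BASE_URL}/token/init",
    headers={"Authorization": ADMIN_TOKEN},
    data={"type": "registration", "user": "cornelius", "realm": "realm1"},
)
# The docstring's example response exposes the code under detail.registrationcode.
print(resp.json()["detail"]["registrationcode"])
```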
<code>
[start of privacyidea/lib/tokens/registrationtoken.py]
1 # -*- coding: utf-8 -*-
2 #
3 # privacyIDEA
4 # Aug 12, 2014 Cornelius Kölbel
5 # License: AGPLv3
6 # contact: http://www.privacyidea.org
7 #
8 # 2015-01-29 Adapt during migration to flask
9 # Cornelius Kölbel <[email protected]>
10 #
11 # This code is free software; you can redistribute it and/or
12 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
13 # License as published by the Free Software Foundation; either
14 # version 3 of the License, or any later version.
15 #
16 # This code is distributed in the hope that it will be useful,
17 # but WITHOUT ANY WARRANTY; without even the implied warranty of
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
20 #
21 # You should have received a copy of the GNU Affero General Public
22 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
23 #
24 """
25 This file contains the definition of the RegisterToken class.
26
27 The code is tested in test_lib_tokens_registration.py.
28 """
29
30 import logging
31
32 from privacyidea.lib.utils import to_unicode
33 from privacyidea.lib.tokens.passwordtoken import PasswordTokenClass
34 from privacyidea.lib.log import log_with
35 from privacyidea.lib.crypto import generate_password
36 from privacyidea.lib.decorators import check_token_locked
37 from privacyidea.lib import _
38
39 optional = True
40 required = False
41
42 log = logging.getLogger(__name__)
43
44
45 class RegistrationTokenClass(PasswordTokenClass):
46 """
47 Token to implement a registration code.
48 It can be used to create a registration code or a "TAN" which can be used
49 once by a user to authenticate somewhere. After this registration code is
50 used, the token is automatically deleted.
51
52 The idea is to provide a workflow, where the user can get a registration code
53 by e.g. postal mail and then use this code as the initial first factor to
54 authenticate to the UI to enroll real tokens.
55
56 A registration code can be created by an administrative task with the
57 token/init api like this:
58
59 **Example Authentication Request**:
60
61 .. sourcecode:: http
62
63 POST /token/init HTTP/1.1
64 Host: example.com
65 Accept: application/json
66
67 type=register
68 user=cornelius
69 realm=realm1
70
71 **Example response**:
72
73 .. sourcecode:: http
74
75 HTTP/1.1 200 OK
76 Content-Type: application/json
77
78 {
79 "detail": {
80 "registrationcode": "12345808124095097608"
81 },
82 "id": 1,
83 "jsonrpc": "2.0",
84 "result": {
85 "status": true,
86 "value": true
87 },
88 "version": "privacyIDEA unknown"
89 }
90
91 """
92
93 def __init__(self, aToken):
94 PasswordTokenClass.__init__(self, aToken)
95 self.hKeyRequired = False
96 self.set_type(u"registration")
97 self.otp_len = 24
98
99 @staticmethod
100 def get_class_type():
101 return "registration"
102
103 @staticmethod
104 def get_class_prefix():
105 return "REG"
106
107 @staticmethod
108 @log_with(log)
109 def get_class_info(key=None, ret='all'):
110 """
111 returns a subtree of the token definition
112
113 :param key: subsection identifier
114 :type key: string
115 :param ret: default return value, if nothing is found
116 :type ret: user defined
117 :return: subsection if key exists or user defined
118 :rtype: dict or scalar
119 """
120 res = {'type': 'registration',
121 'title': 'Registration Code Token',
122 'description': _('Registration: A token that creates a '
123 'registration code that '
124 'can be used as a second factor once.'),
125 'init': {},
126 'config': {},
127 'user': [],
128 # This tokentype is enrollable in the UI for...
129 'ui_enroll': ["admin"],
130 'policy': {},
131 }
132
133 if key:
134 ret = res.get(key)
135 else:
136 if ret == 'all':
137 ret = res
138 return ret
139
140 def update(self, param):
141 """
142 This method is called during the initialization process.
143 :param param: parameters from the token init
144 :type param: dict
145 :return: None
146 """
147 if "genkey" in param:
148 # We do not need the genkey! We generate anyway.
149 # Otherwise genkey and otpkey will raise an exception in
150 # PasswordTokenClass
151 del param["genkey"]
152 param["otpkey"] = generate_password(size=self.otp_len)
153 PasswordTokenClass.update(self, param)
154
155 @log_with(log, log_entry=False)
156 @check_token_locked
157 def inc_count_auth_success(self):
158 """
159 Increase the counter, that counts successful authentications
160 In case of successful authentication the token does needs to be deleted.
161 """
162 self.delete_token()
163 return 1
164
165 @log_with(log)
166 def get_init_detail(self, params=None, user=None):
167 """
168 At the end of the initialization we return the registration code.
169 """
170 response_detail = PasswordTokenClass.get_init_detail(self, params, user)
171 params = params or {}
172 secretHOtp = self.token.get_otpkey()
173 registrationcode = secretHOtp.getKey()
174 response_detail["registrationcode"] = to_unicode(registrationcode)
175 return response_detail
176
[end of privacyidea/lib/tokens/registrationtoken.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/privacyidea/lib/tokens/registrationtoken.py b/privacyidea/lib/tokens/registrationtoken.py
--- a/privacyidea/lib/tokens/registrationtoken.py
+++ b/privacyidea/lib/tokens/registrationtoken.py
@@ -64,7 +64,7 @@
Host: example.com
Accept: application/json
- type=register
+ type=registration
user=cornelius
realm=realm1
| {"golden_diff": "diff --git a/privacyidea/lib/tokens/registrationtoken.py b/privacyidea/lib/tokens/registrationtoken.py\n--- a/privacyidea/lib/tokens/registrationtoken.py\n+++ b/privacyidea/lib/tokens/registrationtoken.py\n@@ -64,7 +64,7 @@\n Host: example.com\n Accept: application/json\n \n- type=register\n+ type=registration\n user=cornelius\n realm=realm1\n", "issue": "Fix typo in registration token\nThe example of the registration token contains a typo.\r\nThe toketype of course is a \"registration\" token, not a \"register\".\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# privacyIDEA\n# Aug 12, 2014 Cornelius K\u00f6lbel\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# 2015-01-29 Adapt during migration to flask\n# Cornelius K\u00f6lbel <[email protected]>\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n\"\"\"\nThis file contains the definition of the RegisterToken class.\n\nThe code is tested in test_lib_tokens_registration.py.\n\"\"\"\n\nimport logging\n\nfrom privacyidea.lib.utils import to_unicode\nfrom privacyidea.lib.tokens.passwordtoken import PasswordTokenClass\nfrom privacyidea.lib.log import log_with\nfrom privacyidea.lib.crypto import generate_password\nfrom privacyidea.lib.decorators import check_token_locked\nfrom privacyidea.lib import _\n\noptional = True\nrequired = False\n\nlog = logging.getLogger(__name__)\n\n\nclass RegistrationTokenClass(PasswordTokenClass):\n \"\"\"\n Token to implement a registration code.\n It can be used to create a registration code or a \"TAN\" which can be used\n once by a user to authenticate somewhere. After this registration code is\n used, the token is automatically deleted.\n\n The idea is to provide a workflow, where the user can get a registration code\n by e.g. postal mail and then use this code as the initial first factor to\n authenticate to the UI to enroll real tokens.\n\n A registration code can be created by an administrative task with the\n token/init api like this:\n\n **Example Authentication Request**:\n\n .. sourcecode:: http\n\n POST /token/init HTTP/1.1\n Host: example.com\n Accept: application/json\n\n type=register\n user=cornelius\n realm=realm1\n\n **Example response**:\n\n .. 
sourcecode:: http\n\n HTTP/1.1 200 OK\n Content-Type: application/json\n\n {\n \"detail\": {\n \"registrationcode\": \"12345808124095097608\"\n },\n \"id\": 1,\n \"jsonrpc\": \"2.0\",\n \"result\": {\n \"status\": true,\n \"value\": true\n },\n \"version\": \"privacyIDEA unknown\"\n }\n\n \"\"\"\n\n def __init__(self, aToken):\n PasswordTokenClass.__init__(self, aToken)\n self.hKeyRequired = False\n self.set_type(u\"registration\")\n self.otp_len = 24\n\n @staticmethod\n def get_class_type():\n return \"registration\"\n\n @staticmethod\n def get_class_prefix():\n return \"REG\"\n\n @staticmethod\n @log_with(log)\n def get_class_info(key=None, ret='all'):\n \"\"\"\n returns a subtree of the token definition\n\n :param key: subsection identifier\n :type key: string\n :param ret: default return value, if nothing is found\n :type ret: user defined\n :return: subsection if key exists or user defined\n :rtype: dict or scalar\n \"\"\"\n res = {'type': 'registration',\n 'title': 'Registration Code Token',\n 'description': _('Registration: A token that creates a '\n 'registration code that '\n 'can be used as a second factor once.'),\n 'init': {},\n 'config': {},\n 'user': [],\n # This tokentype is enrollable in the UI for...\n 'ui_enroll': [\"admin\"],\n 'policy': {},\n }\n\n if key:\n ret = res.get(key)\n else:\n if ret == 'all':\n ret = res\n return ret\n\n def update(self, param):\n \"\"\"\n This method is called during the initialization process.\n :param param: parameters from the token init\n :type param: dict\n :return: None\n \"\"\"\n if \"genkey\" in param:\n # We do not need the genkey! We generate anyway.\n # Otherwise genkey and otpkey will raise an exception in\n # PasswordTokenClass\n del param[\"genkey\"]\n param[\"otpkey\"] = generate_password(size=self.otp_len)\n PasswordTokenClass.update(self, param)\n\n @log_with(log, log_entry=False)\n @check_token_locked\n def inc_count_auth_success(self):\n \"\"\"\n Increase the counter, that counts successful authentications\n In case of successful authentication the token does needs to be deleted.\n \"\"\"\n self.delete_token()\n return 1\n\n @log_with(log)\n def get_init_detail(self, params=None, user=None):\n \"\"\"\n At the end of the initialization we return the registration code.\n \"\"\"\n response_detail = PasswordTokenClass.get_init_detail(self, params, user)\n params = params or {}\n secretHOtp = self.token.get_otpkey()\n registrationcode = secretHOtp.getKey()\n response_detail[\"registrationcode\"] = to_unicode(registrationcode)\n return response_detail\n", "path": "privacyidea/lib/tokens/registrationtoken.py"}]} | 2,234 | 101 |
gh_patches_debug_5499 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-3438 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cometml Logger epoch is not set.
## 🐛 Bug
While logging using Comet ML, there is an argument to set the epoch: https://www.comet.ml/docs/python-sdk/Experiment/#experimentlog_metrics
The info is available in the metrics dict, but instead of being passed as an arg, it is passed as a metrics value. I will supply a PR in a moment.
</issue>
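A minimal sketch of the proposed change, assuming `metrics` is the dict Lightning hands to the logger with the current epoch stored under the `'epoch'` key (this mirrors the accompanying patch rather than introducing a new approach):

```python
# Inside CometLogger.log_metrics: pop the epoch out of the metrics dict and
# forward it through Comet's dedicated keyword argument instead of logging it
# as just another metric value.
def log_metrics(self, metrics, step=None):
    metrics_without_epoch = metrics.copy()
    epoch = metrics_without_epoch.pop("epoch", None)  # None if no epoch was set
    self.experiment.log_metrics(metrics_without_epoch, step=step, epoch=epoch)
```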
<code>
[start of pytorch_lightning/loggers/comet.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Comet
17 -----
18 """
19
20 from argparse import Namespace
21 from typing import Optional, Dict, Union, Any
22
23 try:
24 from comet_ml import Experiment as CometExperiment
25 from comet_ml import ExistingExperiment as CometExistingExperiment
26 from comet_ml import OfflineExperiment as CometOfflineExperiment
27 from comet_ml import BaseExperiment as CometBaseExperiment
28 try:
29 from comet_ml.api import API
30 except ImportError: # pragma: no-cover
31 # For more information, see: https://www.comet.ml/docs/python-sdk/releases/#release-300
32 from comet_ml.papi import API # pragma: no-cover
33 from comet_ml.config import get_config, get_api_key
34 except ImportError: # pragma: no-cover
35 CometExperiment = None
36 CometExistingExperiment = None
37 CometOfflineExperiment = None
38 CometBaseExperiment = None
39 API = None
40 _COMET_AVAILABLE = False
41 else:
42 _COMET_AVAILABLE = True
43
44
45 import torch
46 from torch import is_tensor
47
48 from pytorch_lightning import _logger as log
49 from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment
50 from pytorch_lightning.utilities.exceptions import MisconfigurationException
51 from pytorch_lightning.utilities import rank_zero_only
52
53
54 class CometLogger(LightningLoggerBase):
55 r"""
56 Log using `Comet.ml <https://www.comet.ml>`_. Install it with pip:
57
58 .. code-block:: bash
59
60 pip install comet-ml
61
62 Comet requires either an API Key (online mode) or a local directory path (offline mode).
63
64 **ONLINE MODE**
65
66 Example:
67 >>> import os
68 >>> from pytorch_lightning import Trainer
69 >>> from pytorch_lightning.loggers import CometLogger
70 >>> # arguments made to CometLogger are passed on to the comet_ml.Experiment class
71 >>> comet_logger = CometLogger(
72 ... api_key=os.environ.get('COMET_API_KEY'),
73 ... workspace=os.environ.get('COMET_WORKSPACE'), # Optional
74 ... save_dir='.', # Optional
75 ... project_name='default_project', # Optional
76 ... rest_api_key=os.environ.get('COMET_REST_API_KEY'), # Optional
77 ... experiment_name='default' # Optional
78 ... )
79 >>> trainer = Trainer(logger=comet_logger)
80
81 **OFFLINE MODE**
82
83 Example:
84 >>> from pytorch_lightning.loggers import CometLogger
85 >>> # arguments made to CometLogger are passed on to the comet_ml.Experiment class
86 >>> comet_logger = CometLogger(
87 ... save_dir='.',
88 ... workspace=os.environ.get('COMET_WORKSPACE'), # Optional
89 ... project_name='default_project', # Optional
90 ... rest_api_key=os.environ.get('COMET_REST_API_KEY'), # Optional
91 ... experiment_name='default' # Optional
92 ... )
93 >>> trainer = Trainer(logger=comet_logger)
94
95 Args:
96 api_key: Required in online mode. API key, found on Comet.ml. If not given, this
97 will be loaded from the environment variable COMET_API_KEY or ~/.comet.config
98 if either exists.
99 save_dir: Required in offline mode. The path for the directory to save local
100 comet logs. If given, this also sets the directory for saving checkpoints.
101 workspace: Optional. Name of workspace for this user
102 project_name: Optional. Send your experiment to a specific project.
103 Otherwise will be sent to Uncategorized Experiments.
104 If the project name does not already exist, Comet.ml will create a new project.
105 rest_api_key: Optional. Rest API key found in Comet.ml settings.
106 This is used to determine version number
107 experiment_name: Optional. String representing the name for this particular experiment on Comet.ml.
108 experiment_key: Optional. If set, restores from existing experiment.
109 offline: If api_key and save_dir are both given, this determines whether
110 the experiment will be in online or offline mode. This is useful if you use
111 save_dir to control the checkpoints directory and have a ~/.comet.config
112 file but still want to run offline experiments.
113 """
114
115 def __init__(self,
116 api_key: Optional[str] = None,
117 save_dir: Optional[str] = None,
118 workspace: Optional[str] = None,
119 project_name: Optional[str] = None,
120 rest_api_key: Optional[str] = None,
121 experiment_name: Optional[str] = None,
122 experiment_key: Optional[str] = None,
123 offline: bool = False,
124 **kwargs):
125
126 if not _COMET_AVAILABLE:
127 raise ImportError('You want to use `comet_ml` logger which is not installed yet,'
128 ' install it with `pip install comet-ml`.')
129 super().__init__()
130 self._experiment = None
131
132 # Determine online or offline mode based on which arguments were passed to CometLogger
133 api_key = api_key or get_api_key(None, get_config())
134
135 if api_key is not None and save_dir is not None:
136 self.mode = "offline" if offline else "online"
137 self.api_key = api_key
138 self._save_dir = save_dir
139 elif api_key is not None:
140 self.mode = "online"
141 self.api_key = api_key
142 self._save_dir = None
143 elif save_dir is not None:
144 self.mode = "offline"
145 self._save_dir = save_dir
146 else:
147 # If neither api_key nor save_dir are passed as arguments, raise an exception
148 raise MisconfigurationException(
149 "CometLogger requires either api_key or save_dir during initialization."
150 )
151
152 log.info(f"CometLogger will be initialized in {self.mode} mode")
153
154 self.workspace = workspace
155 self.project_name = project_name
156 self.experiment_key = experiment_key
157 self._kwargs = kwargs
158
159 if rest_api_key is not None:
160 # Comet.ml rest API, used to determine version number
161 self.rest_api_key = rest_api_key
162 self.comet_api = API(self.rest_api_key)
163 else:
164 self.rest_api_key = None
165 self.comet_api = None
166
167 if experiment_name:
168 self.experiment.set_name(experiment_name)
169 self._kwargs = kwargs
170
171 @property
172 @rank_zero_experiment
173 def experiment(self) -> CometBaseExperiment:
174 r"""
175 Actual Comet object. To use Comet features in your
176 :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.
177
178 Example::
179
180 self.logger.experiment.some_comet_function()
181
182 """
183 if self._experiment is not None:
184 return self._experiment
185
186 if self.mode == "online":
187 if self.experiment_key is None:
188 self._experiment = CometExperiment(
189 api_key=self.api_key,
190 workspace=self.workspace,
191 project_name=self.project_name,
192 **self._kwargs
193 )
194 self.experiment_key = self._experiment.get_key()
195 else:
196 self._experiment = CometExistingExperiment(
197 api_key=self.api_key,
198 workspace=self.workspace,
199 project_name=self.project_name,
200 previous_experiment=self.experiment_key,
201 **self._kwargs
202 )
203 else:
204 self._experiment = CometOfflineExperiment(
205 offline_directory=self.save_dir,
206 workspace=self.workspace,
207 project_name=self.project_name,
208 **self._kwargs
209 )
210
211 return self._experiment
212
213 @rank_zero_only
214 def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
215 params = self._convert_params(params)
216 params = self._flatten_dict(params)
217 self.experiment.log_parameters(params)
218
219 @rank_zero_only
220 def log_metrics(
221 self,
222 metrics: Dict[str, Union[torch.Tensor, float]],
223 step: Optional[int] = None
224 ) -> None:
225 assert rank_zero_only.rank == 0, 'experiment tried to log from global_rank != 0'
226
227 # Comet.ml expects metrics to be a dictionary of detached tensors on CPU
228 for key, val in metrics.items():
229 if is_tensor(val):
230 metrics[key] = val.cpu().detach()
231
232 self.experiment.log_metrics(metrics, step=step)
233
234 def reset_experiment(self):
235 self._experiment = None
236
237 @rank_zero_only
238 def finalize(self, status: str) -> None:
239 r"""
240 When calling ``self.experiment.end()``, that experiment won't log any more data to Comet.
241 That's why, if you need to log any more data, you need to create an ExistingCometExperiment.
242 For example, to log data when testing your model after training, because when training is
243 finalized :meth:`CometLogger.finalize` is called.
244
245 This happens automatically in the :meth:`~CometLogger.experiment` property, when
246 ``self._experiment`` is set to ``None``, i.e. ``self.reset_experiment()``.
247 """
248 self.experiment.end()
249 self.reset_experiment()
250
251 @property
252 def save_dir(self) -> Optional[str]:
253 return self._save_dir
254
255 @property
256 def name(self) -> str:
257 return str(self.experiment.project_name)
258
259 @property
260 def version(self) -> str:
261 return self.experiment.id
262
263 def __getstate__(self):
264 state = self.__dict__.copy()
265 state["_experiment"] = None
266 return state
267
[end of pytorch_lightning/loggers/comet.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_lightning/loggers/comet.py b/pytorch_lightning/loggers/comet.py
--- a/pytorch_lightning/loggers/comet.py
+++ b/pytorch_lightning/loggers/comet.py
@@ -229,7 +229,10 @@
if is_tensor(val):
metrics[key] = val.cpu().detach()
- self.experiment.log_metrics(metrics, step=step)
+ metrics_without_epoch = metrics.copy()
+ epoch = metrics_without_epoch.pop('epoch', None)
+
+ self.experiment.log_metrics(metrics_without_epoch, step=step, epoch=epoch)
def reset_experiment(self):
self._experiment = None
| {"golden_diff": "diff --git a/pytorch_lightning/loggers/comet.py b/pytorch_lightning/loggers/comet.py\n--- a/pytorch_lightning/loggers/comet.py\n+++ b/pytorch_lightning/loggers/comet.py\n@@ -229,7 +229,10 @@\n if is_tensor(val):\n metrics[key] = val.cpu().detach()\n \n- self.experiment.log_metrics(metrics, step=step)\n+ metrics_without_epoch = metrics.copy()\n+ epoch = metrics_without_epoch.pop('epoch', None)\n+\n+ self.experiment.log_metrics(metrics_without_epoch, step=step, epoch=epoch)\n \n def reset_experiment(self):\n self._experiment = None\n", "issue": "Cometml Logger epoch is not set.\n<!-- \r\n### Common bugs:\r\n1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). \r\n2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) \r\n-->\r\n\r\n## \ud83d\udc1b Bug\r\n\r\nWhile logging using comet ml there is an argument to set epoch https://www.comet.ml/docs/python-sdk/Experiment/#experimentlog_metrics\r\nThe info is available in metrics dict, but instead of passing it as an arg, it is passed as metrics value. I will suply a PR in a moment\r\n\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nComet\n-----\n\"\"\"\n\nfrom argparse import Namespace\nfrom typing import Optional, Dict, Union, Any\n\ntry:\n from comet_ml import Experiment as CometExperiment\n from comet_ml import ExistingExperiment as CometExistingExperiment\n from comet_ml import OfflineExperiment as CometOfflineExperiment\n from comet_ml import BaseExperiment as CometBaseExperiment\n try:\n from comet_ml.api import API\n except ImportError: # pragma: no-cover\n # For more information, see: https://www.comet.ml/docs/python-sdk/releases/#release-300\n from comet_ml.papi import API # pragma: no-cover\n from comet_ml.config import get_config, get_api_key\nexcept ImportError: # pragma: no-cover\n CometExperiment = None\n CometExistingExperiment = None\n CometOfflineExperiment = None\n CometBaseExperiment = None\n API = None\n _COMET_AVAILABLE = False\nelse:\n _COMET_AVAILABLE = True\n\n\nimport torch\nfrom torch import is_tensor\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities import rank_zero_only\n\n\nclass CometLogger(LightningLoggerBase):\n r\"\"\"\n Log using `Comet.ml <https://www.comet.ml>`_. Install it with pip:\n\n .. 
code-block:: bash\n\n pip install comet-ml\n\n Comet requires either an API Key (online mode) or a local directory path (offline mode).\n\n **ONLINE MODE**\n\n Example:\n >>> import os\n >>> from pytorch_lightning import Trainer\n >>> from pytorch_lightning.loggers import CometLogger\n >>> # arguments made to CometLogger are passed on to the comet_ml.Experiment class\n >>> comet_logger = CometLogger(\n ... api_key=os.environ.get('COMET_API_KEY'),\n ... workspace=os.environ.get('COMET_WORKSPACE'), # Optional\n ... save_dir='.', # Optional\n ... project_name='default_project', # Optional\n ... rest_api_key=os.environ.get('COMET_REST_API_KEY'), # Optional\n ... experiment_name='default' # Optional\n ... )\n >>> trainer = Trainer(logger=comet_logger)\n\n **OFFLINE MODE**\n\n Example:\n >>> from pytorch_lightning.loggers import CometLogger\n >>> # arguments made to CometLogger are passed on to the comet_ml.Experiment class\n >>> comet_logger = CometLogger(\n ... save_dir='.',\n ... workspace=os.environ.get('COMET_WORKSPACE'), # Optional\n ... project_name='default_project', # Optional\n ... rest_api_key=os.environ.get('COMET_REST_API_KEY'), # Optional\n ... experiment_name='default' # Optional\n ... )\n >>> trainer = Trainer(logger=comet_logger)\n\n Args:\n api_key: Required in online mode. API key, found on Comet.ml. If not given, this\n will be loaded from the environment variable COMET_API_KEY or ~/.comet.config\n if either exists.\n save_dir: Required in offline mode. The path for the directory to save local\n comet logs. If given, this also sets the directory for saving checkpoints.\n workspace: Optional. Name of workspace for this user\n project_name: Optional. Send your experiment to a specific project.\n Otherwise will be sent to Uncategorized Experiments.\n If the project name does not already exist, Comet.ml will create a new project.\n rest_api_key: Optional. Rest API key found in Comet.ml settings.\n This is used to determine version number\n experiment_name: Optional. String representing the name for this particular experiment on Comet.ml.\n experiment_key: Optional. If set, restores from existing experiment.\n offline: If api_key and save_dir are both given, this determines whether\n the experiment will be in online or offline mode. 
This is useful if you use\n save_dir to control the checkpoints directory and have a ~/.comet.config\n file but still want to run offline experiments.\n \"\"\"\n\n def __init__(self,\n api_key: Optional[str] = None,\n save_dir: Optional[str] = None,\n workspace: Optional[str] = None,\n project_name: Optional[str] = None,\n rest_api_key: Optional[str] = None,\n experiment_name: Optional[str] = None,\n experiment_key: Optional[str] = None,\n offline: bool = False,\n **kwargs):\n\n if not _COMET_AVAILABLE:\n raise ImportError('You want to use `comet_ml` logger which is not installed yet,'\n ' install it with `pip install comet-ml`.')\n super().__init__()\n self._experiment = None\n\n # Determine online or offline mode based on which arguments were passed to CometLogger\n api_key = api_key or get_api_key(None, get_config())\n\n if api_key is not None and save_dir is not None:\n self.mode = \"offline\" if offline else \"online\"\n self.api_key = api_key\n self._save_dir = save_dir\n elif api_key is not None:\n self.mode = \"online\"\n self.api_key = api_key\n self._save_dir = None\n elif save_dir is not None:\n self.mode = \"offline\"\n self._save_dir = save_dir\n else:\n # If neither api_key nor save_dir are passed as arguments, raise an exception\n raise MisconfigurationException(\n \"CometLogger requires either api_key or save_dir during initialization.\"\n )\n\n log.info(f\"CometLogger will be initialized in {self.mode} mode\")\n\n self.workspace = workspace\n self.project_name = project_name\n self.experiment_key = experiment_key\n self._kwargs = kwargs\n\n if rest_api_key is not None:\n # Comet.ml rest API, used to determine version number\n self.rest_api_key = rest_api_key\n self.comet_api = API(self.rest_api_key)\n else:\n self.rest_api_key = None\n self.comet_api = None\n\n if experiment_name:\n self.experiment.set_name(experiment_name)\n self._kwargs = kwargs\n\n @property\n @rank_zero_experiment\n def experiment(self) -> CometBaseExperiment:\n r\"\"\"\n Actual Comet object. 
To use Comet features in your\n :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.\n\n Example::\n\n self.logger.experiment.some_comet_function()\n\n \"\"\"\n if self._experiment is not None:\n return self._experiment\n\n if self.mode == \"online\":\n if self.experiment_key is None:\n self._experiment = CometExperiment(\n api_key=self.api_key,\n workspace=self.workspace,\n project_name=self.project_name,\n **self._kwargs\n )\n self.experiment_key = self._experiment.get_key()\n else:\n self._experiment = CometExistingExperiment(\n api_key=self.api_key,\n workspace=self.workspace,\n project_name=self.project_name,\n previous_experiment=self.experiment_key,\n **self._kwargs\n )\n else:\n self._experiment = CometOfflineExperiment(\n offline_directory=self.save_dir,\n workspace=self.workspace,\n project_name=self.project_name,\n **self._kwargs\n )\n\n return self._experiment\n\n @rank_zero_only\n def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:\n params = self._convert_params(params)\n params = self._flatten_dict(params)\n self.experiment.log_parameters(params)\n\n @rank_zero_only\n def log_metrics(\n self,\n metrics: Dict[str, Union[torch.Tensor, float]],\n step: Optional[int] = None\n ) -> None:\n assert rank_zero_only.rank == 0, 'experiment tried to log from global_rank != 0'\n\n # Comet.ml expects metrics to be a dictionary of detached tensors on CPU\n for key, val in metrics.items():\n if is_tensor(val):\n metrics[key] = val.cpu().detach()\n\n self.experiment.log_metrics(metrics, step=step)\n\n def reset_experiment(self):\n self._experiment = None\n\n @rank_zero_only\n def finalize(self, status: str) -> None:\n r\"\"\"\n When calling ``self.experiment.end()``, that experiment won't log any more data to Comet.\n That's why, if you need to log any more data, you need to create an ExistingCometExperiment.\n For example, to log data when testing your model after training, because when training is\n finalized :meth:`CometLogger.finalize` is called.\n\n This happens automatically in the :meth:`~CometLogger.experiment` property, when\n ``self._experiment`` is set to ``None``, i.e. ``self.reset_experiment()``.\n \"\"\"\n self.experiment.end()\n self.reset_experiment()\n\n @property\n def save_dir(self) -> Optional[str]:\n return self._save_dir\n\n @property\n def name(self) -> str:\n return str(self.experiment.project_name)\n\n @property\n def version(self) -> str:\n return self.experiment.id\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"_experiment\"] = None\n return state\n", "path": "pytorch_lightning/loggers/comet.py"}]} | 3,570 | 149 |
gh_patches_debug_37467 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1293 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Parameter model_zoo cannot take a path ending with `/`
If the parameter `model_zoo` is a path ending with a slash, the following exception shows up:
```
[2019-10-10 13:15:47,574] [INFO] [image_builder.py:227:_print_docker_progress]
Traceback (most recent call last):
File "/miniconda2/envs/elasticdl-rc0/bin/elasticdl", line 10, in <module>
sys.exit(main())
File "/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/client.py", line 46, in main
args.func(args)
File "/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/api.py", line 24, in train
docker_tlskey=args.docker_tlskey,
File "/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/image_builder.py", line 74, in build_and_push_docker_image
_build_docker_image(client, ctx_dir, df.name, image_name)
File "/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/image_builder.py", line 241, in _build_docker_image
_print_docker_progress(line)
File "/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/image_builder.py", line 224, in _print_docker_progress
raise RuntimeError("Docker image build: " + error)
RuntimeError: Docker image build: COPY failed: stat /var/lib/docker/tmp/docker-builder531748767/model_zoo: no such file or directory
```
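
The `COPY failed` error is consistent with `os.path.basename` returning an empty string for a trailing-slash path, so the model zoo directory name is lost when the Docker build context is assembled. A minimal sketch of the kind of normalization that sidesteps this (the path below is only an example):

```python
import os

model_zoo = "/home/user/model_zoo/"       # user-supplied path with a trailing slash

# basename of a trailing-slash path is empty, which breaks the Docker COPY source
print(os.path.basename(model_zoo))        # -> ""

# normalizing the path first strips the trailing slash
normalized = os.path.normpath(model_zoo)  # -> "/home/user/model_zoo"
print(os.path.basename(normalized))       # -> "model_zoo"
```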
</issue>
<code>
[start of elasticdl/python/elasticdl/api.py]
1 import os
2
3 from elasticdl.python.common import k8s_client as k8s
4 from elasticdl.python.common.args import (
5 build_arguments_from_parsed_result,
6 parse_envs,
7 )
8 from elasticdl.python.common.log_utils import default_logger as logger
9 from elasticdl.python.elasticdl.image_builder import (
10 build_and_push_docker_image,
11 remove_images,
12 )
13
14
15 def train(args):
16 image_name = build_and_push_docker_image(
17 model_zoo=args.model_zoo,
18 base_image=args.image_base,
19 docker_image_repository=args.docker_image_repository,
20 extra_pypi=args.extra_pypi_index,
21 cluster_spec=args.cluster_spec,
22 docker_base_url=args.docker_base_url,
23 docker_tlscert=args.docker_tlscert,
24 docker_tlskey=args.docker_tlskey,
25 )
26
27 container_args = [
28 "-m",
29 "elasticdl.python.master.main",
30 "--worker_image",
31 image_name,
32 "--model_zoo",
33 _model_zoo_in_docker(args.model_zoo),
34 "--cluster_spec",
35 _cluster_spec_def_in_docker(args.cluster_spec),
36 ]
37 container_args.extend(
38 build_arguments_from_parsed_result(
39 args, filter_args=["model_zoo", "cluster_spec", "worker_image"]
40 )
41 )
42
43 _submit_job(image_name, args, container_args)
44 # TODO: print dashboard url after launching the master pod
45
46
47 def evaluate(args):
48 image_name = build_and_push_docker_image(
49 model_zoo=args.model_zoo,
50 base_image=args.image_base,
51 docker_image_repository=args.docker_image_repository,
52 extra_pypi=args.extra_pypi_index,
53 cluster_spec=args.cluster_spec,
54 docker_base_url=args.docker_base_url,
55 docker_tlscert=args.docker_tlscert,
56 docker_tlskey=args.docker_tlskey,
57 )
58 container_args = [
59 "-m",
60 "elasticdl.python.master.main",
61 "--worker_image",
62 image_name,
63 "--model_zoo",
64 _model_zoo_in_docker(args.model_zoo),
65 "--cluster_spec",
66 _cluster_spec_def_in_docker(args.cluster_spec),
67 ]
68 container_args.extend(
69 build_arguments_from_parsed_result(
70 args, filter_args=["model_zoo", "cluster_spec", "worker_image"]
71 )
72 )
73
74 _submit_job(image_name, args, container_args)
75
76
77 def predict(args):
78 image_name = build_and_push_docker_image(
79 model_zoo=args.model_zoo,
80 base_image=args.image_base,
81 docker_image_repository=args.docker_image_repository,
82 extra_pypi=args.extra_pypi_index,
83 cluster_spec=args.cluster_spec,
84 docker_base_url=args.docker_base_url,
85 docker_tlscert=args.docker_tlscert,
86 docker_tlskey=args.docker_tlskey,
87 )
88 container_args = [
89 "-m",
90 "elasticdl.python.master.main",
91 "--worker_image",
92 image_name,
93 "--model_zoo",
94 _model_zoo_in_docker(args.model_zoo),
95 "--cluster_spec",
96 _cluster_spec_def_in_docker(args.cluster_spec),
97 ]
98 container_args.extend(
99 build_arguments_from_parsed_result(
100 args, filter_args=["model_zoo", "cluster_spec", "worker_image"]
101 )
102 )
103
104 _submit_job(image_name, args, container_args)
105
106
107 def clean(args):
108 if args.docker_image_repository and args.all:
109 raise ValueError(
110 "--docker_image_repository and --all cannot "
111 "be specified at the same time"
112 )
113 if not (args.docker_image_repository or args.all):
114 raise ValueError(
115 "Either --docker_image_repository or --all "
116 "needs to be configured"
117 )
118 remove_images(
119 docker_image_repository=args.docker_image_repository,
120 docker_base_url=args.docker_base_url,
121 docker_tlscert=args.docker_tlscert,
122 docker_tlskey=args.docker_tlskey,
123 )
124
125
126 def _submit_job(image_name, client_args, container_args):
127 client = k8s.Client(
128 image_name=image_name,
129 namespace=client_args.namespace,
130 job_name=client_args.job_name,
131 event_callback=None,
132 cluster_spec=client_args.cluster_spec,
133 )
134
135 client.create_master(
136 resource_requests=client_args.master_resource_request,
137 resource_limits=client_args.master_resource_limit,
138 args=container_args,
139 pod_priority=client_args.master_pod_priority,
140 image_pull_policy=client_args.image_pull_policy,
141 restart_policy=client_args.restart_policy,
142 volume=client_args.volume,
143 envs=parse_envs(client_args.envs),
144 )
145 logger.info(
146 "ElasticDL job %s was successfully submitted. The master pod is: %s."
147 % (client_args.job_name, client.get_master_pod_name())
148 )
149
150
151 def _model_zoo_in_docker(model_zoo):
152 MODEL_ROOT_PATH = "/model_zoo"
153 return os.path.join(MODEL_ROOT_PATH, os.path.basename(model_zoo))
154
155
156 def _cluster_spec_def_in_docker(cluster_spec):
157 CLUSTER_SPEC_ROOT_PATH = "/cluster_spec"
158 return (
159 os.path.join(CLUSTER_SPEC_ROOT_PATH, os.path.basename(cluster_spec))
160 if cluster_spec
161 else ""
162 )
163
[end of elasticdl/python/elasticdl/api.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/python/elasticdl/api.py b/elasticdl/python/elasticdl/api.py
--- a/elasticdl/python/elasticdl/api.py
+++ b/elasticdl/python/elasticdl/api.py
@@ -13,8 +13,10 @@
def train(args):
+ model_zoo = os.path.normpath(args.model_zoo)
+
image_name = build_and_push_docker_image(
- model_zoo=args.model_zoo,
+ model_zoo=model_zoo,
base_image=args.image_base,
docker_image_repository=args.docker_image_repository,
extra_pypi=args.extra_pypi_index,
@@ -30,7 +32,7 @@
"--worker_image",
image_name,
"--model_zoo",
- _model_zoo_in_docker(args.model_zoo),
+ _model_zoo_in_docker(model_zoo),
"--cluster_spec",
_cluster_spec_def_in_docker(args.cluster_spec),
]
@@ -45,8 +47,10 @@
def evaluate(args):
+ model_zoo = os.path.normpath(args.model_zoo)
+
image_name = build_and_push_docker_image(
- model_zoo=args.model_zoo,
+ model_zoo=model_zoo,
base_image=args.image_base,
docker_image_repository=args.docker_image_repository,
extra_pypi=args.extra_pypi_index,
@@ -61,7 +65,7 @@
"--worker_image",
image_name,
"--model_zoo",
- _model_zoo_in_docker(args.model_zoo),
+ _model_zoo_in_docker(model_zoo),
"--cluster_spec",
_cluster_spec_def_in_docker(args.cluster_spec),
]
@@ -75,8 +79,10 @@
def predict(args):
+ model_zoo = os.path.normpath(args.model_zoo)
+
image_name = build_and_push_docker_image(
- model_zoo=args.model_zoo,
+ model_zoo=model_zoo,
base_image=args.image_base,
docker_image_repository=args.docker_image_repository,
extra_pypi=args.extra_pypi_index,
@@ -91,7 +97,7 @@
"--worker_image",
image_name,
"--model_zoo",
- _model_zoo_in_docker(args.model_zoo),
+ _model_zoo_in_docker(model_zoo),
"--cluster_spec",
_cluster_spec_def_in_docker(args.cluster_spec),
]
| {"golden_diff": "diff --git a/elasticdl/python/elasticdl/api.py b/elasticdl/python/elasticdl/api.py\n--- a/elasticdl/python/elasticdl/api.py\n+++ b/elasticdl/python/elasticdl/api.py\n@@ -13,8 +13,10 @@\n \n \n def train(args):\n+ model_zoo = os.path.normpath(args.model_zoo)\n+\n image_name = build_and_push_docker_image(\n- model_zoo=args.model_zoo,\n+ model_zoo=model_zoo,\n base_image=args.image_base,\n docker_image_repository=args.docker_image_repository,\n extra_pypi=args.extra_pypi_index,\n@@ -30,7 +32,7 @@\n \"--worker_image\",\n image_name,\n \"--model_zoo\",\n- _model_zoo_in_docker(args.model_zoo),\n+ _model_zoo_in_docker(model_zoo),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n ]\n@@ -45,8 +47,10 @@\n \n \n def evaluate(args):\n+ model_zoo = os.path.normpath(args.model_zoo)\n+\n image_name = build_and_push_docker_image(\n- model_zoo=args.model_zoo,\n+ model_zoo=model_zoo,\n base_image=args.image_base,\n docker_image_repository=args.docker_image_repository,\n extra_pypi=args.extra_pypi_index,\n@@ -61,7 +65,7 @@\n \"--worker_image\",\n image_name,\n \"--model_zoo\",\n- _model_zoo_in_docker(args.model_zoo),\n+ _model_zoo_in_docker(model_zoo),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n ]\n@@ -75,8 +79,10 @@\n \n \n def predict(args):\n+ model_zoo = os.path.normpath(args.model_zoo)\n+\n image_name = build_and_push_docker_image(\n- model_zoo=args.model_zoo,\n+ model_zoo=model_zoo,\n base_image=args.image_base,\n docker_image_repository=args.docker_image_repository,\n extra_pypi=args.extra_pypi_index,\n@@ -91,7 +97,7 @@\n \"--worker_image\",\n image_name,\n \"--model_zoo\",\n- _model_zoo_in_docker(args.model_zoo),\n+ _model_zoo_in_docker(model_zoo),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n ]\n", "issue": "Parameter model_zoo cannot take a path end with `/`\nIf the parameter `model_zoo` is a path end with slash, the following exception will show up:\r\n```\r\n[2019-10-10 13:15:47,574] [INFO] [image_builder.py:227:_print_docker_progress]\r\n\r\nTraceback (most recent call last):\r\n File \"/miniconda2/envs/elasticdl-rc0/bin/elasticdl\", line 10, in <module>\r\n sys.exit(main())\r\n File \"/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/client.py\", line 46, in main\r\n args.func(args)\r\n File \"/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/api.py\", line 24, in train\r\n docker_tlskey=args.docker_tlskey,\r\n File \"/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/image_builder.py\", line 74, in build_and_push_docker_image\r\n _build_docker_image(client, ctx_dir, df.name, image_name)\r\n File \"/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/image_builder.py\", line 241, in _build_docker_image\r\n _print_docker_progress(line)\r\n File \"/miniconda2/envs/elasticdl-rc0/lib/python3.7/site-packages/elasticdl/python/elasticdl/image_builder.py\", line 224, in _print_docker_progress\r\n raise RuntimeError(\"Docker image build: \" + error)\r\nRuntimeError: Docker image build: COPY failed: stat /var/lib/docker/tmp/docker-builder531748767/model_zoo: no such file or directory\r\n```\n", "before_files": [{"content": "import os\n\nfrom elasticdl.python.common import k8s_client as k8s\nfrom elasticdl.python.common.args import (\n build_arguments_from_parsed_result,\n parse_envs,\n)\nfrom elasticdl.python.common.log_utils import default_logger as logger\nfrom 
elasticdl.python.elasticdl.image_builder import (\n build_and_push_docker_image,\n remove_images,\n)\n\n\ndef train(args):\n image_name = build_and_push_docker_image(\n model_zoo=args.model_zoo,\n base_image=args.image_base,\n docker_image_repository=args.docker_image_repository,\n extra_pypi=args.extra_pypi_index,\n cluster_spec=args.cluster_spec,\n docker_base_url=args.docker_base_url,\n docker_tlscert=args.docker_tlscert,\n docker_tlskey=args.docker_tlskey,\n )\n\n container_args = [\n \"-m\",\n \"elasticdl.python.master.main\",\n \"--worker_image\",\n image_name,\n \"--model_zoo\",\n _model_zoo_in_docker(args.model_zoo),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n ]\n container_args.extend(\n build_arguments_from_parsed_result(\n args, filter_args=[\"model_zoo\", \"cluster_spec\", \"worker_image\"]\n )\n )\n\n _submit_job(image_name, args, container_args)\n # TODO: print dashboard url after launching the master pod\n\n\ndef evaluate(args):\n image_name = build_and_push_docker_image(\n model_zoo=args.model_zoo,\n base_image=args.image_base,\n docker_image_repository=args.docker_image_repository,\n extra_pypi=args.extra_pypi_index,\n cluster_spec=args.cluster_spec,\n docker_base_url=args.docker_base_url,\n docker_tlscert=args.docker_tlscert,\n docker_tlskey=args.docker_tlskey,\n )\n container_args = [\n \"-m\",\n \"elasticdl.python.master.main\",\n \"--worker_image\",\n image_name,\n \"--model_zoo\",\n _model_zoo_in_docker(args.model_zoo),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n ]\n container_args.extend(\n build_arguments_from_parsed_result(\n args, filter_args=[\"model_zoo\", \"cluster_spec\", \"worker_image\"]\n )\n )\n\n _submit_job(image_name, args, container_args)\n\n\ndef predict(args):\n image_name = build_and_push_docker_image(\n model_zoo=args.model_zoo,\n base_image=args.image_base,\n docker_image_repository=args.docker_image_repository,\n extra_pypi=args.extra_pypi_index,\n cluster_spec=args.cluster_spec,\n docker_base_url=args.docker_base_url,\n docker_tlscert=args.docker_tlscert,\n docker_tlskey=args.docker_tlskey,\n )\n container_args = [\n \"-m\",\n \"elasticdl.python.master.main\",\n \"--worker_image\",\n image_name,\n \"--model_zoo\",\n _model_zoo_in_docker(args.model_zoo),\n \"--cluster_spec\",\n _cluster_spec_def_in_docker(args.cluster_spec),\n ]\n container_args.extend(\n build_arguments_from_parsed_result(\n args, filter_args=[\"model_zoo\", \"cluster_spec\", \"worker_image\"]\n )\n )\n\n _submit_job(image_name, args, container_args)\n\n\ndef clean(args):\n if args.docker_image_repository and args.all:\n raise ValueError(\n \"--docker_image_repository and --all cannot \"\n \"be specified at the same time\"\n )\n if not (args.docker_image_repository or args.all):\n raise ValueError(\n \"Either --docker_image_repository or --all \"\n \"needs to be configured\"\n )\n remove_images(\n docker_image_repository=args.docker_image_repository,\n docker_base_url=args.docker_base_url,\n docker_tlscert=args.docker_tlscert,\n docker_tlskey=args.docker_tlskey,\n )\n\n\ndef _submit_job(image_name, client_args, container_args):\n client = k8s.Client(\n image_name=image_name,\n namespace=client_args.namespace,\n job_name=client_args.job_name,\n event_callback=None,\n cluster_spec=client_args.cluster_spec,\n )\n\n client.create_master(\n resource_requests=client_args.master_resource_request,\n resource_limits=client_args.master_resource_limit,\n args=container_args,\n pod_priority=client_args.master_pod_priority,\n 
image_pull_policy=client_args.image_pull_policy,\n restart_policy=client_args.restart_policy,\n volume=client_args.volume,\n envs=parse_envs(client_args.envs),\n )\n logger.info(\n \"ElasticDL job %s was successfully submitted. The master pod is: %s.\"\n % (client_args.job_name, client.get_master_pod_name())\n )\n\n\ndef _model_zoo_in_docker(model_zoo):\n MODEL_ROOT_PATH = \"/model_zoo\"\n return os.path.join(MODEL_ROOT_PATH, os.path.basename(model_zoo))\n\n\ndef _cluster_spec_def_in_docker(cluster_spec):\n CLUSTER_SPEC_ROOT_PATH = \"/cluster_spec\"\n return (\n os.path.join(CLUSTER_SPEC_ROOT_PATH, os.path.basename(cluster_spec))\n if cluster_spec\n else \"\"\n )\n", "path": "elasticdl/python/elasticdl/api.py"}]} | 2,445 | 537 |
gh_patches_debug_11115 | rasdani/github-patches | git_diff | optuna__optuna-2686 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Waiting for Heartbeat
## Expected behavior
After `study.optimize` with a high heartbeat interval (e.g. 60s) finishes running trials, it should exit almost immediately.
## Environment
- Optuna version: 2.7.0
- Python version: 3.9
- OS: Fedora 33
- (Optional) Other libraries and their versions: PostgreSQL
## Observed Behavior
Instead of exiting immediately, the main thread tries to join the heartbeat thread, which only completes after the next scheduled heartbeat. This can lead to long delays during which the program does essentially nothing.
## Steps to reproduce
1. Create an RDB storage with a heartbeat interval of 60s.
2. Create a fast objective and a study, then call `study.optimize(n_trials=1)`.
3. Measure the runtime (a minimal reproduction script is sketched below).
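
A minimal script for the steps above — the connection URL is a placeholder and must point at a reachable PostgreSQL instance:

```python
import time

import optuna

# Placeholder DSN; replace with a real PostgreSQL database.
storage = optuna.storages.RDBStorage(
    url="postgresql://user:password@localhost/optuna",
    heartbeat_interval=60,
)
study = optuna.create_study(storage=storage)

start = time.time()
study.optimize(lambda trial: trial.suggest_float("x", 0.0, 1.0), n_trials=1)
print(f"optimize() returned after {time.time() - start:.1f}s")  # roughly 60s with this bug
```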
## Additional context (optional)
It might be better to use the [`wait(timeout=...)`](https://docs.python.org/3/library/threading.html#threading.Event.wait) method [here](https://github.com/optuna/optuna/blob/2fd68d0102a22e76b5a4a142cc87722d92dcf667/optuna/_optimize.py#L340).
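
Concretely, the heartbeat loop could block on the stop event rather than sleeping unconditionally; `Event.wait` returns as soon as the event is set, so shutdown no longer has to wait out a full interval. A sketch of the idea (this mirrors the change in the accompanying patch):

```python
def _record_heartbeat(trial_id: int, storage: storages.BaseStorage, stop_event: Event) -> None:
    heartbeat_interval = storage.get_heartbeat_interval()
    assert heartbeat_interval is not None
    while True:
        storage.record_heartbeat(trial_id)
        # wait() returns True as soon as the event is set, instead of sleeping
        # through the whole interval before noticing the stop request.
        if stop_event.wait(timeout=heartbeat_interval):
            return
```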
</issue>
<code>
[start of optuna/_optimize.py]
1 from concurrent.futures import FIRST_COMPLETED
2 from concurrent.futures import Future
3 from concurrent.futures import ThreadPoolExecutor
4 from concurrent.futures import wait
5 import copy
6 import datetime
7 import gc
8 import itertools
9 import math
10 import os
11 import sys
12 from threading import Event
13 from threading import Thread
14 import time
15 from typing import Any
16 from typing import Callable
17 from typing import cast
18 from typing import List
19 from typing import Optional
20 from typing import Sequence
21 from typing import Set
22 from typing import Tuple
23 from typing import Type
24 from typing import Union
25 import warnings
26
27 import optuna
28 from optuna import exceptions
29 from optuna import logging
30 from optuna import progress_bar as pbar_module
31 from optuna import storages
32 from optuna import trial as trial_module
33 from optuna.trial import FrozenTrial
34 from optuna.trial import TrialState
35
36
37 _logger = logging.get_logger(__name__)
38
39
40 def _optimize(
41 study: "optuna.Study",
42 func: "optuna.study.ObjectiveFuncType",
43 n_trials: Optional[int] = None,
44 timeout: Optional[float] = None,
45 n_jobs: int = 1,
46 catch: Tuple[Type[Exception], ...] = (),
47 callbacks: Optional[List[Callable[["optuna.Study", FrozenTrial], None]]] = None,
48 gc_after_trial: bool = False,
49 show_progress_bar: bool = False,
50 ) -> None:
51 if not isinstance(catch, tuple):
52 raise TypeError(
53 "The catch argument is of type '{}' but must be a tuple.".format(type(catch).__name__)
54 )
55
56 if not study._optimize_lock.acquire(False):
57 raise RuntimeError("Nested invocation of `Study.optimize` method isn't allowed.")
58
59 # TODO(crcrpar): Make progress bar work when n_jobs != 1.
60 progress_bar = pbar_module._ProgressBar(show_progress_bar and n_jobs == 1, n_trials, timeout)
61
62 study._stop_flag = False
63
64 try:
65 if n_jobs == 1:
66 _optimize_sequential(
67 study,
68 func,
69 n_trials,
70 timeout,
71 catch,
72 callbacks,
73 gc_after_trial,
74 reseed_sampler_rng=False,
75 time_start=None,
76 progress_bar=progress_bar,
77 )
78 else:
79 if show_progress_bar:
80 warnings.warn("Progress bar only supports serial execution (`n_jobs=1`).")
81
82 if n_jobs == -1:
83 n_jobs = os.cpu_count() or 1
84
85 time_start = datetime.datetime.now()
86 futures: Set[Future] = set()
87
88 with ThreadPoolExecutor(max_workers=n_jobs) as executor:
89 for n_submitted_trials in itertools.count():
90 if study._stop_flag:
91 break
92
93 if (
94 timeout is not None
95 and (datetime.datetime.now() - time_start).total_seconds() > timeout
96 ):
97 break
98
99 if n_trials is not None and n_submitted_trials >= n_trials:
100 break
101
102 if len(futures) >= n_jobs:
103 completed, futures = wait(futures, return_when=FIRST_COMPLETED)
104 # Raise if exception occurred in executing the completed futures.
105 for f in completed:
106 f.result()
107
108 futures.add(
109 executor.submit(
110 _optimize_sequential,
111 study,
112 func,
113 1,
114 timeout,
115 catch,
116 callbacks,
117 gc_after_trial,
118 True,
119 time_start,
120 None,
121 )
122 )
123 finally:
124 study._optimize_lock.release()
125 progress_bar.close()
126
127
128 def _optimize_sequential(
129 study: "optuna.Study",
130 func: "optuna.study.ObjectiveFuncType",
131 n_trials: Optional[int],
132 timeout: Optional[float],
133 catch: Tuple[Type[Exception], ...],
134 callbacks: Optional[List[Callable[["optuna.Study", FrozenTrial], None]]],
135 gc_after_trial: bool,
136 reseed_sampler_rng: bool,
137 time_start: Optional[datetime.datetime],
138 progress_bar: Optional[pbar_module._ProgressBar],
139 ) -> None:
140 if reseed_sampler_rng:
141 study.sampler.reseed_rng()
142
143 i_trial = 0
144
145 if time_start is None:
146 time_start = datetime.datetime.now()
147
148 while True:
149 if study._stop_flag:
150 break
151
152 if n_trials is not None:
153 if i_trial >= n_trials:
154 break
155 i_trial += 1
156
157 if timeout is not None:
158 elapsed_seconds = (datetime.datetime.now() - time_start).total_seconds()
159 if elapsed_seconds >= timeout:
160 break
161
162 try:
163 trial = _run_trial(study, func, catch)
164 except Exception:
165 raise
166 finally:
167 # The following line mitigates memory problems that can be occurred in some
168 # environments (e.g., services that use computing containers such as CircleCI).
169 # Please refer to the following PR for further details:
170 # https://github.com/optuna/optuna/pull/325.
171 if gc_after_trial:
172 gc.collect()
173
174 if callbacks is not None:
175 frozen_trial = copy.deepcopy(study._storage.get_trial(trial._trial_id))
176 for callback in callbacks:
177 callback(study, frozen_trial)
178
179 if progress_bar is not None:
180 progress_bar.update((datetime.datetime.now() - time_start).total_seconds())
181
182 study._storage.remove_session()
183
184
185 def _run_trial(
186 study: "optuna.Study",
187 func: "optuna.study.ObjectiveFuncType",
188 catch: Tuple[Type[Exception], ...],
189 ) -> trial_module.Trial:
190 if study._storage.is_heartbeat_enabled():
191 failed_trial_ids = study._storage.fail_stale_trials(study._study_id)
192 failed_trial_callback = study._storage.get_failed_trial_callback()
193 if failed_trial_callback is not None:
194 for trial_id in failed_trial_ids:
195 failed_trial = copy.deepcopy(study._storage.get_trial(trial_id))
196 failed_trial_callback(study, failed_trial)
197
198 trial = study.ask()
199
200 state: Optional[TrialState] = None
201 values: Optional[List[float]] = None
202 func_err: Optional[Exception] = None
203 func_err_fail_exc_info: Optional[Any] = None
204 # Set to a string if `func` returns correctly but the return value violates assumptions.
205 values_conversion_failure_message: Optional[str] = None
206 stop_event: Optional[Event] = None
207 thread: Optional[Thread] = None
208
209 if study._storage.is_heartbeat_enabled():
210 stop_event = Event()
211 thread = Thread(
212 target=_record_heartbeat, args=(trial._trial_id, study._storage, stop_event)
213 )
214 thread.start()
215
216 try:
217 value_or_values = func(trial)
218 except exceptions.TrialPruned as e:
219 # TODO(mamu): Handle multi-objective cases.
220 state = TrialState.PRUNED
221 func_err = e
222 except Exception as e:
223 state = TrialState.FAIL
224 func_err = e
225 func_err_fail_exc_info = sys.exc_info()
226 else:
227 # TODO(hvy): Avoid checking the values both here and inside `Study.tell`.
228 values, values_conversion_failure_message = _check_and_convert_to_values(
229 len(study.directions), value_or_values, trial.number
230 )
231 if values_conversion_failure_message is not None:
232 state = TrialState.FAIL
233 else:
234 state = TrialState.COMPLETE
235
236 if study._storage.is_heartbeat_enabled():
237 assert stop_event is not None
238 assert thread is not None
239 stop_event.set()
240 thread.join()
241
242 # `Study.tell` may raise during trial post-processing.
243 try:
244 study.tell(trial, values=values, state=state)
245 except Exception:
246 raise
247 finally:
248 if state == TrialState.COMPLETE:
249 study._log_completed_trial(trial, cast(List[float], values))
250 elif state == TrialState.PRUNED:
251 _logger.info("Trial {} pruned. {}".format(trial.number, str(func_err)))
252 elif state == TrialState.FAIL:
253 if func_err is not None:
254 _logger.warning(
255 "Trial {} failed because of the following error: {}".format(
256 trial.number, repr(func_err)
257 ),
258 exc_info=func_err_fail_exc_info,
259 )
260 elif values_conversion_failure_message is not None:
261 _logger.warning(values_conversion_failure_message)
262 else:
263 assert False, "Should not reach."
264 else:
265 assert False, "Should not reach."
266
267 if state == TrialState.FAIL and func_err is not None and not isinstance(func_err, catch):
268 raise func_err
269 return trial
270
271
272 def _check_and_convert_to_values(
273 n_objectives: int, original_value: Union[float, Sequence[float]], trial_number: int
274 ) -> Tuple[Optional[List[float]], Optional[str]]:
275 if isinstance(original_value, Sequence):
276 if n_objectives != len(original_value):
277 return (
278 None,
279 (
280 f"Trial {trial_number} failed, because the number of the values "
281 f"{len(original_value)} did not match the number of the objectives "
282 f"{n_objectives}."
283 ),
284 )
285 else:
286 _original_values = list(original_value)
287 else:
288 _original_values = [original_value]
289
290 _checked_values = []
291 for v in _original_values:
292 checked_v, failure_message = _check_single_value(v, trial_number)
293 if failure_message is not None:
294 # TODO(Imamura): Construct error message taking into account all values and do not
295 # early return
296 # `value` is assumed to be ignored on failure so we can set it to any value.
297 return None, failure_message
298 elif isinstance(checked_v, float):
299 _checked_values.append(checked_v)
300 else:
301 assert False
302
303 return _checked_values, None
304
305
306 def _check_single_value(
307 original_value: float, trial_number: int
308 ) -> Tuple[Optional[float], Optional[str]]:
309 value = None
310 failure_message = None
311
312 try:
313 value = float(original_value)
314 except (
315 ValueError,
316 TypeError,
317 ):
318 failure_message = (
319 f"Trial {trial_number} failed, because the value {repr(original_value)} could not be "
320 "cast to float."
321 )
322
323 if value is not None and math.isnan(value):
324 value = None
325 failure_message = (
326 f"Trial {trial_number} failed, because the objective function returned "
327 f"{original_value}."
328 )
329
330 return value, failure_message
331
332
333 def _record_heartbeat(trial_id: int, storage: storages.BaseStorage, stop_event: Event) -> None:
334 heartbeat_interval = storage.get_heartbeat_interval()
335 assert heartbeat_interval is not None
336 while True:
337 storage.record_heartbeat(trial_id)
338 if stop_event.is_set():
339 return
340 time.sleep(heartbeat_interval)
341
[end of optuna/_optimize.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/optuna/_optimize.py b/optuna/_optimize.py
--- a/optuna/_optimize.py
+++ b/optuna/_optimize.py
@@ -11,7 +11,6 @@
import sys
from threading import Event
from threading import Thread
-import time
from typing import Any
from typing import Callable
from typing import cast
@@ -335,6 +334,5 @@
assert heartbeat_interval is not None
while True:
storage.record_heartbeat(trial_id)
- if stop_event.is_set():
+ if stop_event.wait(timeout=heartbeat_interval):
return
- time.sleep(heartbeat_interval)
| {"golden_diff": "diff --git a/optuna/_optimize.py b/optuna/_optimize.py\n--- a/optuna/_optimize.py\n+++ b/optuna/_optimize.py\n@@ -11,7 +11,6 @@\n import sys\n from threading import Event\n from threading import Thread\n-import time\n from typing import Any\n from typing import Callable\n from typing import cast\n@@ -335,6 +334,5 @@\n assert heartbeat_interval is not None\n while True:\n storage.record_heartbeat(trial_id)\n- if stop_event.is_set():\n+ if stop_event.wait(timeout=heartbeat_interval):\n return\n- time.sleep(heartbeat_interval)\n", "issue": "Waiting for Heartbeat\n<!-- Please write a clear and concise description of what the bug is. -->\r\n\r\n## Expected behavior\r\n\r\nAfter `study.optimize` with a high heartbeat interval (e.g. 60s) finishes running trials, it should exit almost immediately. \r\n\r\n## Environment\r\n\r\n- Optuna version: 2.7.0\r\n- Python version: 3.9\r\n- OS: Fedora 33\r\n- (Optional) Other libraries and their versions: PostgreSQL\r\n\r\n## Observed Behavior\r\nInstead of exiting immediately, the main thread tries to join with the heartbeat thread, which only completes after the next scheduled hearbeat. This can lead to high delays where the program does basically nothing.\r\n\r\n## Steps to reproduce\r\n\r\n1. Create a RDB storage with a heartbeat interval of 60s.\r\n2. Create a fast objective, a study and call `study.optimize(n_trials=1)`.\r\n3. Measure runtime.\r\n\r\n## Additional context (optional)\r\nIt might be better to use the [`wait(timeout=...)`](https://docs.python.org/3/library/threading.html#threading.Event.wait) method [here](https://github.com/optuna/optuna/blob/2fd68d0102a22e76b5a4a142cc87722d92dcf667/optuna/_optimize.py#L340).\r\n\r\n\n", "before_files": [{"content": "from concurrent.futures import FIRST_COMPLETED\nfrom concurrent.futures import Future\nfrom concurrent.futures import ThreadPoolExecutor\nfrom concurrent.futures import wait\nimport copy\nimport datetime\nimport gc\nimport itertools\nimport math\nimport os\nimport sys\nfrom threading import Event\nfrom threading import Thread\nimport time\nfrom typing import Any\nfrom typing import Callable\nfrom typing import cast\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Set\nfrom typing import Tuple\nfrom typing import Type\nfrom typing import Union\nimport warnings\n\nimport optuna\nfrom optuna import exceptions\nfrom optuna import logging\nfrom optuna import progress_bar as pbar_module\nfrom optuna import storages\nfrom optuna import trial as trial_module\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\n\n\n_logger = logging.get_logger(__name__)\n\n\ndef _optimize(\n study: \"optuna.Study\",\n func: \"optuna.study.ObjectiveFuncType\",\n n_trials: Optional[int] = None,\n timeout: Optional[float] = None,\n n_jobs: int = 1,\n catch: Tuple[Type[Exception], ...] 
= (),\n callbacks: Optional[List[Callable[[\"optuna.Study\", FrozenTrial], None]]] = None,\n gc_after_trial: bool = False,\n show_progress_bar: bool = False,\n) -> None:\n if not isinstance(catch, tuple):\n raise TypeError(\n \"The catch argument is of type '{}' but must be a tuple.\".format(type(catch).__name__)\n )\n\n if not study._optimize_lock.acquire(False):\n raise RuntimeError(\"Nested invocation of `Study.optimize` method isn't allowed.\")\n\n # TODO(crcrpar): Make progress bar work when n_jobs != 1.\n progress_bar = pbar_module._ProgressBar(show_progress_bar and n_jobs == 1, n_trials, timeout)\n\n study._stop_flag = False\n\n try:\n if n_jobs == 1:\n _optimize_sequential(\n study,\n func,\n n_trials,\n timeout,\n catch,\n callbacks,\n gc_after_trial,\n reseed_sampler_rng=False,\n time_start=None,\n progress_bar=progress_bar,\n )\n else:\n if show_progress_bar:\n warnings.warn(\"Progress bar only supports serial execution (`n_jobs=1`).\")\n\n if n_jobs == -1:\n n_jobs = os.cpu_count() or 1\n\n time_start = datetime.datetime.now()\n futures: Set[Future] = set()\n\n with ThreadPoolExecutor(max_workers=n_jobs) as executor:\n for n_submitted_trials in itertools.count():\n if study._stop_flag:\n break\n\n if (\n timeout is not None\n and (datetime.datetime.now() - time_start).total_seconds() > timeout\n ):\n break\n\n if n_trials is not None and n_submitted_trials >= n_trials:\n break\n\n if len(futures) >= n_jobs:\n completed, futures = wait(futures, return_when=FIRST_COMPLETED)\n # Raise if exception occurred in executing the completed futures.\n for f in completed:\n f.result()\n\n futures.add(\n executor.submit(\n _optimize_sequential,\n study,\n func,\n 1,\n timeout,\n catch,\n callbacks,\n gc_after_trial,\n True,\n time_start,\n None,\n )\n )\n finally:\n study._optimize_lock.release()\n progress_bar.close()\n\n\ndef _optimize_sequential(\n study: \"optuna.Study\",\n func: \"optuna.study.ObjectiveFuncType\",\n n_trials: Optional[int],\n timeout: Optional[float],\n catch: Tuple[Type[Exception], ...],\n callbacks: Optional[List[Callable[[\"optuna.Study\", FrozenTrial], None]]],\n gc_after_trial: bool,\n reseed_sampler_rng: bool,\n time_start: Optional[datetime.datetime],\n progress_bar: Optional[pbar_module._ProgressBar],\n) -> None:\n if reseed_sampler_rng:\n study.sampler.reseed_rng()\n\n i_trial = 0\n\n if time_start is None:\n time_start = datetime.datetime.now()\n\n while True:\n if study._stop_flag:\n break\n\n if n_trials is not None:\n if i_trial >= n_trials:\n break\n i_trial += 1\n\n if timeout is not None:\n elapsed_seconds = (datetime.datetime.now() - time_start).total_seconds()\n if elapsed_seconds >= timeout:\n break\n\n try:\n trial = _run_trial(study, func, catch)\n except Exception:\n raise\n finally:\n # The following line mitigates memory problems that can be occurred in some\n # environments (e.g., services that use computing containers such as CircleCI).\n # Please refer to the following PR for further details:\n # https://github.com/optuna/optuna/pull/325.\n if gc_after_trial:\n gc.collect()\n\n if callbacks is not None:\n frozen_trial = copy.deepcopy(study._storage.get_trial(trial._trial_id))\n for callback in callbacks:\n callback(study, frozen_trial)\n\n if progress_bar is not None:\n progress_bar.update((datetime.datetime.now() - time_start).total_seconds())\n\n study._storage.remove_session()\n\n\ndef _run_trial(\n study: \"optuna.Study\",\n func: \"optuna.study.ObjectiveFuncType\",\n catch: Tuple[Type[Exception], ...],\n) -> trial_module.Trial:\n if 
study._storage.is_heartbeat_enabled():\n failed_trial_ids = study._storage.fail_stale_trials(study._study_id)\n failed_trial_callback = study._storage.get_failed_trial_callback()\n if failed_trial_callback is not None:\n for trial_id in failed_trial_ids:\n failed_trial = copy.deepcopy(study._storage.get_trial(trial_id))\n failed_trial_callback(study, failed_trial)\n\n trial = study.ask()\n\n state: Optional[TrialState] = None\n values: Optional[List[float]] = None\n func_err: Optional[Exception] = None\n func_err_fail_exc_info: Optional[Any] = None\n # Set to a string if `func` returns correctly but the return value violates assumptions.\n values_conversion_failure_message: Optional[str] = None\n stop_event: Optional[Event] = None\n thread: Optional[Thread] = None\n\n if study._storage.is_heartbeat_enabled():\n stop_event = Event()\n thread = Thread(\n target=_record_heartbeat, args=(trial._trial_id, study._storage, stop_event)\n )\n thread.start()\n\n try:\n value_or_values = func(trial)\n except exceptions.TrialPruned as e:\n # TODO(mamu): Handle multi-objective cases.\n state = TrialState.PRUNED\n func_err = e\n except Exception as e:\n state = TrialState.FAIL\n func_err = e\n func_err_fail_exc_info = sys.exc_info()\n else:\n # TODO(hvy): Avoid checking the values both here and inside `Study.tell`.\n values, values_conversion_failure_message = _check_and_convert_to_values(\n len(study.directions), value_or_values, trial.number\n )\n if values_conversion_failure_message is not None:\n state = TrialState.FAIL\n else:\n state = TrialState.COMPLETE\n\n if study._storage.is_heartbeat_enabled():\n assert stop_event is not None\n assert thread is not None\n stop_event.set()\n thread.join()\n\n # `Study.tell` may raise during trial post-processing.\n try:\n study.tell(trial, values=values, state=state)\n except Exception:\n raise\n finally:\n if state == TrialState.COMPLETE:\n study._log_completed_trial(trial, cast(List[float], values))\n elif state == TrialState.PRUNED:\n _logger.info(\"Trial {} pruned. 
{}\".format(trial.number, str(func_err)))\n elif state == TrialState.FAIL:\n if func_err is not None:\n _logger.warning(\n \"Trial {} failed because of the following error: {}\".format(\n trial.number, repr(func_err)\n ),\n exc_info=func_err_fail_exc_info,\n )\n elif values_conversion_failure_message is not None:\n _logger.warning(values_conversion_failure_message)\n else:\n assert False, \"Should not reach.\"\n else:\n assert False, \"Should not reach.\"\n\n if state == TrialState.FAIL and func_err is not None and not isinstance(func_err, catch):\n raise func_err\n return trial\n\n\ndef _check_and_convert_to_values(\n n_objectives: int, original_value: Union[float, Sequence[float]], trial_number: int\n) -> Tuple[Optional[List[float]], Optional[str]]:\n if isinstance(original_value, Sequence):\n if n_objectives != len(original_value):\n return (\n None,\n (\n f\"Trial {trial_number} failed, because the number of the values \"\n f\"{len(original_value)} did not match the number of the objectives \"\n f\"{n_objectives}.\"\n ),\n )\n else:\n _original_values = list(original_value)\n else:\n _original_values = [original_value]\n\n _checked_values = []\n for v in _original_values:\n checked_v, failure_message = _check_single_value(v, trial_number)\n if failure_message is not None:\n # TODO(Imamura): Construct error message taking into account all values and do not\n # early return\n # `value` is assumed to be ignored on failure so we can set it to any value.\n return None, failure_message\n elif isinstance(checked_v, float):\n _checked_values.append(checked_v)\n else:\n assert False\n\n return _checked_values, None\n\n\ndef _check_single_value(\n original_value: float, trial_number: int\n) -> Tuple[Optional[float], Optional[str]]:\n value = None\n failure_message = None\n\n try:\n value = float(original_value)\n except (\n ValueError,\n TypeError,\n ):\n failure_message = (\n f\"Trial {trial_number} failed, because the value {repr(original_value)} could not be \"\n \"cast to float.\"\n )\n\n if value is not None and math.isnan(value):\n value = None\n failure_message = (\n f\"Trial {trial_number} failed, because the objective function returned \"\n f\"{original_value}.\"\n )\n\n return value, failure_message\n\n\ndef _record_heartbeat(trial_id: int, storage: storages.BaseStorage, stop_event: Event) -> None:\n heartbeat_interval = storage.get_heartbeat_interval()\n assert heartbeat_interval is not None\n while True:\n storage.record_heartbeat(trial_id)\n if stop_event.is_set():\n return\n time.sleep(heartbeat_interval)\n", "path": "optuna/_optimize.py"}]} | 4,085 | 140 |
gh_patches_debug_31575 | rasdani/github-patches | git_diff | python-discord__bot-475 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tags can't be edited due to using the POST method with the API
Currently, the `!tag edit` subcommand is just an alias of `!tag set`. This means that if we try to edit an existing tag, the bot uses the POST HTTP method to talk to the API. Since we're not posting a new tag but editing an existing entry, the API rejects the request.
Instead of using POST, we should be using PATCH, since we're only partially updating the entry in the database.
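
One way to fix this is to give `edit` its own subcommand that sends only the changed fields with PATCH — a rough sketch, assuming the bot's API client exposes a `patch` helper alongside `post`:

```python
@tags_group.command(name='edit', aliases=('e',))
@with_role(*MODERATION_ROLES)
async def edit_command(
    self,
    ctx: Context,
    tag_name: TagNameConverter,
    *,
    tag_content: TagContentConverter,
) -> None:
    """Edit an existing tag with a partial update."""
    body = {
        'embed': {
            'title': tag_name,
            'description': tag_content
        }
    }

    # PATCH updates only the supplied fields, so the API accepts it for existing tags.
    await self.bot.api_client.patch(f'bot/tags/{tag_name}', json=body)
```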
</issue>
<code>
[start of bot/cogs/tags.py]
1 import logging
2 import time
3
4 from discord import Colour, Embed
5 from discord.ext.commands import Bot, Cog, Context, group
6
7 from bot.constants import Channels, Cooldowns, MODERATION_ROLES, Roles
8 from bot.converters import TagContentConverter, TagNameConverter
9 from bot.decorators import with_role
10 from bot.pagination import LinePaginator
11
12
13 log = logging.getLogger(__name__)
14
15 TEST_CHANNELS = (
16 Channels.devtest,
17 Channels.bot,
18 Channels.helpers
19 )
20
21
22 class Tags(Cog):
23 """Save new tags and fetch existing tags."""
24
25 def __init__(self, bot: Bot):
26 self.bot = bot
27 self.tag_cooldowns = {}
28
29 @group(name='tags', aliases=('tag', 't'), invoke_without_command=True)
30 async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:
31 """Show all known tags, a single tag, or run a subcommand."""
32 await ctx.invoke(self.get_command, tag_name=tag_name)
33
34 @tags_group.command(name='get', aliases=('show', 'g'))
35 async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:
36 """Get a specified tag, or a list of all tags if no tag is specified."""
37 def _command_on_cooldown(tag_name: str) -> bool:
38 """
39 Check if the command is currently on cooldown, on a per-tag, per-channel basis.
40
41 The cooldown duration is set in constants.py.
42 """
43 now = time.time()
44
45 cooldown_conditions = (
46 tag_name
47 and tag_name in self.tag_cooldowns
48 and (now - self.tag_cooldowns[tag_name]["time"]) < Cooldowns.tags
49 and self.tag_cooldowns[tag_name]["channel"] == ctx.channel.id
50 )
51
52 if cooldown_conditions:
53 return True
54 return False
55
56 if _command_on_cooldown(tag_name):
57 time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name]["time"])
58 log.warning(f"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. "
59 f"Cooldown ends in {time_left:.1f} seconds.")
60 return
61
62 if tag_name is not None:
63 tag = await self.bot.api_client.get(f'bot/tags/{tag_name}')
64 if ctx.channel.id not in TEST_CHANNELS:
65 self.tag_cooldowns[tag_name] = {
66 "time": time.time(),
67 "channel": ctx.channel.id
68 }
69 await ctx.send(embed=Embed.from_dict(tag['embed']))
70
71 else:
72 tags = await self.bot.api_client.get('bot/tags')
73 if not tags:
74 await ctx.send(embed=Embed(
75 description="**There are no tags in the database!**",
76 colour=Colour.red()
77 ))
78 else:
79 embed: Embed = Embed(title="**Current tags**")
80 await LinePaginator.paginate(
81 sorted(f"**»** {tag['title']}" for tag in tags),
82 ctx,
83 embed,
84 footer_text="To show a tag, type !tags <tagname>.",
85 empty=False,
86 max_lines=15
87 )
88
89 @tags_group.command(name='set', aliases=('add', 'edit', 's'))
90 @with_role(*MODERATION_ROLES)
91 async def set_command(
92 self,
93 ctx: Context,
94 tag_name: TagNameConverter,
95 *,
96 tag_content: TagContentConverter,
97 ) -> None:
98 """Create a new tag or update an existing one."""
99 body = {
100 'title': tag_name.lower().strip(),
101 'embed': {
102 'title': tag_name,
103 'description': tag_content
104 }
105 }
106
107 await self.bot.api_client.post('bot/tags', json=body)
108
109 log.debug(f"{ctx.author} successfully added the following tag to our database: \n"
110 f"tag_name: {tag_name}\n"
111 f"tag_content: '{tag_content}'\n")
112
113 await ctx.send(embed=Embed(
114 title="Tag successfully added",
115 description=f"**{tag_name}** added to tag database.",
116 colour=Colour.blurple()
117 ))
118
119 @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))
120 @with_role(Roles.admin, Roles.owner)
121 async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:
122 """Remove a tag from the database."""
123 await self.bot.api_client.delete(f'bot/tags/{tag_name}')
124
125 log.debug(f"{ctx.author} successfully deleted the tag called '{tag_name}'")
126 await ctx.send(embed=Embed(
127 title=tag_name,
128 description=f"Tag successfully removed: {tag_name}.",
129 colour=Colour.blurple()
130 ))
131
132
133 def setup(bot: Bot) -> None:
134 """Tags cog load."""
135 bot.add_cog(Tags(bot))
136 log.info("Cog loaded: Tags")
137
[end of bot/cogs/tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bot/cogs/tags.py b/bot/cogs/tags.py
--- a/bot/cogs/tags.py
+++ b/bot/cogs/tags.py
@@ -86,7 +86,7 @@
max_lines=15
)
- @tags_group.command(name='set', aliases=('add', 'edit', 's'))
+ @tags_group.command(name='set', aliases=('add', 's'))
@with_role(*MODERATION_ROLES)
async def set_command(
self,
@@ -95,7 +95,7 @@
*,
tag_content: TagContentConverter,
) -> None:
- """Create a new tag or update an existing one."""
+ """Create a new tag."""
body = {
'title': tag_name.lower().strip(),
'embed': {
@@ -116,6 +116,35 @@
colour=Colour.blurple()
))
+ @tags_group.command(name='edit', aliases=('e', ))
+ @with_role(*MODERATION_ROLES)
+ async def edit_command(
+ self,
+ ctx: Context,
+ tag_name: TagNameConverter,
+ *,
+ tag_content: TagContentConverter,
+ ) -> None:
+ """Edit an existing tag."""
+ body = {
+ 'embed': {
+ 'title': tag_name,
+ 'description': tag_content
+ }
+ }
+
+ await self.bot.api_client.patch(f'bot/tags/{tag_name}', json=body)
+
+ log.debug(f"{ctx.author} successfully edited the following tag in our database: \n"
+ f"tag_name: {tag_name}\n"
+ f"tag_content: '{tag_content}'\n")
+
+ await ctx.send(embed=Embed(
+ title="Tag successfully edited",
+ description=f"**{tag_name}** edited in the database.",
+ colour=Colour.blurple()
+ ))
+
@tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))
@with_role(Roles.admin, Roles.owner)
async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:
| {"golden_diff": "diff --git a/bot/cogs/tags.py b/bot/cogs/tags.py\n--- a/bot/cogs/tags.py\n+++ b/bot/cogs/tags.py\n@@ -86,7 +86,7 @@\n max_lines=15\n )\n \n- @tags_group.command(name='set', aliases=('add', 'edit', 's'))\n+ @tags_group.command(name='set', aliases=('add', 's'))\n @with_role(*MODERATION_ROLES)\n async def set_command(\n self,\n@@ -95,7 +95,7 @@\n *,\n tag_content: TagContentConverter,\n ) -> None:\n- \"\"\"Create a new tag or update an existing one.\"\"\"\n+ \"\"\"Create a new tag.\"\"\"\n body = {\n 'title': tag_name.lower().strip(),\n 'embed': {\n@@ -116,6 +116,35 @@\n colour=Colour.blurple()\n ))\n \n+ @tags_group.command(name='edit', aliases=('e', ))\n+ @with_role(*MODERATION_ROLES)\n+ async def edit_command(\n+ self,\n+ ctx: Context,\n+ tag_name: TagNameConverter,\n+ *,\n+ tag_content: TagContentConverter,\n+ ) -> None:\n+ \"\"\"Edit an existing tag.\"\"\"\n+ body = {\n+ 'embed': {\n+ 'title': tag_name,\n+ 'description': tag_content\n+ }\n+ }\n+\n+ await self.bot.api_client.patch(f'bot/tags/{tag_name}', json=body)\n+\n+ log.debug(f\"{ctx.author} successfully edited the following tag in our database: \\n\"\n+ f\"tag_name: {tag_name}\\n\"\n+ f\"tag_content: '{tag_content}'\\n\")\n+\n+ await ctx.send(embed=Embed(\n+ title=\"Tag successfully edited\",\n+ description=f\"**{tag_name}** edited in the database.\",\n+ colour=Colour.blurple()\n+ ))\n+\n @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))\n @with_role(Roles.admin, Roles.owner)\n async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:\n", "issue": "Tags can't be edited due to using the POST method with the API\nCurrently, the `!tag edit` subcommand is just an alias of `!tag set`. This means that if we try to edit an existing tag, the bot will use the POST http method to communicate with the API. Since we're not posting a new tag, but editing an existing entry, the API will reject this request. 
\r\n\r\nInstead of using POST, we should be using PATCH, since we're only partially updating the entry in the database.\n", "before_files": [{"content": "import logging\nimport time\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import Bot, Cog, Context, group\n\nfrom bot.constants import Channels, Cooldowns, MODERATION_ROLES, Roles\nfrom bot.converters import TagContentConverter, TagNameConverter\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\n\n\nlog = logging.getLogger(__name__)\n\nTEST_CHANNELS = (\n Channels.devtest,\n Channels.bot,\n Channels.helpers\n)\n\n\nclass Tags(Cog):\n \"\"\"Save new tags and fetch existing tags.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.tag_cooldowns = {}\n\n @group(name='tags', aliases=('tag', 't'), invoke_without_command=True)\n async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Show all known tags, a single tag, or run a subcommand.\"\"\"\n await ctx.invoke(self.get_command, tag_name=tag_name)\n\n @tags_group.command(name='get', aliases=('show', 'g'))\n async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Get a specified tag, or a list of all tags if no tag is specified.\"\"\"\n def _command_on_cooldown(tag_name: str) -> bool:\n \"\"\"\n Check if the command is currently on cooldown, on a per-tag, per-channel basis.\n\n The cooldown duration is set in constants.py.\n \"\"\"\n now = time.time()\n\n cooldown_conditions = (\n tag_name\n and tag_name in self.tag_cooldowns\n and (now - self.tag_cooldowns[tag_name][\"time\"]) < Cooldowns.tags\n and self.tag_cooldowns[tag_name][\"channel\"] == ctx.channel.id\n )\n\n if cooldown_conditions:\n return True\n return False\n\n if _command_on_cooldown(tag_name):\n time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name][\"time\"])\n log.warning(f\"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. 
\"\n f\"Cooldown ends in {time_left:.1f} seconds.\")\n return\n\n if tag_name is not None:\n tag = await self.bot.api_client.get(f'bot/tags/{tag_name}')\n if ctx.channel.id not in TEST_CHANNELS:\n self.tag_cooldowns[tag_name] = {\n \"time\": time.time(),\n \"channel\": ctx.channel.id\n }\n await ctx.send(embed=Embed.from_dict(tag['embed']))\n\n else:\n tags = await self.bot.api_client.get('bot/tags')\n if not tags:\n await ctx.send(embed=Embed(\n description=\"**There are no tags in the database!**\",\n colour=Colour.red()\n ))\n else:\n embed: Embed = Embed(title=\"**Current tags**\")\n await LinePaginator.paginate(\n sorted(f\"**\u00bb** {tag['title']}\" for tag in tags),\n ctx,\n embed,\n footer_text=\"To show a tag, type !tags <tagname>.\",\n empty=False,\n max_lines=15\n )\n\n @tags_group.command(name='set', aliases=('add', 'edit', 's'))\n @with_role(*MODERATION_ROLES)\n async def set_command(\n self,\n ctx: Context,\n tag_name: TagNameConverter,\n *,\n tag_content: TagContentConverter,\n ) -> None:\n \"\"\"Create a new tag or update an existing one.\"\"\"\n body = {\n 'title': tag_name.lower().strip(),\n 'embed': {\n 'title': tag_name,\n 'description': tag_content\n }\n }\n\n await self.bot.api_client.post('bot/tags', json=body)\n\n log.debug(f\"{ctx.author} successfully added the following tag to our database: \\n\"\n f\"tag_name: {tag_name}\\n\"\n f\"tag_content: '{tag_content}'\\n\")\n\n await ctx.send(embed=Embed(\n title=\"Tag successfully added\",\n description=f\"**{tag_name}** added to tag database.\",\n colour=Colour.blurple()\n ))\n\n @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))\n @with_role(Roles.admin, Roles.owner)\n async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:\n \"\"\"Remove a tag from the database.\"\"\"\n await self.bot.api_client.delete(f'bot/tags/{tag_name}')\n\n log.debug(f\"{ctx.author} successfully deleted the tag called '{tag_name}'\")\n await ctx.send(embed=Embed(\n title=tag_name,\n description=f\"Tag successfully removed: {tag_name}.\",\n colour=Colour.blurple()\n ))\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Tags cog load.\"\"\"\n bot.add_cog(Tags(bot))\n log.info(\"Cog loaded: Tags\")\n", "path": "bot/cogs/tags.py"}]} | 2,026 | 486 |
gh_patches_debug_9387 | rasdani/github-patches | git_diff | flairNLP__flair-1679 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Is the corpus object reusable across ModelTrainer instances?
I have three checkpoint files generated from a training run that uses PooledFlair embedding. Say chk10.pt, chk20.pt, chk30.pt.
I finalize using the following code in a for loop to get the F1 predictions out:
trainer: ModelTrainer = ModelTrainer.load_checkpoint(chkfile, corpus)
trainer.train('.', checkpoint = False, train_with_dev=True, max_epochs=epochs)
I set the epochs to the value at which each checkpoint was generated (10, 20, 30, and so on), so training typically goes straight to creating the final model and emitting the predictions.
This works perfectly fine for the first iteration of the loop, after which the predictions are quite wrong. If, instead of doing it in a loop, I process just one checkpoint and restart the process for the next one, I get the values I expect. This behavior happens only with PooledFlairEmbedding; the same program runs just fine with ElmoEmbedding and BertEmbedding.
So my question is: why is this the case? Is it because I create the corpus object outside the for loop and keep reusing it across different ModelTrainer instances?
It happens quite regularly for me. If needed, I can make a small program and share it.
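For reference, a minimal sketch of the loop described above (the checkpoint file names and the corpus construction are placeholders; the `ModelTrainer` calls are the ones quoted earlier in this issue):

```python
# Minimal sketch of the reported pattern; paths and corpus type are placeholders.
from flair.datasets import ColumnCorpus  # assumed corpus type for illustration
from flair.trainers import ModelTrainer

# Built once, outside the loop, and reused by every ModelTrainer instance.
corpus = ColumnCorpus("data/", {0: "text", 1: "ner"})

for chkfile, epochs in [("chk10.pt", 10), ("chk20.pt", 20), ("chk30.pt", 30)]:
    trainer: ModelTrainer = ModelTrainer.load_checkpoint(chkfile, corpus)
    # max_epochs matches the epoch at which the checkpoint was written, so
    # training finalizes immediately and emits the predictions.
    trainer.train(".", checkpoint=False, train_with_dev=True, max_epochs=epochs)
```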
</issue>
<code>
[start of flair/nn.py]
1 import warnings
2 from pathlib import Path
3
4 import torch.nn
5
6 from abc import abstractmethod
7
8 from typing import Union, List
9
10 from torch.utils.data.dataset import Dataset
11
12 import flair
13 from flair import file_utils
14 from flair.data import DataPoint, Sentence
15 from flair.datasets import DataLoader
16 from flair.training_utils import Result
17
18
19 class Model(torch.nn.Module):
20 """Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.
21 Every new type of model must implement these methods."""
22
23 @abstractmethod
24 def forward_loss(
25 self, data_points: Union[List[DataPoint], DataPoint]
26 ) -> torch.tensor:
27 """Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training."""
28 pass
29
30 @abstractmethod
31 def evaluate(
32 self,
33 sentences: Union[List[DataPoint], Dataset],
34 out_path: Path = None,
35 embedding_storage_mode: str = "none",
36 ) -> (Result, float):
37 """Evaluates the model. Returns a Result object containing evaluation
38 results and a loss value. Implement this to enable evaluation.
39 :param data_loader: DataLoader that iterates over dataset to be evaluated
40 :param out_path: Optional output path to store predictions
41 :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and
42 freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU
43 :return: Returns a Tuple consisting of a Result object and a loss float value
44 """
45 pass
46
47 @abstractmethod
48 def _get_state_dict(self):
49 """Returns the state dictionary for this model. Implementing this enables the save() and save_checkpoint()
50 functionality."""
51 pass
52
53 @staticmethod
54 @abstractmethod
55 def _init_model_with_state_dict(state):
56 """Initialize the model from a state dictionary. Implementing this enables the load() and load_checkpoint()
57 functionality."""
58 pass
59
60 @staticmethod
61 @abstractmethod
62 def _fetch_model(model_name) -> str:
63 return model_name
64
65 def save(self, model_file: Union[str, Path]):
66 """
67 Saves the current model to the provided file.
68 :param model_file: the model file
69 """
70 model_state = self._get_state_dict()
71
72 torch.save(model_state, str(model_file), pickle_protocol=4)
73
74 @classmethod
75 def load(cls, model: Union[str, Path]):
76 """
77 Loads the model from the given file.
78 :param model: the model file
79 :return: the loaded text classifier model
80 """
81 model_file = cls._fetch_model(str(model))
82
83 with warnings.catch_warnings():
84 warnings.filterwarnings("ignore")
85 # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups
86 # see https://github.com/zalandoresearch/flair/issues/351
87 f = file_utils.load_big_file(str(model_file))
88 state = torch.load(f, map_location=flair.device)
89
90 model = cls._init_model_with_state_dict(state)
91
92 model.eval()
93 model.to(flair.device)
94
95 return model
96
97
98 class LockedDropout(torch.nn.Module):
99 """
100 Implementation of locked (or variational) dropout. Randomly drops out entire parameters in embedding space.
101 """
102
103 def __init__(self, dropout_rate=0.5, batch_first=True, inplace=False):
104 super(LockedDropout, self).__init__()
105 self.dropout_rate = dropout_rate
106 self.batch_first = batch_first
107 self.inplace = inplace
108
109 def forward(self, x):
110 if not self.training or not self.dropout_rate:
111 return x
112
113 if not self.batch_first:
114 m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - self.dropout_rate)
115 else:
116 m = x.data.new(x.size(0), 1, x.size(2)).bernoulli_(1 - self.dropout_rate)
117
118 mask = torch.autograd.Variable(m, requires_grad=False) / (1 - self.dropout_rate)
119 mask = mask.expand_as(x)
120 return mask * x
121
122 def extra_repr(self):
123 inplace_str = ", inplace" if self.inplace else ""
124 return "p={}{}".format(self.dropout_rate, inplace_str)
125
126
127 class WordDropout(torch.nn.Module):
128 """
129 Implementation of word dropout. Randomly drops out entire words (or characters) in embedding space.
130 """
131
132 def __init__(self, dropout_rate=0.05, inplace=False):
133 super(WordDropout, self).__init__()
134 self.dropout_rate = dropout_rate
135 self.inplace = inplace
136
137 def forward(self, x):
138 if not self.training or not self.dropout_rate:
139 return x
140
141 m = x.data.new(x.size(0), x.size(1), 1).bernoulli_(1 - self.dropout_rate)
142
143 mask = torch.autograd.Variable(m, requires_grad=False)
144 return mask * x
145
146 def extra_repr(self):
147 inplace_str = ", inplace" if self.inplace else ""
148 return "p={}{}".format(self.dropout_rate, inplace_str)
149
[end of flair/nn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flair/nn.py b/flair/nn.py
--- a/flair/nn.py
+++ b/flair/nn.py
@@ -85,7 +85,7 @@
# load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups
# see https://github.com/zalandoresearch/flair/issues/351
f = file_utils.load_big_file(str(model_file))
- state = torch.load(f, map_location=flair.device)
+ state = torch.load(f, map_location='cpu')
model = cls._init_model_with_state_dict(state)
| {"golden_diff": "diff --git a/flair/nn.py b/flair/nn.py\n--- a/flair/nn.py\n+++ b/flair/nn.py\n@@ -85,7 +85,7 @@\n # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups\n # see https://github.com/zalandoresearch/flair/issues/351\n f = file_utils.load_big_file(str(model_file))\n- state = torch.load(f, map_location=flair.device)\n+ state = torch.load(f, map_location='cpu')\n \n model = cls._init_model_with_state_dict(state)\n", "issue": "is corpus object reusable across ModelTrainer instances ?\nI have three checkpoint files generated from a training run that uses PooledFlair embedding. Say chk10.pt, chk20.pt, chk30.pt.\r\n\r\nI finalize using the following code in a for loop to get the F1 predictions out:\r\n\r\ntrainer: ModelTrainer = ModelTrainer.load_checkpoint(chkfile, corpus)\r\ntrainer.train('.', checkpoint = False, train_with_dev=True, max_epochs=epochs)\r\n\r\nI set the epochs to the value at which this checkpoint got generated. So 10, 20, 30 etc. So typically it goes straight to creating the final model and emitting the predictions.\r\n\r\nThis works perfectly fine for the first time in the loop, after which the predictions are quite wrong. Now instead of doing it in the loop, if i simply do just once by restarting the process i get the values i expect. This behavior happens only with PooledFlairEmbedding. Same program runs just fine with ElmoEmbedding, BertEmbedding.\r\n\r\nSo my question is why is this the case ? Is it because i create the corpus object outside the for loop and keep reusing it across different ModelTrainer instances ? \r\n\r\nIt happens quite regularly for me. If needed i can make a small program and share.\r\n\n", "before_files": [{"content": "import warnings\nfrom pathlib import Path\n\nimport torch.nn\n\nfrom abc import abstractmethod\n\nfrom typing import Union, List\n\nfrom torch.utils.data.dataset import Dataset\n\nimport flair\nfrom flair import file_utils\nfrom flair.data import DataPoint, Sentence\nfrom flair.datasets import DataLoader\nfrom flair.training_utils import Result\n\n\nclass Model(torch.nn.Module):\n \"\"\"Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.\n Every new type of model must implement these methods.\"\"\"\n\n @abstractmethod\n def forward_loss(\n self, data_points: Union[List[DataPoint], DataPoint]\n ) -> torch.tensor:\n \"\"\"Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training.\"\"\"\n pass\n\n @abstractmethod\n def evaluate(\n self,\n sentences: Union[List[DataPoint], Dataset],\n out_path: Path = None,\n embedding_storage_mode: str = \"none\",\n ) -> (Result, float):\n \"\"\"Evaluates the model. Returns a Result object containing evaluation\n results and a loss value. Implement this to enable evaluation.\n :param data_loader: DataLoader that iterates over dataset to be evaluated\n :param out_path: Optional output path to store predictions\n :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and\n freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU\n :return: Returns a Tuple consisting of a Result object and a loss float value\n \"\"\"\n pass\n\n @abstractmethod\n def _get_state_dict(self):\n \"\"\"Returns the state dictionary for this model. 
Implementing this enables the save() and save_checkpoint()\n functionality.\"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def _init_model_with_state_dict(state):\n \"\"\"Initialize the model from a state dictionary. Implementing this enables the load() and load_checkpoint()\n functionality.\"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def _fetch_model(model_name) -> str:\n return model_name\n\n def save(self, model_file: Union[str, Path]):\n \"\"\"\n Saves the current model to the provided file.\n :param model_file: the model file\n \"\"\"\n model_state = self._get_state_dict()\n\n torch.save(model_state, str(model_file), pickle_protocol=4)\n\n @classmethod\n def load(cls, model: Union[str, Path]):\n \"\"\"\n Loads the model from the given file.\n :param model: the model file\n :return: the loaded text classifier model\n \"\"\"\n model_file = cls._fetch_model(str(model))\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups\n # see https://github.com/zalandoresearch/flair/issues/351\n f = file_utils.load_big_file(str(model_file))\n state = torch.load(f, map_location=flair.device)\n\n model = cls._init_model_with_state_dict(state)\n\n model.eval()\n model.to(flair.device)\n\n return model\n\n\nclass LockedDropout(torch.nn.Module):\n \"\"\"\n Implementation of locked (or variational) dropout. Randomly drops out entire parameters in embedding space.\n \"\"\"\n\n def __init__(self, dropout_rate=0.5, batch_first=True, inplace=False):\n super(LockedDropout, self).__init__()\n self.dropout_rate = dropout_rate\n self.batch_first = batch_first\n self.inplace = inplace\n\n def forward(self, x):\n if not self.training or not self.dropout_rate:\n return x\n\n if not self.batch_first:\n m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - self.dropout_rate)\n else:\n m = x.data.new(x.size(0), 1, x.size(2)).bernoulli_(1 - self.dropout_rate)\n\n mask = torch.autograd.Variable(m, requires_grad=False) / (1 - self.dropout_rate)\n mask = mask.expand_as(x)\n return mask * x\n\n def extra_repr(self):\n inplace_str = \", inplace\" if self.inplace else \"\"\n return \"p={}{}\".format(self.dropout_rate, inplace_str)\n\n\nclass WordDropout(torch.nn.Module):\n \"\"\"\n Implementation of word dropout. Randomly drops out entire words (or characters) in embedding space.\n \"\"\"\n\n def __init__(self, dropout_rate=0.05, inplace=False):\n super(WordDropout, self).__init__()\n self.dropout_rate = dropout_rate\n self.inplace = inplace\n\n def forward(self, x):\n if not self.training or not self.dropout_rate:\n return x\n\n m = x.data.new(x.size(0), x.size(1), 1).bernoulli_(1 - self.dropout_rate)\n\n mask = torch.autograd.Variable(m, requires_grad=False)\n return mask * x\n\n def extra_repr(self):\n inplace_str = \", inplace\" if self.inplace else \"\"\n return \"p={}{}\".format(self.dropout_rate, inplace_str)\n", "path": "flair/nn.py"}]} | 2,285 | 144 |
gh_patches_debug_23571 | rasdani/github-patches | git_diff | napari__napari-5565 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
viewer axes point in basically random directions 😂
## 🐛 Bug
I was trying to explain anisotropic image resolution to someone the other day and this napari bug severely undermined my otherwise excellent demonstration. 😂 TLDR: the axes 0, 1, 2 displayed in the viewer by default are not the same as the axes 0, 1, 2 on the input array (and by extension the positions of the `layer.scale` parameter).
## To Reproduce
```python
import numpy as np
import napari
volume = np.random.random((64, 64, 64))
viewer, layer = napari.imshow(volume, scale=(1, 2, 4), ndisplay=3)
viewer.axes.visible = True
napari.run()
```
<img width="948" alt="Screen Shot 2023-02-07 at 9 05 39 pm" src="https://user-images.githubusercontent.com/492549/217215889-c6aca123-f529-4a82-9893-4d585ac69256.png">
## Expected behavior
The code produces a 3D oblong/rectangular prism with sides of length 64, 128, and 256 in world-space. Given the scale setting above, one would expect axis 0 to point in the direction of the side of length 64, 1 for 128, and 2 for 256. Instead, you see 0 pointing along the side of length 128, 1 along the side of length 256, and 2 along the side of length 64. (**Edited:** [fixed typo](https://github.com/napari/napari/issues/5536#issuecomment-1420555774), 0->2)
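For concreteness, the world-space side lengths implied by that scale can be computed directly with plain NumPy, independent of napari:

```python
import numpy as np

shape = np.array([64, 64, 64])  # data shape along axes (0, 1, 2)
scale = np.array([1, 2, 4])     # layer.scale, in the same axis order
print(shape * scale)            # [ 64 128 256] -> axis 0 spans 64, axis 1 spans 128, axis 2 spans 256
```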
## Environment
```
napari: 0.4.17rc4.dev169+g57fa998e7.d20221219
Platform: macOS-12.5.1-arm64-arm-64bit
System: MacOS 12.5.1
Python: 3.10.5 | packaged by conda-forge | (main, Jun 14 2022, 07:07:06) [Clang 13.0.1 ]
Qt: 5.15.4
PyQt5: 5.15.7
NumPy: 1.22.4
SciPy: 1.8.1
Dask: 2022.9.1
VisPy: 0.12.1
magicgui: 0.5.1
superqt: 0.3.2
in-n-out: 0.1.5
app-model: 0.1.1
npe2: 0.6.1
OpenGL:
- GL version: 2.1 Metal - 76.3
- MAX_TEXTURE_SIZE: 16384
Screens:
- screen 1: resolution 1800x1169, scale 2.0
Settings path:
- /Users/jni/Library/Application Support/napari/all_f332943dd4a538ce2ff3134fd0ff74396d8d8b23/settings.yaml
Plugins:
- affinder: 0.2.3.dev12+g6d235f9 (2 contributions)
- napari: 0.4.17rc4.dev169+g57fa998e7.d20221219 (77 contributions)
- napari-console: 0.0.6 (0 contributions)
- napari-multiscale-rendering-prototype: 0.0.1 (6 contributions)
- napari-pymeshlab: 0.0.5 (17 contributions)
- napari-skeleton-curator: 0.1.dev21+gf2de859 (4 contributions)
- napari-svg: 0.1.6 (2 contributions)
- napari-watershed: 0.0.0 (4 contributions)
- skan: 0.11.0.dev0 (2 contributions)
- zarpaint: 0.1.1.dev20+g61ba4a0 (14 contributions)
```
## Additional context
Semi-related: #4633
</issue>
<code>
[start of napari/_vispy/overlays/axes.py]
1 import numpy as np
2
3 from napari._vispy.overlays.base import ViewerOverlayMixin, VispySceneOverlay
4 from napari._vispy.visuals.axes import Axes
5 from napari.utils.theme import get_theme
6
7
8 class VispyAxesOverlay(ViewerOverlayMixin, VispySceneOverlay):
9 """Axes indicating world coordinate origin and orientation."""
10
11 def __init__(self, *, viewer, overlay, parent=None) -> None:
12 self._scale = 1
13
14 # Target axes length in canvas pixels
15 self._target_length = 80
16
17 super().__init__(
18 node=Axes(), viewer=viewer, overlay=overlay, parent=parent
19 )
20 self.overlay.events.visible.connect(self._on_visible_change)
21 self.overlay.events.colored.connect(self._on_data_change)
22 self.overlay.events.dashed.connect(self._on_data_change)
23 self.overlay.events.labels.connect(self._on_labels_visible_change)
24 self.overlay.events.arrows.connect(self._on_data_change)
25
26 self.viewer.events.theme.connect(self._on_data_change)
27 self.viewer.camera.events.zoom.connect(self._on_zoom_change)
28 self.viewer.dims.events.order.connect(self._on_data_change)
29 self.viewer.dims.events.range.connect(self._on_data_change)
30 self.viewer.dims.events.ndisplay.connect(self._on_data_change)
31 self.viewer.dims.events.axis_labels.connect(
32 self._on_labels_text_change
33 )
34
35 self.reset()
36
37 def _on_data_change(self):
38 # Determine which axes are displayed
39 axes = self.viewer.dims.displayed[::-1]
40
41 # Counting backwards from total number of dimensions
42 # determine axes positions. This is done as by default
43 # the last NumPy axis corresponds to the first Vispy axis
44 reversed_axes = [self.viewer.dims.ndim - 1 - a for a in axes]
45
46 self.node.set_data(
47 axes=axes,
48 reversed_axes=reversed_axes,
49 colored=self.overlay.colored,
50 bg_color=get_theme(self.viewer.theme, False).canvas,
51 dashed=self.overlay.dashed,
52 arrows=self.overlay.arrows,
53 )
54
55 def _on_labels_visible_change(self):
56 self.node.text.visible = self.overlay.labels
57
58 def _on_labels_text_change(self):
59 axes = self.viewer.dims.displayed[::-1]
60 axes_labels = [self.viewer.dims.axis_labels[a] for a in axes]
61 self.node.text.text = axes_labels
62
63 def _on_zoom_change(self):
64 scale = 1 / self.viewer.camera.zoom
65
66 # If scale has not changed, do not redraw
67 if abs(np.log10(self._scale) - np.log10(scale)) < 1e-4:
68 return
69 self._scale = scale
70 scale = self._target_length * self._scale
71 # Update axes scale
72 self.node.transform.reset()
73 self.node.transform.scale([scale, scale, scale, 1])
74
75 def reset(self):
76 super().reset()
77 self._on_data_change()
78 self._on_labels_visible_change()
79 self._on_labels_text_change()
80 self._on_zoom_change()
81
[end of napari/_vispy/overlays/axes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/napari/_vispy/overlays/axes.py b/napari/_vispy/overlays/axes.py
--- a/napari/_vispy/overlays/axes.py
+++ b/napari/_vispy/overlays/axes.py
@@ -17,7 +17,6 @@
super().__init__(
node=Axes(), viewer=viewer, overlay=overlay, parent=parent
)
- self.overlay.events.visible.connect(self._on_visible_change)
self.overlay.events.colored.connect(self._on_data_change)
self.overlay.events.dashed.connect(self._on_data_change)
self.overlay.events.labels.connect(self._on_labels_visible_change)
@@ -52,6 +51,8 @@
arrows=self.overlay.arrows,
)
+ self._on_labels_text_change()
+
def _on_labels_visible_change(self):
self.node.text.visible = self.overlay.labels
@@ -76,5 +77,4 @@
super().reset()
self._on_data_change()
self._on_labels_visible_change()
- self._on_labels_text_change()
self._on_zoom_change()
| {"golden_diff": "diff --git a/napari/_vispy/overlays/axes.py b/napari/_vispy/overlays/axes.py\n--- a/napari/_vispy/overlays/axes.py\n+++ b/napari/_vispy/overlays/axes.py\n@@ -17,7 +17,6 @@\n super().__init__(\n node=Axes(), viewer=viewer, overlay=overlay, parent=parent\n )\n- self.overlay.events.visible.connect(self._on_visible_change)\n self.overlay.events.colored.connect(self._on_data_change)\n self.overlay.events.dashed.connect(self._on_data_change)\n self.overlay.events.labels.connect(self._on_labels_visible_change)\n@@ -52,6 +51,8 @@\n arrows=self.overlay.arrows,\n )\n \n+ self._on_labels_text_change()\n+\n def _on_labels_visible_change(self):\n self.node.text.visible = self.overlay.labels\n \n@@ -76,5 +77,4 @@\n super().reset()\n self._on_data_change()\n self._on_labels_visible_change()\n- self._on_labels_text_change()\n self._on_zoom_change()\n", "issue": "viewer axes point in basically random directions \ud83d\ude02\n## \ud83d\udc1b Bug\r\n\r\nI was trying to explain anisotropic image resolution to someone the other day and this napari bug severely undermined my otherwise excellent demonstration. \ud83d\ude02 TLDR: the axes 0, 1, 2 displayed in the viewer by default are not the same as the axes 0, 1, 2 on the input array (and by extension the positions of the `layer.scale` parameter).\r\n\r\n## To Reproduce\r\n\r\n```python\r\nimport numpy as np\r\nimport napari\r\n\r\nvolume = np.random.random((64, 64, 64))\r\n\r\nviewer, layer = napari.imshow(volume, scale=(1, 2, 4), ndisplay=3)\r\nviewer.axes.visible = True\r\n\r\nnapari.run()\r\n```\r\n\r\n<img width=\"948\" alt=\"Screen Shot 2023-02-07 at 9 05 39 pm\" src=\"https://user-images.githubusercontent.com/492549/217215889-c6aca123-f529-4a82-9893-4d585ac69256.png\">\r\n\r\n## Expected behavior\r\n\r\nThe code produces a 3D oblong/rectangular prism with sides of length 64, 128, and 256 in world-space. Given the scale setting above, one would expect axis 0 to point in the direction of the side of length 64, 1 for 128, and 2 for 256. Instead, you see 0 pointing along the side of length 128, 1 along the side of length 256, and 2 along the side of length 64. 
(**Edited:** [fixed typo](https://github.com/napari/napari/issues/5536#issuecomment-1420555774), 0->2)\r\n\r\n## Environment\r\n\r\n```\r\nnapari: 0.4.17rc4.dev169+g57fa998e7.d20221219\r\nPlatform: macOS-12.5.1-arm64-arm-64bit\r\nSystem: MacOS 12.5.1\r\nPython: 3.10.5 | packaged by conda-forge | (main, Jun 14 2022, 07:07:06) [Clang 13.0.1 ]\r\nQt: 5.15.4\r\nPyQt5: 5.15.7\r\nNumPy: 1.22.4\r\nSciPy: 1.8.1\r\nDask: 2022.9.1\r\nVisPy: 0.12.1\r\nmagicgui: 0.5.1\r\nsuperqt: 0.3.2\r\nin-n-out: 0.1.5\r\napp-model: 0.1.1\r\nnpe2: 0.6.1\r\n\r\nOpenGL:\r\n - GL version: 2.1 Metal - 76.3\r\n - MAX_TEXTURE_SIZE: 16384\r\n\r\nScreens:\r\n - screen 1: resolution 1800x1169, scale 2.0\r\n\r\nSettings path:\r\n - /Users/jni/Library/Application Support/napari/all_f332943dd4a538ce2ff3134fd0ff74396d8d8b23/settings.yaml\r\nPlugins:\r\n - affinder: 0.2.3.dev12+g6d235f9 (2 contributions)\r\n - napari: 0.4.17rc4.dev169+g57fa998e7.d20221219 (77 contributions)\r\n - napari-console: 0.0.6 (0 contributions)\r\n - napari-multiscale-rendering-prototype: 0.0.1 (6 contributions)\r\n - napari-pymeshlab: 0.0.5 (17 contributions)\r\n - napari-skeleton-curator: 0.1.dev21+gf2de859 (4 contributions)\r\n - napari-svg: 0.1.6 (2 contributions)\r\n - napari-watershed: 0.0.0 (4 contributions)\r\n - skan: 0.11.0.dev0 (2 contributions)\r\n - zarpaint: 0.1.1.dev20+g61ba4a0 (14 contributions)\r\n```\r\n\r\n## Additional context\r\n\r\nSemi-related: #4633\n", "before_files": [{"content": "import numpy as np\n\nfrom napari._vispy.overlays.base import ViewerOverlayMixin, VispySceneOverlay\nfrom napari._vispy.visuals.axes import Axes\nfrom napari.utils.theme import get_theme\n\n\nclass VispyAxesOverlay(ViewerOverlayMixin, VispySceneOverlay):\n \"\"\"Axes indicating world coordinate origin and orientation.\"\"\"\n\n def __init__(self, *, viewer, overlay, parent=None) -> None:\n self._scale = 1\n\n # Target axes length in canvas pixels\n self._target_length = 80\n\n super().__init__(\n node=Axes(), viewer=viewer, overlay=overlay, parent=parent\n )\n self.overlay.events.visible.connect(self._on_visible_change)\n self.overlay.events.colored.connect(self._on_data_change)\n self.overlay.events.dashed.connect(self._on_data_change)\n self.overlay.events.labels.connect(self._on_labels_visible_change)\n self.overlay.events.arrows.connect(self._on_data_change)\n\n self.viewer.events.theme.connect(self._on_data_change)\n self.viewer.camera.events.zoom.connect(self._on_zoom_change)\n self.viewer.dims.events.order.connect(self._on_data_change)\n self.viewer.dims.events.range.connect(self._on_data_change)\n self.viewer.dims.events.ndisplay.connect(self._on_data_change)\n self.viewer.dims.events.axis_labels.connect(\n self._on_labels_text_change\n )\n\n self.reset()\n\n def _on_data_change(self):\n # Determine which axes are displayed\n axes = self.viewer.dims.displayed[::-1]\n\n # Counting backwards from total number of dimensions\n # determine axes positions. 
This is done as by default\n # the last NumPy axis corresponds to the first Vispy axis\n reversed_axes = [self.viewer.dims.ndim - 1 - a for a in axes]\n\n self.node.set_data(\n axes=axes,\n reversed_axes=reversed_axes,\n colored=self.overlay.colored,\n bg_color=get_theme(self.viewer.theme, False).canvas,\n dashed=self.overlay.dashed,\n arrows=self.overlay.arrows,\n )\n\n def _on_labels_visible_change(self):\n self.node.text.visible = self.overlay.labels\n\n def _on_labels_text_change(self):\n axes = self.viewer.dims.displayed[::-1]\n axes_labels = [self.viewer.dims.axis_labels[a] for a in axes]\n self.node.text.text = axes_labels\n\n def _on_zoom_change(self):\n scale = 1 / self.viewer.camera.zoom\n\n # If scale has not changed, do not redraw\n if abs(np.log10(self._scale) - np.log10(scale)) < 1e-4:\n return\n self._scale = scale\n scale = self._target_length * self._scale\n # Update axes scale\n self.node.transform.reset()\n self.node.transform.scale([scale, scale, scale, 1])\n\n def reset(self):\n super().reset()\n self._on_data_change()\n self._on_labels_visible_change()\n self._on_labels_text_change()\n self._on_zoom_change()\n", "path": "napari/_vispy/overlays/axes.py"}]} | 2,396 | 247 |
gh_patches_debug_6440 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1976 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
jsonschema RefResolutionError for 0.6.3 or lower
### Summary
The newest version of `jsonschema` (4.15.0, released today) breaks the current release version of `pyhf` (0.6.3).
### OS / Environment
```console
NAME=Gentoo
ID=gentoo
PRETTY_NAME="Gentoo Linux"
ANSI_COLOR="1;32"
HOME_URL="https://www.gentoo.org/"
SUPPORT_URL="https://www.gentoo.org/support/"
BUG_REPORT_URL="https://bugs.gentoo.org/"
VERSION_ID="2.8"
```
### Steps to Reproduce
```bash
pip install 'jsonschema==4.15.0' 'pyhf==0.6.3'
```
```python
import pyhf
model = pyhf.simplemodels.uncorrelated_background(
signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
)
```
### File Upload (optional)
_No response_
### Expected Results
I expected not to get an error because this is the [Hello World example](https://pyhf.readthedocs.io/en/v0.6.3/examples/notebooks/hello-world.html). I confirmed that there's no error with `jsonschema==4.14.0` (the previous version) in the exact same environment otherwise.
### Actual Results
```console
Traceback (most recent call last):
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 889, in resolve_from_url
document = self.store[url]
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/_utils.py", line 28, in __getitem__
return self.store[self.normalize(uri)]
KeyError: 'file:///home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 1505, in open_local_file
stats = os.stat(localfile)
FileNotFoundError: [Errno 2] No such file or directory: '/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 892, in resolve_from_url
document = self.resolve_remote(url)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 1000, in resolve_remote
with urlopen(uri) as url:
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 216, in urlopen
return opener.open(url, data, timeout)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 519, in open
response = self._open(req, data)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 536, in _open
result = self._call_chain(self.handle_open, protocol, protocol +
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 496, in _call_chain
result = func(*args)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 1483, in file_open
return self.open_local_file(req)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py", line 1522, in open_local_file
raise URLError(exp)
urllib.error.URLError: <urlopen error [Errno 2] No such file or directory: '/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/simplemodels.py", line 141, in uncorrelated_background
return Model(spec, batch_size=batch_size)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/pdf.py", line 682, in __init__
utils.validate(self.spec, self.schema, version=self.version)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/utils.py", line 62, in validate
return validator.validate(spec)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 302, in validate
for error in self.iter_errors(*args, **kwargs):
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 277, in iter_errors
for error in errors:
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/_validators.py", line 294, in ref
scope, resolved = validator.resolver.resolve(ref)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 880, in resolve
return url, self._remote_cache(url)
File "/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py", line 894, in resolve_from_url
raise exceptions.RefResolutionError(exc)
jsonschema.exceptions.RefResolutionError: <urlopen error [Errno 2] No such file or directory: '/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'>
```
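The failing URL in the traceback reflects how the resolver joins a relative `$ref` such as `defs.json` onto its `base_uri`. A rough sketch of that joining with placeholder paths (pyhf 0.6.3 passes the schemas root as `base_uri`, while the shipped `defs.json` lives under the versioned subdirectory):

```python
# Rough sketch with placeholder paths: how a relative "defs.json" ref resolves
# against different base URIs (jsonschema's RefResolver joins them as URLs).
from urllib.parse import urljoin

schemas_root = "file:///site-packages/pyhf/schemas/"     # base_uri in pyhf 0.6.3
print(urljoin(schemas_root, "defs.json"))                # .../schemas/defs.json (file does not exist)

versioned = "file:///site-packages/pyhf/schemas/1.0.0/"  # resolving relative to the version directory
print(urljoin(versioned, "defs.json"))                   # .../schemas/1.0.0/defs.json (shipped file)
```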
### pyhf Version
```console
pyhf, version 0.6.3
```
### Code of Conduct
- [X] I agree to follow the Code of Conduct
</issue>
<code>
[start of src/pyhf/schema/validator.py]
1 import jsonschema
2 import pyhf.exceptions
3 from pyhf.schema.loader import load_schema
4 from pyhf.schema import variables
5 from typing import Union, Mapping
6
7
8 def validate(spec: Mapping, schema_name: str, version: Union[str, None] = None):
9 """
10 Validate a provided specification against a schema.
11
12 Args:
13 spec (dict): The specification to validate.
14 schema_name (str): The name of the schema to use.
15 version (None or str): The version to use if not the default from :attr:`pyhf.schema.version`.
16
17 Returns:
18 None: schema validated fine
19
20 Raises:
21 pyhf.exceptions.InvalidSpecification: the specification is invalid
22 """
23
24 version = version or variables.SCHEMA_VERSION
25
26 schema = load_schema(f'{version}/{schema_name}')
27
28 # note: trailing slash needed for RefResolver to resolve correctly
29 resolver = jsonschema.RefResolver(
30 base_uri=f"file://{variables.schemas}/",
31 referrer=f"{version}/{schema_name}",
32 store=variables.SCHEMA_CACHE,
33 )
34 validator = jsonschema.Draft6Validator(
35 schema, resolver=resolver, format_checker=None
36 )
37
38 try:
39 return validator.validate(spec)
40 except jsonschema.ValidationError as err:
41 raise pyhf.exceptions.InvalidSpecification(err, schema_name)
42
[end of src/pyhf/schema/validator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pyhf/schema/validator.py b/src/pyhf/schema/validator.py
--- a/src/pyhf/schema/validator.py
+++ b/src/pyhf/schema/validator.py
@@ -27,8 +27,8 @@
# note: trailing slash needed for RefResolver to resolve correctly
resolver = jsonschema.RefResolver(
- base_uri=f"file://{variables.schemas}/",
- referrer=f"{version}/{schema_name}",
+ base_uri=f"file://{variables.schemas}/{version}/",
+ referrer=f"{schema_name}",
store=variables.SCHEMA_CACHE,
)
validator = jsonschema.Draft6Validator(
| {"golden_diff": "diff --git a/src/pyhf/schema/validator.py b/src/pyhf/schema/validator.py\n--- a/src/pyhf/schema/validator.py\n+++ b/src/pyhf/schema/validator.py\n@@ -27,8 +27,8 @@\n \n # note: trailing slash needed for RefResolver to resolve correctly\n resolver = jsonschema.RefResolver(\n- base_uri=f\"file://{variables.schemas}/\",\n- referrer=f\"{version}/{schema_name}\",\n+ base_uri=f\"file://{variables.schemas}/{version}/\",\n+ referrer=f\"{schema_name}\",\n store=variables.SCHEMA_CACHE,\n )\n validator = jsonschema.Draft6Validator(\n", "issue": "jsonschema RefResolutionError for 0.6.3 or lower\n### Summary\n\nThe newest version of `jsonschema` (4.15.0, released today) breaks the current release version of `pyhf` (0.6.3).\n\n### OS / Environment\n\n```console\nNAME=Gentoo\r\nID=gentoo\r\nPRETTY_NAME=\"Gentoo Linux\"\r\nANSI_COLOR=\"1;32\"\r\nHOME_URL=\"https://www.gentoo.org/\"\r\nSUPPORT_URL=\"https://www.gentoo.org/support/\"\r\nBUG_REPORT_URL=\"https://bugs.gentoo.org/\"\r\nVERSION_ID=\"2.8\"\n```\n\n\n### Steps to Reproduce\n\n```bash\r\npip install 'jsonschema==4.15.0' 'pyhf==0.6.3'\r\n```\r\n\r\n```python\r\nimport pyhf\r\nmodel = pyhf.simplemodels.uncorrelated_background(\r\n signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]\r\n)\r\n```\n\n### File Upload (optional)\n\n_No response_\n\n### Expected Results\n\nI expected not to get an error because this is the [Hello World example](https://pyhf.readthedocs.io/en/v0.6.3/examples/notebooks/hello-world.html). I confirmed that there's no error with `jsonschema==4.14.0` (the previous version) in the exact same environment otherwise.\n\n### Actual Results\n\n```console\nTraceback (most recent call last):\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py\", line 889, in resolve_from_url\r\n document = self.store[url]\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/_utils.py\", line 28, in __getitem__\r\n return self.store[self.normalize(uri)]\r\nKeyError: 'file:///home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py\", line 1505, in open_local_file\r\n stats = os.stat(localfile)\r\nFileNotFoundError: [Errno 2] No such file or directory: '/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py\", line 892, in resolve_from_url\r\n document = self.resolve_remote(url)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py\", line 1000, in resolve_remote\r\n with urlopen(uri) as url:\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py\", line 216, in urlopen\r\n return opener.open(url, data, timeout)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py\", line 519, in open\r\n response = self._open(req, data)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py\", line 536, in _open\r\n result = self._call_chain(self.handle_open, protocol, protocol +\r\n File 
\"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py\", line 496, in _call_chain\r\n result = func(*args)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py\", line 1483, in file_open\r\n return self.open_local_file(req)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/urllib/request.py\", line 1522, in open_local_file\r\n raise URLError(exp)\r\nurllib.error.URLError: <urlopen error [Errno 2] No such file or directory: '/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'>\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/simplemodels.py\", line 141, in uncorrelated_background\r\n return Model(spec, batch_size=batch_size)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/pdf.py\", line 682, in __init__\r\n utils.validate(self.spec, self.schema, version=self.version)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/utils.py\", line 62, in validate\r\n return validator.validate(spec)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py\", line 302, in validate\r\n for error in self.iter_errors(*args, **kwargs):\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py\", line 277, in iter_errors\r\n for error in errors:\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/_validators.py\", line 294, in ref\r\n scope, resolved = validator.resolver.resolve(ref)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py\", line 880, in resolve\r\n return url, self._remote_cache(url)\r\n File \"/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/jsonschema/validators.py\", line 894, in resolve_from_url\r\n raise exceptions.RefResolutionError(exc)\r\njsonschema.exceptions.RefResolutionError: <urlopen error [Errno 2] No such file or directory: '/home/user/miniconda3/envs/abcd_pyhf/lib/python3.10/site-packages/pyhf/schemas/defs.json'>\n```\n\n\n### pyhf Version\n\n```console\npyhf, version 0.6.3\n```\n\n\n### Code of Conduct\n\n- [X] I agree to follow the Code of Conduct\n", "before_files": [{"content": "import jsonschema\nimport pyhf.exceptions\nfrom pyhf.schema.loader import load_schema\nfrom pyhf.schema import variables\nfrom typing import Union, Mapping\n\n\ndef validate(spec: Mapping, schema_name: str, version: Union[str, None] = None):\n \"\"\"\n Validate a provided specification against a schema.\n\n Args:\n spec (dict): The specification to validate.\n schema_name (str): The name of the schema to use.\n version (None or str): The version to use if not the default from :attr:`pyhf.schema.version`.\n\n Returns:\n None: schema validated fine\n\n Raises:\n pyhf.exceptions.InvalidSpecification: the specification is invalid\n \"\"\"\n\n version = version or variables.SCHEMA_VERSION\n\n schema = load_schema(f'{version}/{schema_name}')\n\n # note: trailing slash needed for RefResolver to resolve correctly\n resolver = jsonschema.RefResolver(\n base_uri=f\"file://{variables.schemas}/\",\n referrer=f\"{version}/{schema_name}\",\n store=variables.SCHEMA_CACHE,\n )\n validator = jsonschema.Draft6Validator(\n schema, resolver=resolver, format_checker=None\n )\n\n try:\n 
return validator.validate(spec)\n except jsonschema.ValidationError as err:\n raise pyhf.exceptions.InvalidSpecification(err, schema_name)\n", "path": "src/pyhf/schema/validator.py"}]} | 2,423 | 143 |
gh_patches_debug_1588 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1804 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Django Component Governance vulnerability
Django 1.11 before 1.11.28, 2.2 before 2.2.10, and 3.0 before 3.0.3 allows SQL Injection if untrusted data is used as a StringAgg delimiter (e.g., in Django applications that offer downloads of data as a series of rows with a user-specified column delimiter). By passing a suitably crafted delimiter to a contrib.postgres.aggregates.StringAgg instance, it was possible to break escaping and inject malicious SQL.
https://dev.azure.com/FuseLabs/SDK_v4/_componentGovernance/112465/alert/2370216?typeId=4354877
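As an illustration only (hypothetical queryset and field names), the risky pattern is passing untrusted input straight through as the aggregate's delimiter; constraining the delimiter is one stop-gap sketch, with upgrading Django being the actual fix:

```python
# Illustrative sketch only -- hypothetical queryset/field names.
from django.contrib.postgres.aggregates import StringAgg

ALLOWED_DELIMITERS = {",", ";", "|"}


def export_emails(queryset, delimiter: str):
    # Vulnerable pattern per the advisory: a user-supplied delimiter reaches StringAgg.
    # Stop-gap sketch: allow-list the delimiter (upgrading Django is the real fix).
    if delimiter not in ALLOWED_DELIMITERS:
        raise ValueError("unsupported delimiter")
    return queryset.aggregate(emails=StringAgg("email", delimiter=delimiter))
```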
</issue>
<code>
[start of libraries/botbuilder-applicationinsights/setup.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 REQUIRES = [
8 "applicationinsights==0.11.9",
9 "botbuilder-schema==4.15.0",
10 "botframework-connector==4.15.0",
11 "botbuilder-core==4.15.0",
12 ]
13 TESTS_REQUIRES = [
14 "aiounittest==1.3.0",
15 "django==2.2.6", # For samples
16 "djangorestframework==3.10.3", # For samples
17 "flask==1.1.1", # For samples
18 ]
19
20 root = os.path.abspath(os.path.dirname(__file__))
21
22 with open(os.path.join(root, "botbuilder", "applicationinsights", "about.py")) as f:
23 package_info = {}
24 info = f.read()
25 exec(info, package_info)
26
27 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
28 long_description = f.read()
29
30 setup(
31 name=package_info["__title__"],
32 version=package_info["__version__"],
33 url=package_info["__uri__"],
34 author=package_info["__author__"],
35 description=package_info["__description__"],
36 keywords=[
37 "BotBuilderApplicationInsights",
38 "bots",
39 "ai",
40 "botframework",
41 "botbuilder",
42 ],
43 long_description=long_description,
44 long_description_content_type="text/x-rst",
45 license=package_info["__license__"],
46 packages=[
47 "botbuilder.applicationinsights",
48 "botbuilder.applicationinsights.django",
49 "botbuilder.applicationinsights.flask",
50 "botbuilder.applicationinsights.processor",
51 ],
52 install_requires=REQUIRES + TESTS_REQUIRES,
53 tests_require=TESTS_REQUIRES,
54 include_package_data=True,
55 classifiers=[
56 "Programming Language :: Python :: 3.7",
57 "Intended Audience :: Developers",
58 "License :: OSI Approved :: MIT License",
59 "Operating System :: OS Independent",
60 "Development Status :: 5 - Production/Stable",
61 "Topic :: Scientific/Engineering :: Artificial Intelligence",
62 ],
63 )
64
[end of libraries/botbuilder-applicationinsights/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botbuilder-applicationinsights/setup.py b/libraries/botbuilder-applicationinsights/setup.py
--- a/libraries/botbuilder-applicationinsights/setup.py
+++ b/libraries/botbuilder-applicationinsights/setup.py
@@ -12,7 +12,7 @@
]
TESTS_REQUIRES = [
"aiounittest==1.3.0",
- "django==2.2.6", # For samples
+ "django==2.2.10", # For samples
"djangorestframework==3.10.3", # For samples
"flask==1.1.1", # For samples
]
| {"golden_diff": "diff --git a/libraries/botbuilder-applicationinsights/setup.py b/libraries/botbuilder-applicationinsights/setup.py\n--- a/libraries/botbuilder-applicationinsights/setup.py\n+++ b/libraries/botbuilder-applicationinsights/setup.py\n@@ -12,7 +12,7 @@\n ]\n TESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n- \"django==2.2.6\", # For samples\n+ \"django==2.2.10\", # For samples\n \"djangorestframework==3.10.3\", # For samples\n \"flask==1.1.1\", # For samples\n ]\n", "issue": "Django Component Governance vulnerability\nDjango 1.11 before 1.11.28, 2.2 before 2.2.10, and 3.0 before 3.0.3 allows SQL Injection if untrusted data is used as a StringAgg delimiter (e.g., in Django applications that offer downloads of data as a series of rows with a user-specified column delimiter). By passing a suitably crafted delimiter to a contrib.postgres.aggregates.StringAgg instance, it was possible to break escaping and inject malicious SQL.\r\n\r\nhttps://dev.azure.com/FuseLabs/SDK_v4/_componentGovernance/112465/alert/2370216?typeId=4354877\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"applicationinsights==0.11.9\",\n \"botbuilder-schema==4.15.0\",\n \"botframework-connector==4.15.0\",\n \"botbuilder-core==4.15.0\",\n]\nTESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n \"django==2.2.6\", # For samples\n \"djangorestframework==3.10.3\", # For samples\n \"flask==1.1.1\", # For samples\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"applicationinsights\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderApplicationInsights\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.applicationinsights\",\n \"botbuilder.applicationinsights.django\",\n \"botbuilder.applicationinsights.flask\",\n \"botbuilder.applicationinsights.processor\",\n ],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-applicationinsights/setup.py"}]} | 1,312 | 152 |
gh_patches_debug_36006 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1366 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't configure source without arguments
Hello Team,
I'm trying to configure the Mamirolle info source that I added into Home Assistant, but I'm running into argument issues.
`args` is marked `required`, so if none is passed, the configuration is invalid.
```
Invalid config for [waste_collection_schedule]: required key not provided @ data['waste_collection_schedule']['sources'][0]['args']. Got None. (See /config/configuration.yaml, line 27).
```
If a dummy argument is passed, the configuration is valid but the source setup fails.
```
Error during setup of component waste_collection_schedule
Traceback (most recent call last):
File "/usr/src/homeassistant/homeassistant/setup.py", line 288, in _async_setup_component
result = await task
^^^^^^^^^^
File "/config/custom_components/waste_collection_schedule/__init__.py", line 109, in async_setup
api.add_source_shell(
File "/config/custom_components/waste_collection_schedule/__init__.py", line 202, in add_source_shell
SourceShell.create(
File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 196, in create
source = source_module.Source(**source_args) # type: ignore
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
TypeError: Source() takes no arguments
```
I understand that we want the configuration to fail early, but the real error will still be seen only when the source is actually instantiated. Because of that, I think the arguments shouldn't be required.
What do you think about this?
</issue>
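One way to satisfy a required `args` key without breaking instantiation is to let `Source.__init__` accept and ignore a placeholder argument. The reference patch further down in this record takes that approach, roughly as sketched here (the `_` name is just a throwaway placeholder):

```python
TEST_CASES = {
    "TestSource": {},
    "IgnoredArgument": {"_": ""},  # dummy argument that the source simply discards
}


class Source:
    def __init__(self, _=None):
        # Accept (and ignore) a dummy argument so a placeholder `args`
        # entry in configuration.yaml does not raise TypeError.
        pass
```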
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py]
1 import datetime
2
3 import requests
4 from bs4 import BeautifulSoup
5 from waste_collection_schedule import Collection
6
7 TITLE = "Mairie de Mamirolle"
8 DESCRIPTION = "Source script for mamirolle.info"
9 COUNTRY = "fr"
10 URL = "http://mamirolle.info/"
11
12 TEST_CASES = {"TestSource": {}}
13
14 ICON_MAP = {
15 "Poubelle grise": "mdi:trash-can",
16 "Poubelle jaune": "mdi:recycle",
17 }
18
19 MONTH_NAMES = [
20 "janvier",
21 "février",
22 "mars",
23 "avril",
24 "mai",
25 "juin",
26 "juillet",
27 "août",
28 "septembre",
29 "octobre",
30 "novembre",
31 "décembre",
32 ]
33
34
35 class Source:
36 def fetch(self):
37 now = datetime.datetime.now()
38 # get list of regions and weblinks
39 page = requests.get(URL)
40 # A lenient HTML parser is need
41 soup = BeautifulSoup(page.text.replace("<![endif]", ""), "html.parser")
42 trash_domestic = soup.find("i", class_="poubelle-grise")
43 _, day, month = trash_domestic.next_sibling.string.split()
44 date_domestic = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()
45 if date_domestic < now.date():
46 date_domestic = date_domestic.replace(year=date_domestic.year + 1)
47
48 trash_recycle = soup.find("i", class_="poubelle-jaune")
49 _, day, month = trash_recycle.next_sibling.string.split()
50 date_recycle = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()
51 if date_recycle < now.date():
52 date_recycle = date_recycle.replace(year=date_recycle.year + 1)
53
54 entries = [
55 Collection(
56 date=date_domestic,
57 t="Poubelle grise",
58 icon=ICON_MAP.get("Poubelle grise"),
59 ),
60 Collection(
61 date=date_recycle,
62 t="Poubelle jaune",
63 icon=ICON_MAP.get("Poubelle jaune"),
64 ),
65 ] # List that holds collection schedule
66
67 return entries
68
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py
@@ -9,7 +9,12 @@
COUNTRY = "fr"
URL = "http://mamirolle.info/"
-TEST_CASES = {"TestSource": {}}
+TEST_CASES = {
+ "TestSource": {},
+ "IgnoredArgument": {
+ "_": ""
+ }
+}
ICON_MAP = {
"Poubelle grise": "mdi:trash-can",
@@ -33,6 +38,9 @@
class Source:
+ def __init__(self, _=None):
+ pass
+
def fetch(self):
now = datetime.datetime.now()
# get list of regions and weblinks
@@ -40,28 +48,19 @@
# A lenient HTML parser is need
soup = BeautifulSoup(page.text.replace("<![endif]", ""), "html.parser")
trash_domestic = soup.find("i", class_="poubelle-grise")
- _, day, month = trash_domestic.next_sibling.string.split()
- date_domestic = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()
- if date_domestic < now.date():
- date_domestic = date_domestic.replace(year=date_domestic.year + 1)
-
trash_recycle = soup.find("i", class_="poubelle-jaune")
- _, day, month = trash_recycle.next_sibling.string.split()
- date_recycle = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()
- if date_recycle < now.date():
- date_recycle = date_recycle.replace(year=date_recycle.year + 1)
- entries = [
- Collection(
- date=date_domestic,
- t="Poubelle grise",
- icon=ICON_MAP.get("Poubelle grise"),
- ),
- Collection(
- date=date_recycle,
- t="Poubelle jaune",
- icon=ICON_MAP.get("Poubelle jaune"),
- ),
- ] # List that holds collection schedule
+ entries = [] # List that holds collection schedule
+ for trash, label in [(trash_domestic, "Poubelle grise"), (trash_recycle, "Poubelle jaune")]:
+ _, day, month = trash.next_sibling.string.split()
+ date = now.replace(month=MONTH_NAMES.index(month) + 1, day=int(day)).date()
+ if date < now.date():
+ date = date.replace(year=date.year + 1)
+
+ entries.append(Collection(
+ date=date,
+ t=label,
+ icon=ICON_MAP.get(label),
+ ))
return entries
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py\n@@ -9,7 +9,12 @@\n COUNTRY = \"fr\"\n URL = \"http://mamirolle.info/\"\n \n-TEST_CASES = {\"TestSource\": {}}\n+TEST_CASES = {\n+ \"TestSource\": {},\n+ \"IgnoredArgument\": {\n+ \"_\": \"\"\n+ }\n+}\n \n ICON_MAP = {\n \"Poubelle grise\": \"mdi:trash-can\",\n@@ -33,6 +38,9 @@\n \n \n class Source:\n+ def __init__(self, _=None):\n+ pass\n+\n def fetch(self):\n now = datetime.datetime.now()\n # get list of regions and weblinks\n@@ -40,28 +48,19 @@\n # A lenient HTML parser is need\n soup = BeautifulSoup(page.text.replace(\"<![endif]\", \"\"), \"html.parser\")\n trash_domestic = soup.find(\"i\", class_=\"poubelle-grise\")\n- _, day, month = trash_domestic.next_sibling.string.split()\n- date_domestic = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()\n- if date_domestic < now.date():\n- date_domestic = date_domestic.replace(year=date_domestic.year + 1)\n-\n trash_recycle = soup.find(\"i\", class_=\"poubelle-jaune\")\n- _, day, month = trash_recycle.next_sibling.string.split()\n- date_recycle = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()\n- if date_recycle < now.date():\n- date_recycle = date_recycle.replace(year=date_recycle.year + 1)\n \n- entries = [\n- Collection(\n- date=date_domestic,\n- t=\"Poubelle grise\",\n- icon=ICON_MAP.get(\"Poubelle grise\"),\n- ),\n- Collection(\n- date=date_recycle,\n- t=\"Poubelle jaune\",\n- icon=ICON_MAP.get(\"Poubelle jaune\"),\n- ),\n- ] # List that holds collection schedule\n+ entries = [] # List that holds collection schedule\n+ for trash, label in [(trash_domestic, \"Poubelle grise\"), (trash_recycle, \"Poubelle jaune\")]:\n+ _, day, month = trash.next_sibling.string.split()\n+ date = now.replace(month=MONTH_NAMES.index(month) + 1, day=int(day)).date()\n+ if date < now.date():\n+ date = date.replace(year=date.year + 1)\n+\n+ entries.append(Collection(\n+ date=date,\n+ t=label,\n+ icon=ICON_MAP.get(label),\n+ ))\n \n return entries\n", "issue": "Can't configure source without arguments\nHello Team,\r\nI'm trying to configure into HomeAssistant the source that I added Mamirolle info, but I have argument issues.\r\n\r\n`args` is marked `required`, so if none is passed, the configuration is invalid.\r\n\r\n```\r\nInvalid config for [waste_collection_schedule]: required key not provided @ data['waste_collection_schedule']['sources'][0]['args']. Got None. (See /config/configuration.yaml, line 27). \r\n```\r\n\r\nIf a dummy argument is passed. 
The configuration is valid but the source setup fails.\r\n```\r\nError during setup of component waste_collection_schedule\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/src/homeassistant/homeassistant/setup.py\", line 288, in _async_setup_component\r\n result = await task\r\n ^^^^^^^^^^\r\n File \"/config/custom_components/waste_collection_schedule/__init__.py\", line 109, in async_setup\r\n api.add_source_shell(\r\n File \"/config/custom_components/waste_collection_schedule/__init__.py\", line 202, in add_source_shell\r\n SourceShell.create(\r\n File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 196, in create\r\n source = source_module.Source(**source_args) # type: ignore\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nTypeError: Source() takes no arguments\r\n```\r\nI understand that we want the configuration to fail early but the real error will still be seen only when the source is actually instantiated. Because of that I think the arguments shouldn't be required.\r\n\r\nWhat do you think about this?\n", "before_files": [{"content": "import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Mairie de Mamirolle\"\nDESCRIPTION = \"Source script for mamirolle.info\"\nCOUNTRY = \"fr\"\nURL = \"http://mamirolle.info/\"\n\nTEST_CASES = {\"TestSource\": {}}\n\nICON_MAP = {\n \"Poubelle grise\": \"mdi:trash-can\",\n \"Poubelle jaune\": \"mdi:recycle\",\n}\n\nMONTH_NAMES = [\n \"janvier\",\n \"f\u00e9vrier\",\n \"mars\",\n \"avril\",\n \"mai\",\n \"juin\",\n \"juillet\",\n \"ao\u00fbt\",\n \"septembre\",\n \"octobre\",\n \"novembre\",\n \"d\u00e9cembre\",\n]\n\n\nclass Source:\n def fetch(self):\n now = datetime.datetime.now()\n # get list of regions and weblinks\n page = requests.get(URL)\n # A lenient HTML parser is need\n soup = BeautifulSoup(page.text.replace(\"<![endif]\", \"\"), \"html.parser\")\n trash_domestic = soup.find(\"i\", class_=\"poubelle-grise\")\n _, day, month = trash_domestic.next_sibling.string.split()\n date_domestic = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()\n if date_domestic < now.date():\n date_domestic = date_domestic.replace(year=date_domestic.year + 1)\n\n trash_recycle = soup.find(\"i\", class_=\"poubelle-jaune\")\n _, day, month = trash_recycle.next_sibling.string.split()\n date_recycle = now.replace(month=MONTH_NAMES.index(month), day=int(day)).date()\n if date_recycle < now.date():\n date_recycle = date_recycle.replace(year=date_recycle.year + 1)\n\n entries = [\n Collection(\n date=date_domestic,\n t=\"Poubelle grise\",\n icon=ICON_MAP.get(\"Poubelle grise\"),\n ),\n Collection(\n date=date_recycle,\n t=\"Poubelle jaune\",\n icon=ICON_MAP.get(\"Poubelle jaune\"),\n ),\n ] # List that holds collection schedule\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/mamirolle_info.py"}]} | 1,512 | 665 |
gh_patches_debug_5170 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2781 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Traceback appears in Status Bar, when trying to replay live flow
##### Steps to reproduce the problem:
1. Run **pathod**: `pathod -a "/=200:p0,10"`
2. Run mitmproxy.
3. Send a _get request_ to pathod through mitmproxy using **pathoc**:
`pathoc -c localhost:9999 localhost:8080 'get:/'`
4. Try to replay the corresponding live flow in mitmproxy by pressing `r`.
I am seeing:

##### Any other comments? What have you tried so far?
This issue is relevant for situations when the server didn't have time to send a response yet but a user tries to replay the corresponding flow.
I also faced this issue when trying to replay the `mitm.it` flow from the onboardingapp.
##### System information
Mitmproxy: 3.0.0.dev1101 (commit d9d4d15) binary
Python: 3.5.2
OpenSSL: OpenSSL 1.1.0g 2 Nov 2017
Platform: Linux-4.4.0-104-generic-x86_64-with-debian-stretch-sid
</issue>
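Since the traceback comes from replaying a flow whose response does not exist yet, one fix is to reject live flows up front in `start_replay`. The reference patch below adds exactly this guard, sketched here in context (`exceptions`, `ctx`, `flow`, and `typing` are the imports already present in `clientplayback.py`):

```python
def start_replay(self, flows: typing.Sequence[flow.Flow]) -> None:
    """
    Replay requests from flows, refusing flows that are still live.
    """
    for f in flows:
        if f.live:
            raise exceptions.CommandError("Can't replay live flow.")
    self.flows = list(flows)
    ctx.log.alert("Replaying %s flows." % len(self.flows))
    ctx.master.addons.trigger("update", [])
```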
<code>
[start of mitmproxy/addons/clientplayback.py]
1 from mitmproxy import exceptions
2 from mitmproxy import ctx
3 from mitmproxy import io
4 from mitmproxy import flow
5 from mitmproxy import command
6 import mitmproxy.types
7
8 import typing
9
10
11 class ClientPlayback:
12 def __init__(self):
13 self.flows = [] # type: typing.List[flow.Flow]
14 self.current_thread = None
15 self.configured = False
16
17 def count(self) -> int:
18 if self.current_thread:
19 current = 1
20 else:
21 current = 0
22 return current + len(self.flows)
23
24 @command.command("replay.client.stop")
25 def stop_replay(self) -> None:
26 """
27 Stop client replay.
28 """
29 self.flows = []
30 ctx.log.alert("Client replay stopped.")
31 ctx.master.addons.trigger("update", [])
32
33 @command.command("replay.client")
34 def start_replay(self, flows: typing.Sequence[flow.Flow]) -> None:
35 """
36 Replay requests from flows.
37 """
38 self.flows = list(flows)
39 ctx.log.alert("Replaying %s flows." % len(self.flows))
40 ctx.master.addons.trigger("update", [])
41
42 @command.command("replay.client.file")
43 def load_file(self, path: mitmproxy.types.Path) -> None:
44 try:
45 flows = io.read_flows_from_paths([path])
46 except exceptions.FlowReadException as e:
47 raise exceptions.CommandError(str(e))
48 ctx.log.alert("Replaying %s flows." % len(self.flows))
49 self.flows = flows
50 ctx.master.addons.trigger("update", [])
51
52 def configure(self, updated):
53 if not self.configured and ctx.options.client_replay:
54 self.configured = True
55 ctx.log.info("Client Replay: {}".format(ctx.options.client_replay))
56 try:
57 flows = io.read_flows_from_paths(ctx.options.client_replay)
58 except exceptions.FlowReadException as e:
59 raise exceptions.OptionsError(str(e))
60 self.start_replay(flows)
61
62 def tick(self):
63 current_is_done = self.current_thread and not self.current_thread.is_alive()
64 can_start_new = not self.current_thread or current_is_done
65 will_start_new = can_start_new and self.flows
66
67 if current_is_done:
68 self.current_thread = None
69 ctx.master.addons.trigger("update", [])
70 if will_start_new:
71 f = self.flows.pop(0)
72 self.current_thread = ctx.master.replay_request(f)
73 ctx.master.addons.trigger("update", [f])
74 if current_is_done and not will_start_new:
75 ctx.master.addons.trigger("processing_complete")
76
[end of mitmproxy/addons/clientplayback.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/addons/clientplayback.py b/mitmproxy/addons/clientplayback.py
--- a/mitmproxy/addons/clientplayback.py
+++ b/mitmproxy/addons/clientplayback.py
@@ -35,6 +35,9 @@
"""
Replay requests from flows.
"""
+ for f in flows:
+ if f.live:
+ raise exceptions.CommandError("Can't replay live flow.")
self.flows = list(flows)
ctx.log.alert("Replaying %s flows." % len(self.flows))
ctx.master.addons.trigger("update", [])
| {"golden_diff": "diff --git a/mitmproxy/addons/clientplayback.py b/mitmproxy/addons/clientplayback.py\n--- a/mitmproxy/addons/clientplayback.py\n+++ b/mitmproxy/addons/clientplayback.py\n@@ -35,6 +35,9 @@\n \"\"\"\n Replay requests from flows.\n \"\"\"\n+ for f in flows:\n+ if f.live:\n+ raise exceptions.CommandError(\"Can't replay live flow.\")\n self.flows = list(flows)\n ctx.log.alert(\"Replaying %s flows.\" % len(self.flows))\n ctx.master.addons.trigger(\"update\", [])\n", "issue": "Traceback appears in Status Bar, when trying to replay live flow\n##### Steps to reproduce the problem:\r\n\r\n1. Run **pathod** : `pathod -a \"/=200:p0,10\"`\r\n2. Run mitmproxy.\r\n3. Send _get request_ to pathod through mitmproxy using **pathoc**: \r\n`pathoc -c localhost:9999 localhost:8080 'get:/'`\r\n4. Try to replay the corresponding live flow in mitmproxy by pressing `r`.\r\n\r\nI am seeing:\r\n\r\n\r\n\r\n##### Any other comments? What have you tried so far?\r\nThis issue is relevant for the situations, when server didn't have time to send a response yet, but a user tries to replay the corresponding flow.\r\nI also faced this issue, when trying to replay `mitm.it` flow from onboardingapp.\r\n\r\n\r\n##### System information\r\n\r\nMitmproxy: 3.0.0.dev1101 (commit d9d4d15) binary\r\nPython: 3.5.2\r\nOpenSSL: OpenSSL 1.1.0g 2 Nov 2017\r\nPlatform: Linux-4.4.0-104-generic-x86_64-with-debian-stretch-sid\r\n\r\n \n", "before_files": [{"content": "from mitmproxy import exceptions\nfrom mitmproxy import ctx\nfrom mitmproxy import io\nfrom mitmproxy import flow\nfrom mitmproxy import command\nimport mitmproxy.types\n\nimport typing\n\n\nclass ClientPlayback:\n def __init__(self):\n self.flows = [] # type: typing.List[flow.Flow]\n self.current_thread = None\n self.configured = False\n\n def count(self) -> int:\n if self.current_thread:\n current = 1\n else:\n current = 0\n return current + len(self.flows)\n\n @command.command(\"replay.client.stop\")\n def stop_replay(self) -> None:\n \"\"\"\n Stop client replay.\n \"\"\"\n self.flows = []\n ctx.log.alert(\"Client replay stopped.\")\n ctx.master.addons.trigger(\"update\", [])\n\n @command.command(\"replay.client\")\n def start_replay(self, flows: typing.Sequence[flow.Flow]) -> None:\n \"\"\"\n Replay requests from flows.\n \"\"\"\n self.flows = list(flows)\n ctx.log.alert(\"Replaying %s flows.\" % len(self.flows))\n ctx.master.addons.trigger(\"update\", [])\n\n @command.command(\"replay.client.file\")\n def load_file(self, path: mitmproxy.types.Path) -> None:\n try:\n flows = io.read_flows_from_paths([path])\n except exceptions.FlowReadException as e:\n raise exceptions.CommandError(str(e))\n ctx.log.alert(\"Replaying %s flows.\" % len(self.flows))\n self.flows = flows\n ctx.master.addons.trigger(\"update\", [])\n\n def configure(self, updated):\n if not self.configured and ctx.options.client_replay:\n self.configured = True\n ctx.log.info(\"Client Replay: {}\".format(ctx.options.client_replay))\n try:\n flows = io.read_flows_from_paths(ctx.options.client_replay)\n except exceptions.FlowReadException as e:\n raise exceptions.OptionsError(str(e))\n self.start_replay(flows)\n\n def tick(self):\n current_is_done = self.current_thread and not self.current_thread.is_alive()\n can_start_new = not self.current_thread or current_is_done\n will_start_new = can_start_new and self.flows\n\n if current_is_done:\n self.current_thread = None\n ctx.master.addons.trigger(\"update\", [])\n if will_start_new:\n f = self.flows.pop(0)\n self.current_thread = 
ctx.master.replay_request(f)\n ctx.master.addons.trigger(\"update\", [f])\n if current_is_done and not will_start_new:\n ctx.master.addons.trigger(\"processing_complete\")\n", "path": "mitmproxy/addons/clientplayback.py"}]} | 1,613 | 132 |
gh_patches_debug_20431 | rasdani/github-patches | git_diff | scikit-hep__awkward-2373 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`ak.unzip` touches all contents whilst typetracing
### Version of Awkward Array
HEAD, after #2370
### Description and code to reproduce
Even after applying #2370 (which fixes a similar bug), the following code:
```python
import json
import awkward as ak
def delta_r2(a, b):
return (a.eta - b.eta) ** 2 + (a.phi - b.phi) ** 2
fromjson = {
"class": "RecordArray",
"fields": ["muon", "jet"],
"contents": [
{
"class": "ListOffsetArray",
"offsets": "i64",
"content": {
"class": "RecordArray",
"fields": ["pt", "eta", "phi", "crossref"],
"contents": [
{
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "muon_pt!",
},
{
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "muon_eta!",
},
{
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "muon_phi!",
},
{
"class": "ListOffsetArray",
"offsets": "i64",
"content": {
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "muon_crossref_content!",
},
"parameters": {},
"form_key": "muon_crossref_index!",
},
],
"parameters": {},
"form_key": "muon_record!",
},
"parameters": {},
"form_key": "muon_list!",
},
{
"class": "ListOffsetArray",
"offsets": "i64",
"content": {
"class": "RecordArray",
"fields": ["pt", "eta", "phi", "crossref", "thing1"],
"contents": [
{
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "jet_pt!",
},
{
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "jet_eta!",
},
{
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "jet_phi!",
},
{
"class": "ListOffsetArray",
"offsets": "i64",
"content": {
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "jet_crossref_content!",
},
"parameters": {},
"form_key": "jet_crossref_index!",
},
{
"class": "NumpyArray",
"primitive": "int64",
"inner_shape": [],
"parameters": {},
"form_key": "jet_thing1!",
},
],
"parameters": {},
"form_key": "jet_record!",
},
"parameters": {},
"form_key": "jet_list!",
},
],
"parameters": {},
"form_key": "outer!",
}
form = ak.forms.from_json(json.dumps(fromjson))
ttlayout, report = ak._nplikes.typetracer.typetracer_with_report(form)
ttarray = ak.Array(ttlayout)
a, b = ak.unzip(ak.cartesian([ttarray.muon, ttarray.jet], axis=1, nested=True))
print("ab>>>", report.data_touched, "\n")
mval = delta_r2(a, b)
print("dr>>>>", report.data_touched, "\n")
mmin = ak.argmin(mval, axis=2)
print("mmin>>", report.data_touched, "\n")
ak.firsts(b[mmin], axis=2).pt
print("pt>>>>", report.data_touched, "\n")
```
produces the following output:
```
ab>>> ['muon_list!', 'jet_list!', 'muon_crossref_index!', 'jet_crossref_index!']
dr>>>> ['muon_list!', 'jet_list!', 'muon_crossref_index!', 'jet_crossref_index!', 'muon_eta!', 'jet_eta!', 'muon_phi!', 'jet_phi!']
mmin>> ['muon_list!', 'jet_list!', 'muon_crossref_index!', 'jet_crossref_index!', 'muon_eta!', 'jet_eta!', 'muon_phi!', 'jet_phi!']
pt>>>> ['muon_list!', 'jet_list!', 'muon_crossref_index!', 'jet_crossref_index!', 'muon_eta!', 'jet_eta!', 'muon_phi!', 'jet_phi!', 'jet_pt!', 'jet_crossref_content!', 'jet_thing1!']
```
It's a little mysterious (but not a major pain point) that the "crossref indexes" are touched by the `ak.cartesian` step, which ought to treat the muons and jets as opaque objects. A little mysterious, and maybe related.
But the real problem is represented by the "jet thing" in the final output. The "jet thing" should never have been accessed by any of these operations: `delta_r2`, `ak.argmin`, the `b[mmin]` slice, or `ak.firsts`. It's also a stand-in for dozens of large fields, so it's a real pain point that needs to be addressed.
</issue>
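The reference patch further down reworks the union check so that the visitor stops at records (by returning the layout) and no longer descends into `.content` by hand, relying on `recursively_apply` alone; that appears to be what keeps the unrelated buffers out of the typetracer report. A sketch of the reworked helper, where `fields`, `layout`, and `behavior` are the locals of `_impl`:

```python
def check_for_union(layout, **kwargs):
    if isinstance(layout, (ak.contents.RecordArray, ak.Record)):
        return layout  # returning the layout stops recursively_apply from descending

    elif layout.is_union:
        for content in layout.contents:
            if set(ak.operations.fields(content)) != set(fields):
                raise ak._errors.wrap_error(
                    ValueError("union of different sets of fields, cannot ak.unzip")
                )

ak._do.recursively_apply(layout, check_for_union, behavior, return_array=False)
```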
<code>
[start of src/awkward/operations/ak_unzip.py]
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2 __all__ = ("unzip",)
3 import awkward as ak
4 from awkward._behavior import behavior_of
5 from awkward._layout import wrap_layout
6 from awkward._nplikes.numpylike import NumpyMetadata
7
8 np = NumpyMetadata.instance()
9
10
11 def unzip(array, *, highlevel=True, behavior=None):
12 """
13 Args:
14 array: Array-like data (anything #ak.to_layout recognizes).
15 highlevel (bool): If True, return an #ak.Array; otherwise, return
16 a low-level #ak.contents.Content subclass.
17 behavior (None or dict): Custom #ak.behavior for the output array, if
18 high-level.
19
20 If the `array` contains tuples or records, this operation splits them
21 into a Python tuple of arrays, one for each field.
22
23 If the `array` does not contain tuples or records, the single `array`
24 is placed in a length 1 Python tuple.
25
26 For example,
27
28 >>> array = ak.Array([{"x": 1.1, "y": [1]},
29 ... {"x": 2.2, "y": [2, 2]},
30 ... {"x": 3.3, "y": [3, 3, 3]}])
31 >>> x, y = ak.unzip(array)
32 >>> x
33 <Array [1.1, 2.2, 3.3] type='3 * float64'>
34 >>> y
35 <Array [[1], [2, 2], [3, 3, 3]] type='3 * var * int64'>
36 """
37 with ak._errors.OperationErrorContext(
38 "ak.unzip",
39 {"array": array, "highlevel": highlevel, "behavior": behavior},
40 ):
41 return _impl(array, highlevel, behavior)
42
43
44 def _impl(array, highlevel, behavior):
45 behavior = behavior_of(array, behavior=behavior)
46 layout = ak.operations.to_layout(array, allow_record=True, allow_other=False)
47 fields = ak.operations.fields(layout)
48
49 def check_for_union(layout, **kwargs):
50 if isinstance(layout, (ak.contents.RecordArray, ak.Record)):
51 pass # don't descend into nested records
52
53 elif isinstance(layout, ak.contents.UnionArray):
54 for content in layout.contents:
55 if set(ak.operations.fields(content)) != set(fields):
56 raise ak._errors.wrap_error(
57 ValueError("union of different sets of fields, cannot ak.unzip")
58 )
59
60 elif hasattr(layout, "content"):
61 check_for_union(layout.content)
62
63 ak._do.recursively_apply(layout, check_for_union, behavior, return_array=False)
64
65 if len(fields) == 0:
66 return (wrap_layout(layout, behavior, highlevel, allow_other=True),)
67 else:
68 return tuple(
69 wrap_layout(layout[n], behavior, highlevel, allow_other=True)
70 for n in fields
71 )
72
[end of src/awkward/operations/ak_unzip.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/awkward/operations/ak_unzip.py b/src/awkward/operations/ak_unzip.py
--- a/src/awkward/operations/ak_unzip.py
+++ b/src/awkward/operations/ak_unzip.py
@@ -48,18 +48,15 @@
def check_for_union(layout, **kwargs):
if isinstance(layout, (ak.contents.RecordArray, ak.Record)):
- pass # don't descend into nested records
+ return layout # don't descend into nested records
- elif isinstance(layout, ak.contents.UnionArray):
+ elif layout.is_union:
for content in layout.contents:
if set(ak.operations.fields(content)) != set(fields):
raise ak._errors.wrap_error(
ValueError("union of different sets of fields, cannot ak.unzip")
)
- elif hasattr(layout, "content"):
- check_for_union(layout.content)
-
ak._do.recursively_apply(layout, check_for_union, behavior, return_array=False)
if len(fields) == 0:
| {"golden_diff": "diff --git a/src/awkward/operations/ak_unzip.py b/src/awkward/operations/ak_unzip.py\n--- a/src/awkward/operations/ak_unzip.py\n+++ b/src/awkward/operations/ak_unzip.py\n@@ -48,18 +48,15 @@\n \n def check_for_union(layout, **kwargs):\n if isinstance(layout, (ak.contents.RecordArray, ak.Record)):\n- pass # don't descend into nested records\n+ return layout # don't descend into nested records\n \n- elif isinstance(layout, ak.contents.UnionArray):\n+ elif layout.is_union:\n for content in layout.contents:\n if set(ak.operations.fields(content)) != set(fields):\n raise ak._errors.wrap_error(\n ValueError(\"union of different sets of fields, cannot ak.unzip\")\n )\n \n- elif hasattr(layout, \"content\"):\n- check_for_union(layout.content)\n-\n ak._do.recursively_apply(layout, check_for_union, behavior, return_array=False)\n \n if len(fields) == 0:\n", "issue": "`ak.unzip` touches all contents whilst typetracing\n### Version of Awkward Array\n\nHEAD, after #2370\n\n### Description and code to reproduce\n\nEven after applying #2370 (which fixes a similar bug), the following code:\r\n\r\n```python\r\nimport json\r\nimport awkward as ak\r\n\r\n\r\ndef delta_r2(a, b):\r\n return (a.eta - b.eta) ** 2 + (a.phi - b.phi) ** 2\r\n\r\n\r\nfromjson = {\r\n \"class\": \"RecordArray\",\r\n \"fields\": [\"muon\", \"jet\"],\r\n \"contents\": [\r\n {\r\n \"class\": \"ListOffsetArray\",\r\n \"offsets\": \"i64\",\r\n \"content\": {\r\n \"class\": \"RecordArray\",\r\n \"fields\": [\"pt\", \"eta\", \"phi\", \"crossref\"],\r\n \"contents\": [\r\n {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": \"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"muon_pt!\",\r\n },\r\n {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": \"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"muon_eta!\",\r\n },\r\n {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": \"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"muon_phi!\",\r\n },\r\n {\r\n \"class\": \"ListOffsetArray\",\r\n \"offsets\": \"i64\",\r\n \"content\": {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": \"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"muon_crossref_content!\",\r\n },\r\n \"parameters\": {},\r\n \"form_key\": \"muon_crossref_index!\",\r\n },\r\n ],\r\n \"parameters\": {},\r\n \"form_key\": \"muon_record!\",\r\n },\r\n \"parameters\": {},\r\n \"form_key\": \"muon_list!\",\r\n },\r\n {\r\n \"class\": \"ListOffsetArray\",\r\n \"offsets\": \"i64\",\r\n \"content\": {\r\n \"class\": \"RecordArray\",\r\n \"fields\": [\"pt\", \"eta\", \"phi\", \"crossref\", \"thing1\"],\r\n \"contents\": [\r\n {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": \"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"jet_pt!\",\r\n },\r\n {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": \"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"jet_eta!\",\r\n },\r\n {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": \"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"jet_phi!\",\r\n },\r\n {\r\n \"class\": \"ListOffsetArray\",\r\n \"offsets\": \"i64\",\r\n \"content\": {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": \"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"jet_crossref_content!\",\r\n },\r\n \"parameters\": {},\r\n \"form_key\": \"jet_crossref_index!\",\r\n },\r\n {\r\n \"class\": \"NumpyArray\",\r\n \"primitive\": 
\"int64\",\r\n \"inner_shape\": [],\r\n \"parameters\": {},\r\n \"form_key\": \"jet_thing1!\",\r\n },\r\n ],\r\n \"parameters\": {},\r\n \"form_key\": \"jet_record!\",\r\n },\r\n \"parameters\": {},\r\n \"form_key\": \"jet_list!\",\r\n },\r\n ],\r\n \"parameters\": {},\r\n \"form_key\": \"outer!\",\r\n}\r\n\r\nform = ak.forms.from_json(json.dumps(fromjson))\r\n\r\nttlayout, report = ak._nplikes.typetracer.typetracer_with_report(form)\r\n\r\nttarray = ak.Array(ttlayout)\r\n\r\na, b = ak.unzip(ak.cartesian([ttarray.muon, ttarray.jet], axis=1, nested=True))\r\n\r\nprint(\"ab>>>\", report.data_touched, \"\\n\")\r\n\r\nmval = delta_r2(a, b)\r\n\r\nprint(\"dr>>>>\", report.data_touched, \"\\n\")\r\n\r\nmmin = ak.argmin(mval, axis=2)\r\n\r\nprint(\"mmin>>\", report.data_touched, \"\\n\")\r\n\r\nak.firsts(b[mmin], axis=2).pt\r\n\r\nprint(\"pt>>>>\", report.data_touched, \"\\n\")\r\n```\r\n\r\nproduces the following output:\r\n\r\n```\r\nab>>> ['muon_list!', 'jet_list!', 'muon_crossref_index!', 'jet_crossref_index!'] \r\n\r\ndr>>>> ['muon_list!', 'jet_list!', 'muon_crossref_index!', 'jet_crossref_index!', 'muon_eta!', 'jet_eta!', 'muon_phi!', 'jet_phi!'] \r\n\r\nmmin>> ['muon_list!', 'jet_list!', 'muon_crossref_index!', 'jet_crossref_index!', 'muon_eta!', 'jet_eta!', 'muon_phi!', 'jet_phi!'] \r\n\r\npt>>>> ['muon_list!', 'jet_list!', 'muon_crossref_index!', 'jet_crossref_index!', 'muon_eta!', 'jet_eta!', 'muon_phi!', 'jet_phi!', 'jet_pt!', 'jet_crossref_content!', 'jet_thing1!'] \r\n```\r\n\r\nIt's a little mysterious (but not a major pain point) that the \"crossref indexes\" are touched by the `ak.cartesian` step, which ought to treat the muons and jets as opaque objects. A little mysterious, and maybe related.\r\n\r\nBut the real problem is represented by the \"jet thing\" in the final output. The \"jet thing\" should never have been accessed by any of these operations: `delta_r2`, `ak.argmin`, the `b[mmin]` slice, or `ak.firsts`. It's also a stand-in for dozens of large fields, so it's a real pain point that needs to be addressed.\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n__all__ = (\"unzip\",)\nimport awkward as ak\nfrom awkward._behavior import behavior_of\nfrom awkward._layout import wrap_layout\nfrom awkward._nplikes.numpylike import NumpyMetadata\n\nnp = NumpyMetadata.instance()\n\n\ndef unzip(array, *, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Array-like data (anything #ak.to_layout recognizes).\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n If the `array` contains tuples or records, this operation splits them\n into a Python tuple of arrays, one for each field.\n\n If the `array` does not contain tuples or records, the single `array`\n is placed in a length 1 Python tuple.\n\n For example,\n\n >>> array = ak.Array([{\"x\": 1.1, \"y\": [1]},\n ... {\"x\": 2.2, \"y\": [2, 2]},\n ... 
{\"x\": 3.3, \"y\": [3, 3, 3]}])\n >>> x, y = ak.unzip(array)\n >>> x\n <Array [1.1, 2.2, 3.3] type='3 * float64'>\n >>> y\n <Array [[1], [2, 2], [3, 3, 3]] type='3 * var * int64'>\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.unzip\",\n {\"array\": array, \"highlevel\": highlevel, \"behavior\": behavior},\n ):\n return _impl(array, highlevel, behavior)\n\n\ndef _impl(array, highlevel, behavior):\n behavior = behavior_of(array, behavior=behavior)\n layout = ak.operations.to_layout(array, allow_record=True, allow_other=False)\n fields = ak.operations.fields(layout)\n\n def check_for_union(layout, **kwargs):\n if isinstance(layout, (ak.contents.RecordArray, ak.Record)):\n pass # don't descend into nested records\n\n elif isinstance(layout, ak.contents.UnionArray):\n for content in layout.contents:\n if set(ak.operations.fields(content)) != set(fields):\n raise ak._errors.wrap_error(\n ValueError(\"union of different sets of fields, cannot ak.unzip\")\n )\n\n elif hasattr(layout, \"content\"):\n check_for_union(layout.content)\n\n ak._do.recursively_apply(layout, check_for_union, behavior, return_array=False)\n\n if len(fields) == 0:\n return (wrap_layout(layout, behavior, highlevel, allow_other=True),)\n else:\n return tuple(\n wrap_layout(layout[n], behavior, highlevel, allow_other=True)\n for n in fields\n )\n", "path": "src/awkward/operations/ak_unzip.py"}]} | 2,648 | 232 |
gh_patches_debug_13140 | rasdani/github-patches | git_diff | google__flax-2553 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DynamicScale goes to infinity
### System information
- OS Platform and Distribution: Linux 5.15.74-3-MANJARO
- Flax, jax, jaxlib versions: flax 0.6.0, jax 0.3.21, jaxlib 0.3.20+cuda11.cudnn82
- Python version: 3.10.4
- GPU/TPU model and memory: NVIDIA RTX 2060 Super 8GB
- CUDA version (if applicable): cuda11, cudnn82
### Problem you have encountered:
I adapted the gradient scaling code of the imagenet example in order to use fp16 training for my model. During training, the `scale` parameter of `DynamicScale` became `inf`, because it kept doubling every 2000 steps without the gradients ever becoming `nan`. Once the `scale` hit `inf`, it never went down, since dividing `inf` by any number still returns `inf`.
### What you expected to happen:
Either have a `maximum_scale` parameter or add a check to see if `scale` is finite.
I think it's a one-line change in the `DynamicScale` code, so I can make a pull request if you agree.
### Steps to reproduce:
```py
import jax.numpy as jnp
from flax.training import dynamic_scale
ds = dynamic_scale.DynamicScale(growth_factor=10.0, growth_interval=1)
x = jnp.float32(1)
for _ in range(70):
ds, is_fin, aux, grad = ds.value_and_grad(lambda p: p**2)(x)
x = x - 0.1 * grad
print(is_fin, ds.scale, x, grad)
```
### Code:
We could check here ([dynamic scale code](https://github.com/google/flax/blob/6b80cbb239b07e370fd0af6655b39ef40de061c0/flax/training/dynamic_scale.py#L139)) whether `fin_scale` is finite.
</issue>
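A minimal way to implement the "check that the scale stays finite" suggestion is to clamp the grown scale at the largest finite float32 value, which is what the reference patch below does. Sketched in the context of `grad_fn_wrapper`, where `finite` has already been computed and `jnp` is `jax.numpy`:

```python
grow = self.fin_steps == self.growth_interval
fin_scale = jnp.where(
    grow & finite,
    jnp.minimum(self.scale * self.growth_factor, jnp.finfo(jnp.float32).max),
    self.scale)
inf_scale = self.scale * self.backoff_factor
new_scale = jnp.where(finite, fin_scale, inf_scale)
```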
<code>
[start of flax/training/dynamic_scale.py]
1 # Copyright 2022 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Dynamic loss scaling for mixed precision gradients.
16 """
17
18 import functools
19 from typing import Any, Callable, NamedTuple, Optional, Sequence, Union
20
21 from .. import struct
22
23 import jax
24 from jax import lax
25 import jax.numpy as jnp
26
27
28
29 Array = Any
30
31
32 class DynamicScaleResult(NamedTuple):
33 dynamic_scale: 'DynamicScale'
34 finite: Array
35 aux: Any
36 grad: Any
37
38
39 class DynamicScale(struct.PyTreeNode):
40 """Dynamic loss scaling for mixed precision gradients.
41
42 For many models gradient computations in float16 will result in numerical
43 issues because small/large gradients being flushed to zero/infinity.
44 Dynamic loss scaling is an algorithm that aims to find the largest scalar
45 multiple for which the gradient does not overflow. This way the risk of
46 underflow is minimized.
47
48 the `value_and_grad` method mimicks `jax.value_and_grad`. Beside the loss
49 and gradients it also ouputs and updated `DynamicScale` instance with the
50 current loss scale factor. This method also returns a boolean value indicating
51 whether the gradients are finite.
52
53 Example::
54
55 from flax.training.dynamic_scale import DynamicScale
56
57 def loss_fn(p):
58 return jnp.asarray(p, jnp.float16) ** 2
59 p = jnp.array(1., jnp.float32)
60
61 dyn_scale = DynamicScale(growth_interval=10)
62 compute_grad = jax.jit(lambda ds, p: ds.value_and_grad(loss_fn)(p))
63 for _ in range(100):
64 dyn_scale, is_fin, loss, grad = compute_grad(dyn_scale, p)
65 p += jnp.where(is_fin, 0.01 * grad, 0.)
66 print(loss)
67
68 Jax currently cannot execute conditionals efficiently on GPUs therefore we
69 selectifly ignore the gradient update using `jax.numpy.where` in case of
70 non-finite gradients.
71
72 Attributes:
73 growth_factor: how much to grow the scalar after a period of finite
74 gradients (default: 2.).
75 backoff_factor: how much to shrink the scalar after a non-finite gradient
76 (default: 0.5).
77 growth_interval: after how many steps of finite gradients the scale should
78 be increased (default: 2000).
79 fin_steps: indicates how many gradient steps in a row have been finite.
80 scale: the current scale by which the loss is multiplied.
81 """
82 growth_factor: float = struct.field(pytree_node=False, default=2.0)
83 backoff_factor: float = struct.field(pytree_node=False, default=0.5)
84 growth_interval: int = struct.field(pytree_node=False, default=2000)
85 fin_steps: Array = 0
86 scale: Array = 65536.0
87
88 def value_and_grad(self, fun: Callable[..., Any],
89 argnums: Union[int, Sequence[int]] = 0,
90 has_aux: bool = False,
91 axis_name: Optional[str] = None,
92 ) -> Callable[..., DynamicScaleResult]:
93 """Wrapper around `jax.value_and_grad`.
94
95 Args:
96 fun: Function to be differentiated. Its arguments at positions specified
97 by ``argnums`` should be arrays, scalars, or standard Python containers.
98 It should return a scalar (which includes arrays with shape ``()``
99 but not arrays with shape ``(1,)`` etc.)
100 argnums: Optional, integer or sequence of integers. Specifies which
101 positional argument(s) to differentiate with respect to (default 0).
102 has_aux: Optional, bool. Indicates whether ``fun`` returns a pair where
103 the first element is considered the output of the mathematical function
104 to be differentiated and the second element is auxiliary data.
105 Default False.
106 axis_name: If an axis is given the gradients will be averaged across
107 replicas (default: None).
108 Returns:
109 A function that takes the same arguments as `fun` and
110 returns a DynamicScaleResult
111 """
112 @functools.wraps(fun)
113 def loss_wrapper(*args):
114 aux = fun(*args)
115 if has_aux:
116 return (self.scale * aux[0], aux[1])
117 else:
118 return self.scale * aux
119
120 grad_fn = jax.value_and_grad(loss_wrapper, argnums, has_aux)
121 def grad_fn_wrapper(*args):
122 aux, grad = grad_fn(*args)
123 aux = (aux[0] / self.scale, aux[1]) if has_aux else aux / self.scale
124
125 grad = jax.tree_util.tree_map(
126 lambda g: jnp.asarray(g, jnp.float32) / self.scale, grad)
127 if axis_name is not None:
128 grad = lax.pmean(grad, axis_name)
129
130 finite = jnp.array(True)
131 for g in jax.tree_util.tree_leaves(grad):
132 finite &= jnp.all(lax.is_finite(g))
133
134 grow = self.fin_steps == self.growth_interval
135 fin_scale = jnp.where(grow & finite,
136 self.scale * self.growth_factor,
137 self.scale)
138 inf_scale = self.scale * self.backoff_factor
139 new_scale = jnp.where(finite, fin_scale, inf_scale)
140 new_fin_steps = jnp.where(grow | (~finite), 0, self.fin_steps + 1)
141
142 new_self = self.replace(fin_steps=new_fin_steps, scale=new_scale)
143 return DynamicScaleResult(new_self, finite, aux, grad)
144 return grad_fn_wrapper
145
[end of flax/training/dynamic_scale.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flax/training/dynamic_scale.py b/flax/training/dynamic_scale.py
--- a/flax/training/dynamic_scale.py
+++ b/flax/training/dynamic_scale.py
@@ -132,9 +132,10 @@
finite &= jnp.all(lax.is_finite(g))
grow = self.fin_steps == self.growth_interval
- fin_scale = jnp.where(grow & finite,
- self.scale * self.growth_factor,
- self.scale)
+ fin_scale = jnp.where(
+ grow & finite,
+ jnp.minimum(self.scale * self.growth_factor, jnp.finfo(jnp.float32).max),
+ self.scale)
inf_scale = self.scale * self.backoff_factor
new_scale = jnp.where(finite, fin_scale, inf_scale)
new_fin_steps = jnp.where(grow | (~finite), 0, self.fin_steps + 1)
| {"golden_diff": "diff --git a/flax/training/dynamic_scale.py b/flax/training/dynamic_scale.py\n--- a/flax/training/dynamic_scale.py\n+++ b/flax/training/dynamic_scale.py\n@@ -132,9 +132,10 @@\n finite &= jnp.all(lax.is_finite(g))\n \n grow = self.fin_steps == self.growth_interval\n- fin_scale = jnp.where(grow & finite,\n- self.scale * self.growth_factor,\n- self.scale)\n+ fin_scale = jnp.where(\n+ grow & finite,\n+ jnp.minimum(self.scale * self.growth_factor, jnp.finfo(jnp.float32).max),\n+ self.scale)\n inf_scale = self.scale * self.backoff_factor\n new_scale = jnp.where(finite, fin_scale, inf_scale)\n new_fin_steps = jnp.where(grow | (~finite), 0, self.fin_steps + 1)\n", "issue": "DynamicScale goes to infinity\n### System information\r\n- OS Platform and Distribution: Linux 5.15.74-3-MANJARO\r\n- Flax, jax, jaxlib versions: flax 0.6.0, jax 0.3.21, jaxlib 0.3.20+cuda11.cudnn82\r\n- Python version: 3.10.4\r\n- GPU/TPU model and memory: NVIDIA RTX 2060 Super 8GB\r\n- CUDA version (if applicable): cuda11, cudnn82\r\n\r\n\r\n### Problem you have encountered:\r\nI adapted the gradient scaling code of the imagenet example in order to use fp16 training for my model. During training, the `scale` parameter of `DynamicScaling` became `inf`, because it kept doubling every 2000 steps without the gradients ever becoming `nan`. Once the `scale` hit `inf` it never went down, since dividing `inf` by any number still returns `inf`.\r\n\r\n### What you expected to happen:\r\nEither have a `maximum_scale` parameter or add a check to see if `scale` is finite.\r\nI think it's a one line change in the `DynamicScale` code, so I can make a pull request if you agree.\r\n\r\n### Steps to reproduce:\r\n```py\r\nimport jax.numpy as jnp\r\nfrom flax.training import dynamic_scale\r\n\r\nds = dynamic_scale.DynamicScale(growth_factor=10.0, growth_interval=1)\r\nx = jnp.float32(1)\r\n\r\nfor _ in range(70):\r\n ds, is_fin, aux, grad = ds.value_and_grad(lambda p: p**2)(x)\r\n x = x - 0.1 * grad\r\n print(is_fin, ds.scale, x, grad)\r\n```\r\n\r\n### Code:\r\nCould check here ([dynamic scale code](https://github.com/google/flax/blob/6b80cbb239b07e370fd0af6655b39ef40de061c0/flax/training/dynamic_scale.py#L139)) if `fin_scale` is finite.\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Dynamic loss scaling for mixed precision gradients.\n\"\"\"\n\nimport functools\nfrom typing import Any, Callable, NamedTuple, Optional, Sequence, Union\n\nfrom .. 
import struct\n\nimport jax\nfrom jax import lax\nimport jax.numpy as jnp\n\n\n\nArray = Any\n\n\nclass DynamicScaleResult(NamedTuple):\n dynamic_scale: 'DynamicScale'\n finite: Array\n aux: Any\n grad: Any\n\n\nclass DynamicScale(struct.PyTreeNode):\n \"\"\"Dynamic loss scaling for mixed precision gradients.\n\n For many models gradient computations in float16 will result in numerical\n issues because small/large gradients being flushed to zero/infinity.\n Dynamic loss scaling is an algorithm that aims to find the largest scalar\n multiple for which the gradient does not overflow. This way the risk of\n underflow is minimized.\n\n the `value_and_grad` method mimicks `jax.value_and_grad`. Beside the loss\n and gradients it also ouputs and updated `DynamicScale` instance with the\n current loss scale factor. This method also returns a boolean value indicating\n whether the gradients are finite.\n\n Example::\n\n from flax.training.dynamic_scale import DynamicScale\n\n def loss_fn(p):\n return jnp.asarray(p, jnp.float16) ** 2\n p = jnp.array(1., jnp.float32)\n\n dyn_scale = DynamicScale(growth_interval=10)\n compute_grad = jax.jit(lambda ds, p: ds.value_and_grad(loss_fn)(p))\n for _ in range(100):\n dyn_scale, is_fin, loss, grad = compute_grad(dyn_scale, p)\n p += jnp.where(is_fin, 0.01 * grad, 0.)\n print(loss)\n\n Jax currently cannot execute conditionals efficiently on GPUs therefore we\n selectifly ignore the gradient update using `jax.numpy.where` in case of\n non-finite gradients.\n\n Attributes:\n growth_factor: how much to grow the scalar after a period of finite\n gradients (default: 2.).\n backoff_factor: how much to shrink the scalar after a non-finite gradient\n (default: 0.5).\n growth_interval: after how many steps of finite gradients the scale should\n be increased (default: 2000).\n fin_steps: indicates how many gradient steps in a row have been finite.\n scale: the current scale by which the loss is multiplied.\n \"\"\"\n growth_factor: float = struct.field(pytree_node=False, default=2.0)\n backoff_factor: float = struct.field(pytree_node=False, default=0.5)\n growth_interval: int = struct.field(pytree_node=False, default=2000)\n fin_steps: Array = 0\n scale: Array = 65536.0\n\n def value_and_grad(self, fun: Callable[..., Any],\n argnums: Union[int, Sequence[int]] = 0,\n has_aux: bool = False,\n axis_name: Optional[str] = None,\n ) -> Callable[..., DynamicScaleResult]:\n \"\"\"Wrapper around `jax.value_and_grad`.\n\n Args:\n fun: Function to be differentiated. Its arguments at positions specified\n by ``argnums`` should be arrays, scalars, or standard Python containers.\n It should return a scalar (which includes arrays with shape ``()``\n but not arrays with shape ``(1,)`` etc.)\n argnums: Optional, integer or sequence of integers. Specifies which\n positional argument(s) to differentiate with respect to (default 0).\n has_aux: Optional, bool. 
Indicates whether ``fun`` returns a pair where\n the first element is considered the output of the mathematical function\n to be differentiated and the second element is auxiliary data.\n Default False.\n axis_name: If an axis is given the gradients will be averaged across\n replicas (default: None).\n Returns:\n A function that takes the same arguments as `fun` and\n returns a DynamicScaleResult\n \"\"\"\n @functools.wraps(fun)\n def loss_wrapper(*args):\n aux = fun(*args)\n if has_aux:\n return (self.scale * aux[0], aux[1])\n else:\n return self.scale * aux\n\n grad_fn = jax.value_and_grad(loss_wrapper, argnums, has_aux)\n def grad_fn_wrapper(*args):\n aux, grad = grad_fn(*args)\n aux = (aux[0] / self.scale, aux[1]) if has_aux else aux / self.scale\n\n grad = jax.tree_util.tree_map(\n lambda g: jnp.asarray(g, jnp.float32) / self.scale, grad)\n if axis_name is not None:\n grad = lax.pmean(grad, axis_name)\n\n finite = jnp.array(True)\n for g in jax.tree_util.tree_leaves(grad):\n finite &= jnp.all(lax.is_finite(g))\n\n grow = self.fin_steps == self.growth_interval\n fin_scale = jnp.where(grow & finite,\n self.scale * self.growth_factor,\n self.scale)\n inf_scale = self.scale * self.backoff_factor\n new_scale = jnp.where(finite, fin_scale, inf_scale)\n new_fin_steps = jnp.where(grow | (~finite), 0, self.fin_steps + 1)\n\n new_self = self.replace(fin_steps=new_fin_steps, scale=new_scale)\n return DynamicScaleResult(new_self, finite, aux, grad)\n return grad_fn_wrapper\n", "path": "flax/training/dynamic_scale.py"}]} | 2,674 | 210 |
gh_patches_debug_9395 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-741 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix versioning on dependencies
Fix dependency package versions to be consistent with the rest of the libraries
</issue>
<code>
[start of libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 REQUIRES = [
8 "applicationinsights>=0.11.9",
9 "botbuilder-schema>=4.4.0b1",
10 "botframework-connector>=4.4.0b1",
11 "botbuilder-core>=4.4.0b1",
12 "botbuilder-applicationinsights>=4.4.0b1",
13 ]
14 TESTS_REQUIRES = [
15 "aiounittest==1.3.0",
16 "aiohttp==3.5.4",
17 ]
18
19 root = os.path.abspath(os.path.dirname(__file__))
20
21 with open(
22 os.path.join(
23 root, "botbuilder", "integration", "applicationinsights", "aiohttp", "about.py"
24 )
25 ) as f:
26 package_info = {}
27 info = f.read()
28 exec(info, package_info)
29
30 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
31 long_description = f.read()
32
33 setup(
34 name=package_info["__title__"],
35 version=package_info["__version__"],
36 url=package_info["__uri__"],
37 author=package_info["__author__"],
38 description=package_info["__description__"],
39 keywords=[
40 "BotBuilderApplicationInsights",
41 "bots",
42 "ai",
43 "botframework",
44 "botbuilder",
45 "aiohttp",
46 ],
47 long_description=long_description,
48 long_description_content_type="text/x-rst",
49 license=package_info["__license__"],
50 packages=["botbuilder.integration.applicationinsights.aiohttp"],
51 install_requires=REQUIRES + TESTS_REQUIRES,
52 tests_require=TESTS_REQUIRES,
53 include_package_data=True,
54 classifiers=[
55 "Programming Language :: Python :: 3.7",
56 "Intended Audience :: Developers",
57 "License :: OSI Approved :: MIT License",
58 "Operating System :: OS Independent",
59 "Development Status :: 5 - Production/Stable",
60 "Topic :: Scientific/Engineering :: Artificial Intelligence",
61 ],
62 )
63
[end of libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py
--- a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py
+++ b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py
@@ -6,14 +6,14 @@
REQUIRES = [
"applicationinsights>=0.11.9",
- "botbuilder-schema>=4.4.0b1",
- "botframework-connector>=4.4.0b1",
- "botbuilder-core>=4.4.0b1",
- "botbuilder-applicationinsights>=4.4.0b1",
+ "aiohttp==3.6.2",
+ "botbuilder-schema>=4.7.1",
+ "botframework-connector>=4.7.1",
+ "botbuilder-core>=4.7.1",
+ "botbuilder-applicationinsights>=4.7.1",
]
TESTS_REQUIRES = [
"aiounittest==1.3.0",
- "aiohttp==3.5.4",
]
root = os.path.abspath(os.path.dirname(__file__))
| {"golden_diff": "diff --git a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py\n--- a/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py\n+++ b/libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py\n@@ -6,14 +6,14 @@\n \n REQUIRES = [\n \"applicationinsights>=0.11.9\",\n- \"botbuilder-schema>=4.4.0b1\",\n- \"botframework-connector>=4.4.0b1\",\n- \"botbuilder-core>=4.4.0b1\",\n- \"botbuilder-applicationinsights>=4.4.0b1\",\n+ \"aiohttp==3.6.2\",\n+ \"botbuilder-schema>=4.7.1\",\n+ \"botframework-connector>=4.7.1\",\n+ \"botbuilder-core>=4.7.1\",\n+ \"botbuilder-applicationinsights>=4.7.1\",\n ]\n TESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n- \"aiohttp==3.5.4\",\n ]\n \n root = os.path.abspath(os.path.dirname(__file__))\n", "issue": "Fix versioning on dependencies\nFix dependency package versions to be consistent with the rest of the libraries\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nREQUIRES = [\n \"applicationinsights>=0.11.9\",\n \"botbuilder-schema>=4.4.0b1\",\n \"botframework-connector>=4.4.0b1\",\n \"botbuilder-core>=4.4.0b1\",\n \"botbuilder-applicationinsights>=4.4.0b1\",\n]\nTESTS_REQUIRES = [\n \"aiounittest==1.3.0\",\n \"aiohttp==3.5.4\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(\n os.path.join(\n root, \"botbuilder\", \"integration\", \"applicationinsights\", \"aiohttp\", \"about.py\"\n )\n) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\n \"BotBuilderApplicationInsights\",\n \"bots\",\n \"ai\",\n \"botframework\",\n \"botbuilder\",\n \"aiohttp\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\"botbuilder.integration.applicationinsights.aiohttp\"],\n install_requires=REQUIRES + TESTS_REQUIRES,\n tests_require=TESTS_REQUIRES,\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-integration-applicationinsights-aiohttp/setup.py"}]} | 1,150 | 278 |
gh_patches_debug_726 | rasdani/github-patches | git_diff | dotkom__onlineweb4-425 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"Startet studie" in Profile -> Medlemskap requires defined format without specifying it
"Started studie" is a datefield. The problem is that most browsers (like FF, Chrome) don't render these fields with any additional tools which makes filling them out a pain in the ass (Safari@iOS has that fancy datepicker-shit).
The field requires the format 'yyyy-mm-dd', but does not specify this anywhere. This should be fixed somehow.
</issue>
<code>
[start of apps/profiles/forms.py]
1 # -*- coding: utf-8 -*-
2
3 from django import forms
4 from django.utils.translation import ugettext as _
5
6 from apps.profiles.models import Privacy
7 from apps.authentication.models import OnlineUser, FIELD_OF_STUDY_CHOICES
8
9 class ProfileForm(forms.ModelForm):
10
11 class Meta:
12 model = OnlineUser
13
14 fields = ['nickname', 'website', 'phone_number', 'address', 'zip_code', 'allergies', 'mark_rules', ]
15 widgets = {
16 'allergies' : forms.Textarea(attrs={'id' : 'allergies'}),
17 }
18
19 def clean(self):
20 super(ProfileForm, self).clean()
21
22 cleaned_data = self.cleaned_data
23
24 # ZIP code digits only
25 zip_code = cleaned_data['zip_code']
26 if len(zip_code) != 0 and (len(zip_code) != 4 or not zip_code.isdigit()):
27 self._errors['zip_code'] = self.error_class([_(u"Postnummer må bestå av fire siffer.")])
28
29 return cleaned_data
30
31 class ImageForm(forms.ModelForm):
32
33 class Meta:
34 model = OnlineUser
35
36 fields = ['image']
37 widgets = {
38 'image': forms.FileInput(attrs={'class' : 'hidden-input', 'id' : 'image'}),
39 }
40
41 class PrivacyForm(forms.ModelForm):
42
43 class Meta:
44 model = Privacy
45 exclude = ['user']
46
47
48 class MailSettingsForm(forms.ModelForm):
49
50 class Meta:
51 model = OnlineUser
52 fields = ['infomail', ]
53
54
55 class MembershipSettingsForm(forms.ModelForm):
56
57 def __init__(self, *args, **kwargs):
58 super(MembershipSettingsForm, self).__init__(*args, **kwargs)
59 self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'
60
61 class Meta:
62 model = OnlineUser
63 fields = ['field_of_study', 'started_date', ]
64
[end of apps/profiles/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/profiles/forms.py b/apps/profiles/forms.py
--- a/apps/profiles/forms.py
+++ b/apps/profiles/forms.py
@@ -61,3 +61,7 @@
class Meta:
model = OnlineUser
fields = ['field_of_study', 'started_date', ]
+
+ widgets = {
+ 'started_date' : forms.TextInput(attrs={'placeholder' : 'YYYY-MM-DD'}),
+ }
| {"golden_diff": "diff --git a/apps/profiles/forms.py b/apps/profiles/forms.py\n--- a/apps/profiles/forms.py\n+++ b/apps/profiles/forms.py\n@@ -61,3 +61,7 @@\n class Meta:\n model = OnlineUser\n fields = ['field_of_study', 'started_date', ]\n+\n+ widgets = {\n+ 'started_date' : forms.TextInput(attrs={'placeholder' : 'YYYY-MM-DD'}),\n+ }\n", "issue": "\"Startet studie\" in Profile -> Medlemskap requires defined format without specifying it\n\"Started studie\" is a datefield. The problem is that most browsers (like FF, Chrome) don't render these fields with any additional tools which makes filling them out a pain in the ass (Safari@iOS has that fancy datepicker-shit).\n\nThe field requires the format 'yyyy-mm-dd', but does not specify this anywhere. This should be fixed somehow.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.utils.translation import ugettext as _\n\nfrom apps.profiles.models import Privacy\nfrom apps.authentication.models import OnlineUser, FIELD_OF_STUDY_CHOICES\n\nclass ProfileForm(forms.ModelForm):\n\n class Meta:\n model = OnlineUser\n\n fields = ['nickname', 'website', 'phone_number', 'address', 'zip_code', 'allergies', 'mark_rules', ]\n widgets = {\n 'allergies' : forms.Textarea(attrs={'id' : 'allergies'}),\n }\n\n def clean(self):\n super(ProfileForm, self).clean()\n\n cleaned_data = self.cleaned_data\n\n # ZIP code digits only\n zip_code = cleaned_data['zip_code']\n if len(zip_code) != 0 and (len(zip_code) != 4 or not zip_code.isdigit()):\n self._errors['zip_code'] = self.error_class([_(u\"Postnummer m\u00e5 best\u00e5 av fire siffer.\")])\n\n return cleaned_data\n\nclass ImageForm(forms.ModelForm):\n\n class Meta:\n model = OnlineUser\n\n fields = ['image']\n widgets = {\n 'image': forms.FileInput(attrs={'class' : 'hidden-input', 'id' : 'image'}),\n }\n\nclass PrivacyForm(forms.ModelForm):\n\n class Meta:\n model = Privacy\n exclude = ['user']\n\n\nclass MailSettingsForm(forms.ModelForm):\n\n class Meta:\n model = OnlineUser\n fields = ['infomail', ]\n\n\nclass MembershipSettingsForm(forms.ModelForm):\n\n def __init__(self, *args, **kwargs):\n super(MembershipSettingsForm, self).__init__(*args, **kwargs)\n self.fields['started_date'].widget.attrs['class'] = 'hasDatePicker'\n\n class Meta:\n model = OnlineUser\n fields = ['field_of_study', 'started_date', ]\n", "path": "apps/profiles/forms.py"}]} | 1,158 | 95 |
gh_patches_debug_19174 | rasdani/github-patches | git_diff | translate__pootle-4496 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
calculate_checks requires running refresh_stats afterwards
If you run `calculate_checks` you will have to also run `refresh_stats` afterwards in order for the detected critical errors to be shown on the stats. This is necessary since if no errors are detected in the stats no links are properly rendered to directly see the errors in the editor.
Documentation says that `calculate_checks` **will flush existing caches and update the quality checks cache**, so probably the right fix is to align `calculate_checks` behavior with that statement.
</issue>
<code>
[start of pootle/core/checks/checker.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import logging
10 import time
11
12 from django.conf import settings
13 from django.utils import timezone
14 from django.utils.functional import cached_property
15 from django.utils.lru_cache import lru_cache
16
17 from pootle_misc.checks import run_given_filters
18 from pootle_misc.util import import_func
19 from pootle_store.models import QualityCheck, Store, Unit
20 from pootle_store.unit import UnitProxy
21 from pootle_store.util import OBSOLETE
22 from pootle_translationproject.models import TranslationProject
23
24
25 logger = logging.getLogger(__name__)
26
27
28 class CheckableUnit(UnitProxy):
29 """CheckableUnit wraps a `Unit` values dictionary to provide a `Unit` like
30 instance that can be used by UnitQualityCheck
31
32 At a minimum the dict should contain source_f, target_f, store__id, and
33 store__translation_project__id
34 """
35
36 @property
37 def store(self):
38 return self.store__id
39
40 @property
41 def tp(self):
42 return self.store__translation_project__id
43
44
45 class UnitQualityCheck(object):
46
47 def __init__(self, unit, checker, original_checks,
48 check_names, keep_false_positives=True):
49 """Refreshes QualityChecks for a Unit
50
51 As this class can work with either `Unit` or `CheckableUnit` it only
52 uses a minimum of `Unit` attributes from `self.unit`.
53
54 :param unit: an instance of Unit or CheckableUnit
55 :param checker: a Checker for this Unit.
56 :param original_checks: current QualityChecks for this Unit
57 :param check_names: limit checks to given list of quality check names.
58 :param keep_false_positives: when set to `False`, it will unmute any
59 existing false positive checks.
60 """
61 self.checker = checker
62 self.unit = unit
63 self.original_checks = original_checks
64 self.check_names = check_names
65 self.keep_false_positives = keep_false_positives
66 self.unmute_list = []
67
68 @cached_property
69 def check_failures(self):
70 """Current QualityCheck failure for the Unit
71 """
72 if self.check_names is None:
73 return self.checker.run_filters(
74 self.unit, categorised=True)
75 return run_given_filters(
76 self.checker, self.unit, self.check_names)
77
78 @cached_property
79 def checks_qs(self):
80 """QualityCheck queryset for the Unit
81 """
82 return QualityCheck.objects.filter(unit=self.unit.id)
83
84 def delete_checks(self, checks):
85 """Delete checks that are no longer used.
86 """
87 to_delete = self.checks_qs.filter(name__in=checks)
88 if to_delete.exists():
89 to_delete.delete()
90 return True
91 return False
92
93 def unmute_checks(self, checks):
94 """Unmute checks that should no longer be muted
95 """
96 to_unmute = self.checks_qs.filter(
97 name__in=checks, false_positive=True)
98 if to_unmute.exists():
99 to_unmute.update(false_positive=False)
100 return True
101 return False
102
103 def update(self):
104 """Update QualityChecks for a Unit, deleting and unmuting as appropriate.
105 """
106 # update the checks for this unit
107 updated = self.update_checks()
108
109 # delete any remaining checks that were only in the original list
110 deleted = (
111 self.original_checks and self.delete_checks(self.original_checks))
112
113 # unmute any checks that have been marked for unmuting
114 unmuted = (
115 self.unmute_list and self.unmute_checks(self.unmute_list))
116
117 return (updated or deleted or unmuted)
118
119 def update_checks(self):
120 """Compare self.original_checks to the Units calculated QualityCheck failures.
121
122 Removes members of self.original_checks as they have been compared.
123 """
124 updated = False
125 for name in self.check_failures.iterkeys():
126 if name in self.original_checks:
127 # keep false-positive checks if check is active
128 unmute = (
129 self.original_checks[name]['false_positive']
130 and not self.keep_false_positives)
131 if unmute:
132 self.unmute_list.append(name)
133 # if the check is valid remove from the list and continue
134 del self.original_checks[name]
135 continue
136
137 # the check didnt exist previously - so create it
138 self.checks_qs.create(
139 unit_id=self.unit.id,
140 name=name,
141 message=self.check_failures[name]['message'],
142 category=self.check_failures[name]['category'])
143 updated = True
144
145 return updated
146
147
148 class QualityCheckUpdater(object):
149
150 def __init__(self, check_names=None, translation_project=None,
151 keep_false_positives=True):
152 """Refreshes QualityChecks for Units
153
154 :param check_names: limit checks to given list of quality check names.
155 :param translation_project: an instance of `TranslationProject` to
156 restrict the update to.
157 :param keep_false_positives: when set to `False`, it will unmute any
158 existing false positive checks.
159 """
160
161 self.check_names = check_names
162 self.translation_project = translation_project
163 self.keep_false_positives = keep_false_positives
164 self.stores = set()
165 self._store_to_expire = None
166
167 @cached_property
168 def checks(self):
169 """Existing checks in the database for all units
170 """
171 checks = self.checks_qs
172 check_keys = (
173 'id', 'name', 'unit_id',
174 'category', 'false_positive')
175
176 if self.check_names is not None:
177 checks = checks.filter(name__in=self.check_names)
178
179 all_units_checks = {}
180 for check in checks.values(*check_keys):
181 all_units_checks.setdefault(
182 check['unit_id'], {})[check['name']] = check
183 return all_units_checks
184
185 @cached_property
186 def checks_qs(self):
187 """QualityCheck queryset for all units, restricted to TP if set
188 """
189 checks_qs = QualityCheck.objects.all()
190
191 if self.translation_project is not None:
192 tp_pk = self.translation_project.pk
193 checks_qs = checks_qs.filter(
194 unit__store__translation_project__pk=tp_pk)
195 return checks_qs
196
197 @cached_property
198 def units(self):
199 """Result set of Units, restricted to TP if set
200 """
201 units = Unit.simple_objects.all()
202 if self.translation_project is not None:
203 units = units.filter(
204 store__translation_project=self.translation_project)
205 return units
206
207 def clear_checks(self):
208 QualityCheck.delete_unknown_checks()
209
210 @lru_cache(maxsize=None)
211 def get_checker(self, tp_pk):
212 """Return the site QualityChecker or the QualityCheck associated with
213 the a Unit's TP otherwise.
214 """
215 if settings.POOTLE_QUALITY_CHECKER:
216 return import_func(settings.POOTLE_QUALITY_CHECKER)()
217 try:
218 return TranslationProject.objects.get(id=tp_pk).checker
219 except TranslationProject.DoesNotExist:
220 # There seems to be a risk of dangling Stores with no TP
221 return None
222
223 def expire_store_cache(self, store_pk=None):
224 """Whenever a store_pk is found it is queued for cache expiry
225
226 if a new store_pk is called the old one has its cache expired,
227 and the new store_pk is saved
228
229 call with None to expire the current Store's cache
230 """
231 if self._store_to_expire is None:
232 # there is no Store set - queue it for expiry
233 self._store_to_expire = store_pk
234 return
235 if store_pk == self._store_to_expire:
236 # its the same Store that we saw last time
237 return
238 # there was a _store_to_expire set and its changed - expire the cache
239 self.update_store_caches([self._store_to_expire])
240
241 # remember the new store_pk
242 self._store_to_expire = store_pk
243
244 def update(self):
245 """Update/purge all QualityChecks for Units, and expire Store caches.
246 """
247 start = time.time()
248 logger.debug("Clearing unknown checks...")
249 self.clear_checks()
250 logger.debug(
251 "Cleared unknown checks in %s seconds"
252 % (time.time() - start))
253
254 start = time.time()
255 logger.debug("Deleting checks for untranslated units...")
256 untrans = self.update_untranslated()
257 logger.debug(
258 "Deleted %s checks for untranslated units in %s seconds"
259 % (untrans, (time.time() - start)))
260
261 start = time.time()
262 logger.debug("Updating checks - this may take some time...")
263 trans = self.update_translated()
264 logger.debug(
265 "Updated checks for %s units in %s seconds"
266 % (trans, (time.time() - start)))
267
268 def update_store_caches(self, stores):
269 """After completing QualityCheck updates expire caches for affected Stores.
270 """
271 for store in Store.objects.filter(pk__in=stores):
272 store.update_dirty_cache()
273
274 def update_translated_unit(self, unit, checker=None):
275 """Update checks for a translated Unit
276 """
277 unit = CheckableUnit(unit)
278 checker = UnitQualityCheck(
279 unit,
280 checker,
281 self.checks.get(unit.id, {}),
282 self.check_names,
283 self.keep_false_positives)
284 if checker.update():
285 self.expire_store_cache(unit.store)
286 self.units.filter(id=unit.id).update(mtime=timezone.now())
287 return True
288 return False
289
290 def update_translated(self):
291 """Update checks for translated Units
292 """
293 unit_fields = [
294 "id", "source_f", "target_f", "locations", "store__id"]
295
296 tp_key = "store__translation_project__id"
297 if self.translation_project is None:
298 unit_fields.append(tp_key)
299
300 checker = None
301 if self.translation_project is not None:
302 # we only need to get the checker once if TP is set
303 checker = self.get_checker(self.translation_project.id)
304
305 translated = (
306 self.units.filter(state__gte=OBSOLETE)
307 .order_by("store", "index"))
308 updated_count = 0
309 for unit in translated.values(*unit_fields).iterator():
310 if self.translation_project is not None:
311 # if TP is set then manually add TP.id to the Unit value dict
312 unit[tp_key] = self.translation_project.id
313 if checker is None:
314 checker = self.get_checker(unit[tp_key])
315 if checker and self.update_translated_unit(unit, checker=checker):
316 updated_count += 1
317 # clear the cache of the remaining Store
318 self.expire_store_cache()
319 return updated_count
320
321 def update_untranslated(self):
322 """Delete QualityChecks for untranslated Units
323 """
324 checks_qs = self.checks_qs.exclude(unit__state__gte=OBSOLETE)
325 self.update_store_caches(
326 set(checks_qs.values_list("unit__store__pk", flat=True).distinct()))
327 deleted = checks_qs.count()
328 checks_qs.delete()
329 return deleted
330
[end of pootle/core/checks/checker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/core/checks/checker.py b/pootle/core/checks/checker.py
--- a/pootle/core/checks/checker.py
+++ b/pootle/core/checks/checker.py
@@ -14,6 +14,7 @@
from django.utils.functional import cached_property
from django.utils.lru_cache import lru_cache
+from pootle.core.mixins.treeitem import CachedMethods
from pootle_misc.checks import run_given_filters
from pootle_misc.util import import_func
from pootle_store.models import QualityCheck, Store, Unit
@@ -269,6 +270,7 @@
"""After completing QualityCheck updates expire caches for affected Stores.
"""
for store in Store.objects.filter(pk__in=stores):
+ store.mark_dirty(CachedMethods.CHECKS, CachedMethods.MTIME)
store.update_dirty_cache()
def update_translated_unit(self, unit, checker=None):
| {"golden_diff": "diff --git a/pootle/core/checks/checker.py b/pootle/core/checks/checker.py\n--- a/pootle/core/checks/checker.py\n+++ b/pootle/core/checks/checker.py\n@@ -14,6 +14,7 @@\n from django.utils.functional import cached_property\n from django.utils.lru_cache import lru_cache\n \n+from pootle.core.mixins.treeitem import CachedMethods\n from pootle_misc.checks import run_given_filters\n from pootle_misc.util import import_func\n from pootle_store.models import QualityCheck, Store, Unit\n@@ -269,6 +270,7 @@\n \"\"\"After completing QualityCheck updates expire caches for affected Stores.\n \"\"\"\n for store in Store.objects.filter(pk__in=stores):\n+ store.mark_dirty(CachedMethods.CHECKS, CachedMethods.MTIME)\n store.update_dirty_cache()\n \n def update_translated_unit(self, unit, checker=None):\n", "issue": "calculate_checks requires running refresh_stats afterwards\nIf you run `calculate_checks` you will have to also run `refresh_stats` afterwards in order for the detected critical errors to be shown on the stats. This is necessary since if no errors are detected in the stats no links are properly rendered to directly see the errors in the editor.\n\nDocumentation says that `calculate_checks` **will flush existing caches and update the quality checks cache**, so probably the right fix is to align `calculate_checks` behavior with that statement.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\nimport time\n\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.lru_cache import lru_cache\n\nfrom pootle_misc.checks import run_given_filters\nfrom pootle_misc.util import import_func\nfrom pootle_store.models import QualityCheck, Store, Unit\nfrom pootle_store.unit import UnitProxy\nfrom pootle_store.util import OBSOLETE\nfrom pootle_translationproject.models import TranslationProject\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CheckableUnit(UnitProxy):\n \"\"\"CheckableUnit wraps a `Unit` values dictionary to provide a `Unit` like\n instance that can be used by UnitQualityCheck\n\n At a minimum the dict should contain source_f, target_f, store__id, and\n store__translation_project__id\n \"\"\"\n\n @property\n def store(self):\n return self.store__id\n\n @property\n def tp(self):\n return self.store__translation_project__id\n\n\nclass UnitQualityCheck(object):\n\n def __init__(self, unit, checker, original_checks,\n check_names, keep_false_positives=True):\n \"\"\"Refreshes QualityChecks for a Unit\n\n As this class can work with either `Unit` or `CheckableUnit` it only\n uses a minimum of `Unit` attributes from `self.unit`.\n\n :param unit: an instance of Unit or CheckableUnit\n :param checker: a Checker for this Unit.\n :param original_checks: current QualityChecks for this Unit\n :param check_names: limit checks to given list of quality check names.\n :param keep_false_positives: when set to `False`, it will unmute any\n existing false positive checks.\n \"\"\"\n self.checker = checker\n self.unit = unit\n self.original_checks = original_checks\n self.check_names = check_names\n self.keep_false_positives = keep_false_positives\n self.unmute_list = []\n\n @cached_property\n def check_failures(self):\n 
\"\"\"Current QualityCheck failure for the Unit\n \"\"\"\n if self.check_names is None:\n return self.checker.run_filters(\n self.unit, categorised=True)\n return run_given_filters(\n self.checker, self.unit, self.check_names)\n\n @cached_property\n def checks_qs(self):\n \"\"\"QualityCheck queryset for the Unit\n \"\"\"\n return QualityCheck.objects.filter(unit=self.unit.id)\n\n def delete_checks(self, checks):\n \"\"\"Delete checks that are no longer used.\n \"\"\"\n to_delete = self.checks_qs.filter(name__in=checks)\n if to_delete.exists():\n to_delete.delete()\n return True\n return False\n\n def unmute_checks(self, checks):\n \"\"\"Unmute checks that should no longer be muted\n \"\"\"\n to_unmute = self.checks_qs.filter(\n name__in=checks, false_positive=True)\n if to_unmute.exists():\n to_unmute.update(false_positive=False)\n return True\n return False\n\n def update(self):\n \"\"\"Update QualityChecks for a Unit, deleting and unmuting as appropriate.\n \"\"\"\n # update the checks for this unit\n updated = self.update_checks()\n\n # delete any remaining checks that were only in the original list\n deleted = (\n self.original_checks and self.delete_checks(self.original_checks))\n\n # unmute any checks that have been marked for unmuting\n unmuted = (\n self.unmute_list and self.unmute_checks(self.unmute_list))\n\n return (updated or deleted or unmuted)\n\n def update_checks(self):\n \"\"\"Compare self.original_checks to the Units calculated QualityCheck failures.\n\n Removes members of self.original_checks as they have been compared.\n \"\"\"\n updated = False\n for name in self.check_failures.iterkeys():\n if name in self.original_checks:\n # keep false-positive checks if check is active\n unmute = (\n self.original_checks[name]['false_positive']\n and not self.keep_false_positives)\n if unmute:\n self.unmute_list.append(name)\n # if the check is valid remove from the list and continue\n del self.original_checks[name]\n continue\n\n # the check didnt exist previously - so create it\n self.checks_qs.create(\n unit_id=self.unit.id,\n name=name,\n message=self.check_failures[name]['message'],\n category=self.check_failures[name]['category'])\n updated = True\n\n return updated\n\n\nclass QualityCheckUpdater(object):\n\n def __init__(self, check_names=None, translation_project=None,\n keep_false_positives=True):\n \"\"\"Refreshes QualityChecks for Units\n\n :param check_names: limit checks to given list of quality check names.\n :param translation_project: an instance of `TranslationProject` to\n restrict the update to.\n :param keep_false_positives: when set to `False`, it will unmute any\n existing false positive checks.\n \"\"\"\n\n self.check_names = check_names\n self.translation_project = translation_project\n self.keep_false_positives = keep_false_positives\n self.stores = set()\n self._store_to_expire = None\n\n @cached_property\n def checks(self):\n \"\"\"Existing checks in the database for all units\n \"\"\"\n checks = self.checks_qs\n check_keys = (\n 'id', 'name', 'unit_id',\n 'category', 'false_positive')\n\n if self.check_names is not None:\n checks = checks.filter(name__in=self.check_names)\n\n all_units_checks = {}\n for check in checks.values(*check_keys):\n all_units_checks.setdefault(\n check['unit_id'], {})[check['name']] = check\n return all_units_checks\n\n @cached_property\n def checks_qs(self):\n \"\"\"QualityCheck queryset for all units, restricted to TP if set\n \"\"\"\n checks_qs = QualityCheck.objects.all()\n\n if self.translation_project is not None:\n tp_pk 
= self.translation_project.pk\n checks_qs = checks_qs.filter(\n unit__store__translation_project__pk=tp_pk)\n return checks_qs\n\n @cached_property\n def units(self):\n \"\"\"Result set of Units, restricted to TP if set\n \"\"\"\n units = Unit.simple_objects.all()\n if self.translation_project is not None:\n units = units.filter(\n store__translation_project=self.translation_project)\n return units\n\n def clear_checks(self):\n QualityCheck.delete_unknown_checks()\n\n @lru_cache(maxsize=None)\n def get_checker(self, tp_pk):\n \"\"\"Return the site QualityChecker or the QualityCheck associated with\n the a Unit's TP otherwise.\n \"\"\"\n if settings.POOTLE_QUALITY_CHECKER:\n return import_func(settings.POOTLE_QUALITY_CHECKER)()\n try:\n return TranslationProject.objects.get(id=tp_pk).checker\n except TranslationProject.DoesNotExist:\n # There seems to be a risk of dangling Stores with no TP\n return None\n\n def expire_store_cache(self, store_pk=None):\n \"\"\"Whenever a store_pk is found it is queued for cache expiry\n\n if a new store_pk is called the old one has its cache expired,\n and the new store_pk is saved\n\n call with None to expire the current Store's cache\n \"\"\"\n if self._store_to_expire is None:\n # there is no Store set - queue it for expiry\n self._store_to_expire = store_pk\n return\n if store_pk == self._store_to_expire:\n # its the same Store that we saw last time\n return\n # there was a _store_to_expire set and its changed - expire the cache\n self.update_store_caches([self._store_to_expire])\n\n # remember the new store_pk\n self._store_to_expire = store_pk\n\n def update(self):\n \"\"\"Update/purge all QualityChecks for Units, and expire Store caches.\n \"\"\"\n start = time.time()\n logger.debug(\"Clearing unknown checks...\")\n self.clear_checks()\n logger.debug(\n \"Cleared unknown checks in %s seconds\"\n % (time.time() - start))\n\n start = time.time()\n logger.debug(\"Deleting checks for untranslated units...\")\n untrans = self.update_untranslated()\n logger.debug(\n \"Deleted %s checks for untranslated units in %s seconds\"\n % (untrans, (time.time() - start)))\n\n start = time.time()\n logger.debug(\"Updating checks - this may take some time...\")\n trans = self.update_translated()\n logger.debug(\n \"Updated checks for %s units in %s seconds\"\n % (trans, (time.time() - start)))\n\n def update_store_caches(self, stores):\n \"\"\"After completing QualityCheck updates expire caches for affected Stores.\n \"\"\"\n for store in Store.objects.filter(pk__in=stores):\n store.update_dirty_cache()\n\n def update_translated_unit(self, unit, checker=None):\n \"\"\"Update checks for a translated Unit\n \"\"\"\n unit = CheckableUnit(unit)\n checker = UnitQualityCheck(\n unit,\n checker,\n self.checks.get(unit.id, {}),\n self.check_names,\n self.keep_false_positives)\n if checker.update():\n self.expire_store_cache(unit.store)\n self.units.filter(id=unit.id).update(mtime=timezone.now())\n return True\n return False\n\n def update_translated(self):\n \"\"\"Update checks for translated Units\n \"\"\"\n unit_fields = [\n \"id\", \"source_f\", \"target_f\", \"locations\", \"store__id\"]\n\n tp_key = \"store__translation_project__id\"\n if self.translation_project is None:\n unit_fields.append(tp_key)\n\n checker = None\n if self.translation_project is not None:\n # we only need to get the checker once if TP is set\n checker = self.get_checker(self.translation_project.id)\n\n translated = (\n self.units.filter(state__gte=OBSOLETE)\n .order_by(\"store\", \"index\"))\n 
updated_count = 0\n for unit in translated.values(*unit_fields).iterator():\n if self.translation_project is not None:\n # if TP is set then manually add TP.id to the Unit value dict\n unit[tp_key] = self.translation_project.id\n if checker is None:\n checker = self.get_checker(unit[tp_key])\n if checker and self.update_translated_unit(unit, checker=checker):\n updated_count += 1\n # clear the cache of the remaining Store\n self.expire_store_cache()\n return updated_count\n\n def update_untranslated(self):\n \"\"\"Delete QualityChecks for untranslated Units\n \"\"\"\n checks_qs = self.checks_qs.exclude(unit__state__gte=OBSOLETE)\n self.update_store_caches(\n set(checks_qs.values_list(\"unit__store__pk\", flat=True).distinct()))\n deleted = checks_qs.count()\n checks_qs.delete()\n return deleted\n", "path": "pootle/core/checks/checker.py"}]} | 3,960 | 208 |
gh_patches_debug_15567 | rasdani/github-patches | git_diff | arviz-devs__arviz-1761 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Adding plot_forest example for multiple datasets
I was thinking that we could add an example of how to use `plot_forest` for multiple datasets. It would help newcomers and also the example would help us demonstrate the feature of legends as it is only valid for multiple datasets
</issue>
<code>
[start of arviz/plots/forestplot.py]
1 """Forest plot."""
2 from ..data import convert_to_dataset
3 from ..labels import BaseLabeller, NoModelLabeller
4 from ..rcparams import rcParams
5 from ..utils import _var_names, get_coords
6 from .plot_utils import get_plotting_function
7
8
9 def plot_forest(
10 data,
11 kind="forestplot",
12 model_names=None,
13 var_names=None,
14 filter_vars=None,
15 transform=None,
16 coords=None,
17 combined=False,
18 hdi_prob=None,
19 rope=None,
20 quartiles=True,
21 ess=False,
22 r_hat=False,
23 colors="cycle",
24 textsize=None,
25 linewidth=None,
26 markersize=None,
27 legend=True,
28 labeller=None,
29 ridgeplot_alpha=None,
30 ridgeplot_overlap=2,
31 ridgeplot_kind="auto",
32 ridgeplot_truncate=True,
33 ridgeplot_quantiles=None,
34 figsize=None,
35 ax=None,
36 backend=None,
37 backend_config=None,
38 backend_kwargs=None,
39 show=None,
40 ):
41 """Forest plot to compare HDI intervals from a number of distributions.
42
43 Generates a forest plot of 100*(hdi_prob)% HDI intervals from a trace or list of traces.
44
45 Parameters
46 ----------
47 data: obj or list[obj]
48 Any object that can be converted to an az.InferenceData object
49 Refer to documentation of az.convert_to_dataset for details
50 kind: str
51 Choose kind of plot for main axis. Supports "forestplot" or "ridgeplot"
52 model_names: list[str], optional
53 List with names for the models in the list of data. Useful when plotting more that one
54 dataset
55 var_names: list[str], optional
56 List of variables to plot (defaults to None, which results in all variables plotted)
57 Prefix the variables by `~` when you want to exclude them from the plot.
58 filter_vars: {None, "like", "regex"}, optional, default=None
59 If `None` (default), interpret var_names as the real variables names. If "like", interpret
60 var_names as substrings of the real variables names. If "regex", interpret var_names as
61 regular expressions on the real variables names. A la `pandas.filter`.
62 transform: callable
63 Function to transform data (defaults to None i.e.the identity function)
64 coords: dict, optional
65 Coordinates of var_names to be plotted. Passed to `Dataset.sel`
66 combined: bool
67 Flag for combining multiple chains into a single chain. If False (default), chains will be
68 plotted separately.
69 hdi_prob: float, optional
70 Plots highest posterior density interval for chosen percentage of density. Defaults to 0.94.
71 rope: tuple or dictionary of tuples
72 Lower and upper values of the Region Of Practical Equivalence. If a list with one interval
73 only is provided, the ROPE will be displayed across the y-axis. If more than one interval is
74 provided the length of the list should match the number of variables.
75 quartiles: bool, optional
76 Flag for plotting the interquartile range, in addition to the hdi_prob intervals.
77 Defaults to True
78 r_hat: bool, optional
79 Flag for plotting Split R-hat statistics. Requires 2 or more chains. Defaults to False
80 ess: bool, optional
81 Flag for plotting the effective sample size. Defaults to False
82 colors: list or string, optional
83 list with valid matplotlib colors, one color per model. Alternative a string can be passed.
84 If the string is `cycle`, it will automatically chose a color per model from the matplotlibs
85 cycle. If a single color is passed, eg 'k', 'C2', 'red' this color will be used for all
86 models. Defaults to 'cycle'.
87 textsize: float
88 Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
89 on figsize.
90 linewidth: int
91 Line width throughout. If None it will be autoscaled based on figsize.
92 markersize: int
93 Markersize throughout. If None it will be autoscaled based on figsize.
94 legend : bool, optional
95 Show a legend with the color encoded model information.
96 Defaults to true if there are multiple models
97 labeller : labeller instance, optional
98 Class providing the method `make_model_label` to generate the labels in the plot.
99 Read the :ref:`label_guide` for more details and usage examples.
100 ridgeplot_alpha: float
101 Transparency for ridgeplot fill. If 0, border is colored by model, otherwise
102 a black outline is used.
103 ridgeplot_overlap: float
104 Overlap height for ridgeplots.
105 ridgeplot_kind: string
106 By default ("auto") continuous variables are plotted using KDEs and discrete ones using
107 histograms. To override this use "hist" to plot histograms and "density" for KDEs
108 ridgeplot_truncate: bool
109 Whether to truncate densities according to the value of hdi_prop. Defaults to True
110 ridgeplot_quantiles: list
111 Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.
112 Defaults to None.
113 figsize: tuple
114 Figure size. If None it will be defined automatically.
115 ax: axes, optional
116 Matplotlib axes or bokeh figures.
117 backend: str, optional
118 Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
119 backend_config: dict, optional
120 Currently specifies the bounds to use for bokeh axes. Defaults to value set in rcParams.
121 backend_kwargs: bool, optional
122 These are kwargs specific to the backend being used. For additional documentation
123 check the plotting method of the backend.
124 show: bool, optional
125 Call backend show function.
126
127 Returns
128 -------
129 gridspec: matplotlib GridSpec or bokeh figures
130
131 Examples
132 --------
133 Forestpĺot
134
135 .. plot::
136 :context: close-figs
137
138 >>> import arviz as az
139 >>> non_centered_data = az.load_arviz_data('non_centered_eight')
140 >>> axes = az.plot_forest(non_centered_data,
141 >>> kind='forestplot',
142 >>> var_names=["^the"],
143 >>> filter_vars="regex",
144 >>> combined=True,
145 >>> figsize=(9, 7))
146 >>> axes[0].set_title('Estimated theta for 8 schools model')
147
148 Forestpĺot with ropes
149
150 .. plot::
151 :context: close-figs
152
153 >>> rope = {'theta': [{'school': 'Choate', 'rope': (2, 4)}], 'mu': [{'rope': (-2, 2)}]}
154 >>> axes = az.plot_forest(non_centered_data,
155 >>> rope=rope,
156 >>> var_names='~tau',
157 >>> combined=True,
158 >>> figsize=(9, 7))
159 >>> axes[0].set_title('Estimated theta for 8 schools model')
160
161
162 Ridgeplot
163
164 .. plot::
165 :context: close-figs
166
167 >>> axes = az.plot_forest(non_centered_data,
168 >>> kind='ridgeplot',
169 >>> var_names=['theta'],
170 >>> combined=True,
171 >>> ridgeplot_overlap=3,
172 >>> colors='white',
173 >>> figsize=(9, 7))
174 >>> axes[0].set_title('Estimated theta for 8 schools model')
175
176 Ridgeplot non-truncated and with quantiles
177
178 .. plot::
179 :context: close-figs
180
181 >>> axes = az.plot_forest(non_centered_data,
182 >>> kind='ridgeplot',
183 >>> var_names=['theta'],
184 >>> combined=True,
185 >>> ridgeplot_truncate=False,
186 >>> ridgeplot_quantiles=[.25, .5, .75],
187 >>> ridgeplot_overlap=0.7,
188 >>> colors='white',
189 >>> figsize=(9, 7))
190 >>> axes[0].set_title('Estimated theta for 8 schools model')
191 """
192 if not isinstance(data, (list, tuple)):
193 data = [data]
194 if len(data) == 1:
195 legend = False
196
197 if coords is None:
198 coords = {}
199
200 if labeller is None:
201 labeller = NoModelLabeller() if legend else BaseLabeller()
202
203 datasets = [convert_to_dataset(datum) for datum in reversed(data)]
204 if transform is not None:
205 datasets = [transform(dataset) for dataset in datasets]
206 datasets = get_coords(
207 datasets, list(reversed(coords)) if isinstance(coords, (list, tuple)) else coords
208 )
209
210 var_names = _var_names(var_names, datasets, filter_vars)
211
212 ncols, width_ratios = 1, [3]
213
214 if ess:
215 ncols += 1
216 width_ratios.append(1)
217
218 if r_hat:
219 ncols += 1
220 width_ratios.append(1)
221
222 if hdi_prob is None:
223 hdi_prob = rcParams["stats.hdi_prob"]
224 else:
225 if not 1 >= hdi_prob > 0:
226 raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
227
228 plot_forest_kwargs = dict(
229 ax=ax,
230 datasets=datasets,
231 var_names=var_names,
232 model_names=model_names,
233 combined=combined,
234 colors=colors,
235 figsize=figsize,
236 width_ratios=width_ratios,
237 linewidth=linewidth,
238 markersize=markersize,
239 kind=kind,
240 ncols=ncols,
241 hdi_prob=hdi_prob,
242 quartiles=quartiles,
243 rope=rope,
244 ridgeplot_overlap=ridgeplot_overlap,
245 ridgeplot_alpha=ridgeplot_alpha,
246 ridgeplot_kind=ridgeplot_kind,
247 ridgeplot_truncate=ridgeplot_truncate,
248 ridgeplot_quantiles=ridgeplot_quantiles,
249 textsize=textsize,
250 legend=legend,
251 labeller=labeller,
252 ess=ess,
253 r_hat=r_hat,
254 backend_kwargs=backend_kwargs,
255 backend_config=backend_config,
256 show=show,
257 )
258
259 if backend is None:
260 backend = rcParams["plot.backend"]
261 backend = backend.lower()
262
263 # TODO: Add backend kwargs
264 plot = get_plotting_function("plot_forest", "forestplot", backend)
265 axes = plot(**plot_forest_kwargs)
266 return axes
267
[end of arviz/plots/forestplot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/arviz/plots/forestplot.py b/arviz/plots/forestplot.py
--- a/arviz/plots/forestplot.py
+++ b/arviz/plots/forestplot.py
@@ -145,6 +145,21 @@
>>> figsize=(9, 7))
>>> axes[0].set_title('Estimated theta for 8 schools model')
+ Forestplot with multiple datasets
+
+ .. plot::
+ :context: close-figs
+
+ >>> centered_data = az.load_arviz_data('centered_eight')
+ >>> axes = az.plot_forest([non_centered_data, centered_data],
+ >>> model_names = ["non centered eight", "centered eight"],
+ >>> kind='forestplot',
+ >>> var_names=["^the"],
+ >>> filter_vars="regex",
+ >>> combined=True,
+ >>> figsize=(9, 7))
+ >>> axes[0].set_title('Estimated theta for 8 schools models')
+
Forestpĺot with ropes
.. plot::
| {"golden_diff": "diff --git a/arviz/plots/forestplot.py b/arviz/plots/forestplot.py\n--- a/arviz/plots/forestplot.py\n+++ b/arviz/plots/forestplot.py\n@@ -145,6 +145,21 @@\n >>> figsize=(9, 7))\n >>> axes[0].set_title('Estimated theta for 8 schools model')\n \n+ Forestplot with multiple datasets\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> centered_data = az.load_arviz_data('centered_eight')\n+ >>> axes = az.plot_forest([non_centered_data, centered_data],\n+ >>> model_names = [\"non centered eight\", \"centered eight\"],\n+ >>> kind='forestplot',\n+ >>> var_names=[\"^the\"],\n+ >>> filter_vars=\"regex\",\n+ >>> combined=True,\n+ >>> figsize=(9, 7))\n+ >>> axes[0].set_title('Estimated theta for 8 schools models')\n+\n Forestp\u013aot with ropes\n \n .. plot::\n", "issue": "Adding plot_forest example for multiple datasets\nI was thinking that we could add an example of how to use `plot_forest` for multiple datasets. It would help newcomers and also the example would help us demonstrate the feature of legends as it is only valid for multiple datasets\n", "before_files": [{"content": "\"\"\"Forest plot.\"\"\"\nfrom ..data import convert_to_dataset\nfrom ..labels import BaseLabeller, NoModelLabeller\nfrom ..rcparams import rcParams\nfrom ..utils import _var_names, get_coords\nfrom .plot_utils import get_plotting_function\n\n\ndef plot_forest(\n data,\n kind=\"forestplot\",\n model_names=None,\n var_names=None,\n filter_vars=None,\n transform=None,\n coords=None,\n combined=False,\n hdi_prob=None,\n rope=None,\n quartiles=True,\n ess=False,\n r_hat=False,\n colors=\"cycle\",\n textsize=None,\n linewidth=None,\n markersize=None,\n legend=True,\n labeller=None,\n ridgeplot_alpha=None,\n ridgeplot_overlap=2,\n ridgeplot_kind=\"auto\",\n ridgeplot_truncate=True,\n ridgeplot_quantiles=None,\n figsize=None,\n ax=None,\n backend=None,\n backend_config=None,\n backend_kwargs=None,\n show=None,\n):\n \"\"\"Forest plot to compare HDI intervals from a number of distributions.\n\n Generates a forest plot of 100*(hdi_prob)% HDI intervals from a trace or list of traces.\n\n Parameters\n ----------\n data: obj or list[obj]\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n kind: str\n Choose kind of plot for main axis. Supports \"forestplot\" or \"ridgeplot\"\n model_names: list[str], optional\n List with names for the models in the list of data. Useful when plotting more that one\n dataset\n var_names: list[str], optional\n List of variables to plot (defaults to None, which results in all variables plotted)\n Prefix the variables by `~` when you want to exclude them from the plot.\n filter_vars: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret var_names as the real variables names. If \"like\", interpret\n var_names as substrings of the real variables names. If \"regex\", interpret var_names as\n regular expressions on the real variables names. A la `pandas.filter`.\n transform: callable\n Function to transform data (defaults to None i.e.the identity function)\n coords: dict, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n combined: bool\n Flag for combining multiple chains into a single chain. If False (default), chains will be\n plotted separately.\n hdi_prob: float, optional\n Plots highest posterior density interval for chosen percentage of density. 
Defaults to 0.94.\n rope: tuple or dictionary of tuples\n Lower and upper values of the Region Of Practical Equivalence. If a list with one interval\n only is provided, the ROPE will be displayed across the y-axis. If more than one interval is\n provided the length of the list should match the number of variables.\n quartiles: bool, optional\n Flag for plotting the interquartile range, in addition to the hdi_prob intervals.\n Defaults to True\n r_hat: bool, optional\n Flag for plotting Split R-hat statistics. Requires 2 or more chains. Defaults to False\n ess: bool, optional\n Flag for plotting the effective sample size. Defaults to False\n colors: list or string, optional\n list with valid matplotlib colors, one color per model. Alternative a string can be passed.\n If the string is `cycle`, it will automatically chose a color per model from the matplotlibs\n cycle. If a single color is passed, eg 'k', 'C2', 'red' this color will be used for all\n models. Defaults to 'cycle'.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n linewidth: int\n Line width throughout. If None it will be autoscaled based on figsize.\n markersize: int\n Markersize throughout. If None it will be autoscaled based on figsize.\n legend : bool, optional\n Show a legend with the color encoded model information.\n Defaults to true if there are multiple models\n labeller : labeller instance, optional\n Class providing the method `make_model_label` to generate the labels in the plot.\n Read the :ref:`label_guide` for more details and usage examples.\n ridgeplot_alpha: float\n Transparency for ridgeplot fill. If 0, border is colored by model, otherwise\n a black outline is used.\n ridgeplot_overlap: float\n Overlap height for ridgeplots.\n ridgeplot_kind: string\n By default (\"auto\") continuous variables are plotted using KDEs and discrete ones using\n histograms. To override this use \"hist\" to plot histograms and \"density\" for KDEs\n ridgeplot_truncate: bool\n Whether to truncate densities according to the value of hdi_prop. Defaults to True\n ridgeplot_quantiles: list\n Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.\n Defaults to None.\n figsize: tuple\n Figure size. If None it will be defined automatically.\n ax: axes, optional\n Matplotlib axes or bokeh figures.\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_config: dict, optional\n Currently specifies the bounds to use for bokeh axes. Defaults to value set in rcParams.\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n show: bool, optional\n Call backend show function.\n\n Returns\n -------\n gridspec: matplotlib GridSpec or bokeh figures\n\n Examples\n --------\n Forestp\u013aot\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> non_centered_data = az.load_arviz_data('non_centered_eight')\n >>> axes = az.plot_forest(non_centered_data,\n >>> kind='forestplot',\n >>> var_names=[\"^the\"],\n >>> filter_vars=\"regex\",\n >>> combined=True,\n >>> figsize=(9, 7))\n >>> axes[0].set_title('Estimated theta for 8 schools model')\n\n Forestp\u013aot with ropes\n\n .. 
plot::\n :context: close-figs\n\n >>> rope = {'theta': [{'school': 'Choate', 'rope': (2, 4)}], 'mu': [{'rope': (-2, 2)}]}\n >>> axes = az.plot_forest(non_centered_data,\n >>> rope=rope,\n >>> var_names='~tau',\n >>> combined=True,\n >>> figsize=(9, 7))\n >>> axes[0].set_title('Estimated theta for 8 schools model')\n\n\n Ridgeplot\n\n .. plot::\n :context: close-figs\n\n >>> axes = az.plot_forest(non_centered_data,\n >>> kind='ridgeplot',\n >>> var_names=['theta'],\n >>> combined=True,\n >>> ridgeplot_overlap=3,\n >>> colors='white',\n >>> figsize=(9, 7))\n >>> axes[0].set_title('Estimated theta for 8 schools model')\n\n Ridgeplot non-truncated and with quantiles\n\n .. plot::\n :context: close-figs\n\n >>> axes = az.plot_forest(non_centered_data,\n >>> kind='ridgeplot',\n >>> var_names=['theta'],\n >>> combined=True,\n >>> ridgeplot_truncate=False,\n >>> ridgeplot_quantiles=[.25, .5, .75],\n >>> ridgeplot_overlap=0.7,\n >>> colors='white',\n >>> figsize=(9, 7))\n >>> axes[0].set_title('Estimated theta for 8 schools model')\n \"\"\"\n if not isinstance(data, (list, tuple)):\n data = [data]\n if len(data) == 1:\n legend = False\n\n if coords is None:\n coords = {}\n\n if labeller is None:\n labeller = NoModelLabeller() if legend else BaseLabeller()\n\n datasets = [convert_to_dataset(datum) for datum in reversed(data)]\n if transform is not None:\n datasets = [transform(dataset) for dataset in datasets]\n datasets = get_coords(\n datasets, list(reversed(coords)) if isinstance(coords, (list, tuple)) else coords\n )\n\n var_names = _var_names(var_names, datasets, filter_vars)\n\n ncols, width_ratios = 1, [3]\n\n if ess:\n ncols += 1\n width_ratios.append(1)\n\n if r_hat:\n ncols += 1\n width_ratios.append(1)\n\n if hdi_prob is None:\n hdi_prob = rcParams[\"stats.hdi_prob\"]\n else:\n if not 1 >= hdi_prob > 0:\n raise ValueError(\"The value of hdi_prob should be in the interval (0, 1]\")\n\n plot_forest_kwargs = dict(\n ax=ax,\n datasets=datasets,\n var_names=var_names,\n model_names=model_names,\n combined=combined,\n colors=colors,\n figsize=figsize,\n width_ratios=width_ratios,\n linewidth=linewidth,\n markersize=markersize,\n kind=kind,\n ncols=ncols,\n hdi_prob=hdi_prob,\n quartiles=quartiles,\n rope=rope,\n ridgeplot_overlap=ridgeplot_overlap,\n ridgeplot_alpha=ridgeplot_alpha,\n ridgeplot_kind=ridgeplot_kind,\n ridgeplot_truncate=ridgeplot_truncate,\n ridgeplot_quantiles=ridgeplot_quantiles,\n textsize=textsize,\n legend=legend,\n labeller=labeller,\n ess=ess,\n r_hat=r_hat,\n backend_kwargs=backend_kwargs,\n backend_config=backend_config,\n show=show,\n )\n\n if backend is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_forest\", \"forestplot\", backend)\n axes = plot(**plot_forest_kwargs)\n return axes\n", "path": "arviz/plots/forestplot.py"}]} | 3,560 | 241 |
gh_patches_debug_24469 | rasdani/github-patches | git_diff | sanic-org__sanic-2606 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
HTTP 1 request headers decoded using default encoding instead of ISO-8859-1
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
headers are decoded here without specifying their encoding:
https://github.com/sanic-org/sanic/blob/ad4e526c775fc3ce950503d6476d9d344492b0dd/sanic/http/http1.py#L205
On my system (osx using python 3.10.8 installed via homebrew) this causes bytes that are valid characters in ISO-8859-1 but not in UTF-8 to be decoded as surrogate escape characters, e.g. `b"\x80"` becomes `"\udf80"` instead of `"\x80"`
### Code snippet
_No response_
### Expected Behavior
headers encoded as ISO-8859-1 with no MIME type to be decoded correctly without using UTF-8 surrogate escape characters.
### How do you run Sanic?
As a script (`app.run` or `Sanic.serve`)
### Operating System
linux
### Sanic Version
22.9.1
### Additional context
this used to work as expected in Sanic<=20.12.7
</issue>
<code>
[start of sanic/asgi.py]
1 from __future__ import annotations
2
3 import warnings
4
5 from typing import TYPE_CHECKING, Optional
6 from urllib.parse import quote
7
8 from sanic.compat import Header
9 from sanic.exceptions import ServerError
10 from sanic.helpers import Default
11 from sanic.http import Stage
12 from sanic.log import error_logger, logger
13 from sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport
14 from sanic.request import Request
15 from sanic.response import BaseHTTPResponse
16 from sanic.server import ConnInfo
17 from sanic.server.websockets.connection import WebSocketConnection
18
19
20 if TYPE_CHECKING:
21 from sanic import Sanic
22
23
24 class Lifespan:
25 def __init__(
26 self, sanic_app, scope: ASGIScope, receive: ASGIReceive, send: ASGISend
27 ) -> None:
28 self.sanic_app = sanic_app
29 self.scope = scope
30 self.receive = receive
31 self.send = send
32
33 if "server.init.before" in self.sanic_app.signal_router.name_index:
34 logger.debug(
35 'You have set a listener for "before_server_start" '
36 "in ASGI mode. "
37 "It will be executed as early as possible, but not before "
38 "the ASGI server is started.",
39 extra={"verbosity": 1},
40 )
41 if "server.shutdown.after" in self.sanic_app.signal_router.name_index:
42 logger.debug(
43 'You have set a listener for "after_server_stop" '
44 "in ASGI mode. "
45 "It will be executed as late as possible, but not after "
46 "the ASGI server is stopped.",
47 extra={"verbosity": 1},
48 )
49
50 async def startup(self) -> None:
51 """
52 Gather the listeners to fire on server start.
53 Because we are using a third-party server and not Sanic server, we do
54 not have access to fire anything BEFORE the server starts.
55 Therefore, we fire before_server_start and after_server_start
56 in sequence since the ASGI lifespan protocol only supports a single
57 startup event.
58 """
59 await self.sanic_app._startup()
60 await self.sanic_app._server_event("init", "before")
61 await self.sanic_app._server_event("init", "after")
62
63 if not isinstance(self.sanic_app.config.USE_UVLOOP, Default):
64 warnings.warn(
65 "You have set the USE_UVLOOP configuration option, but Sanic "
66 "cannot control the event loop when running in ASGI mode."
67 "This option will be ignored."
68 )
69
70 async def shutdown(self) -> None:
71 """
72 Gather the listeners to fire on server stop.
73 Because we are using a third-party server and not Sanic server, we do
74 not have access to fire anything AFTER the server stops.
75 Therefore, we fire before_server_stop and after_server_stop
76 in sequence since the ASGI lifespan protocol only supports a single
77 shutdown event.
78 """
79 await self.sanic_app._server_event("shutdown", "before")
80 await self.sanic_app._server_event("shutdown", "after")
81
82 async def __call__(self) -> None:
83 while True:
84 message = await self.receive()
85 if message["type"] == "lifespan.startup":
86 try:
87 await self.startup()
88 except Exception as e:
89 error_logger.exception(e)
90 await self.send(
91 {"type": "lifespan.startup.failed", "message": str(e)}
92 )
93 else:
94 await self.send({"type": "lifespan.startup.complete"})
95 elif message["type"] == "lifespan.shutdown":
96 try:
97 await self.shutdown()
98 except Exception as e:
99 error_logger.exception(e)
100 await self.send(
101 {"type": "lifespan.shutdown.failed", "message": str(e)}
102 )
103 else:
104 await self.send({"type": "lifespan.shutdown.complete"})
105 return
106
107
108 class ASGIApp:
109 sanic_app: Sanic
110 request: Request
111 transport: MockTransport
112 lifespan: Lifespan
113 ws: Optional[WebSocketConnection]
114 stage: Stage
115 response: Optional[BaseHTTPResponse]
116
117 @classmethod
118 async def create(
119 cls,
120 sanic_app: Sanic,
121 scope: ASGIScope,
122 receive: ASGIReceive,
123 send: ASGISend,
124 ) -> ASGIApp:
125 instance = cls()
126 instance.ws = None
127 instance.sanic_app = sanic_app
128 instance.transport = MockTransport(scope, receive, send)
129 instance.transport.loop = sanic_app.loop
130 instance.stage = Stage.IDLE
131 instance.response = None
132 instance.sanic_app.state.is_started = True
133 setattr(instance.transport, "add_task", sanic_app.loop.create_task)
134
135 headers = Header(
136 [
137 (key.decode("latin-1"), value.decode("latin-1"))
138 for key, value in scope.get("headers", [])
139 ]
140 )
141 path = (
142 scope["path"][1:]
143 if scope["path"].startswith("/")
144 else scope["path"]
145 )
146 url = "/".join([scope.get("root_path", ""), quote(path)])
147 url_bytes = url.encode("latin-1")
148 url_bytes += b"?" + scope["query_string"]
149
150 if scope["type"] == "http":
151 version = scope["http_version"]
152 method = scope["method"]
153 elif scope["type"] == "websocket":
154 version = "1.1"
155 method = "GET"
156
157 instance.ws = instance.transport.create_websocket_connection(
158 send, receive
159 )
160 else:
161 raise ServerError("Received unknown ASGI scope")
162
163 request_class = sanic_app.request_class or Request
164 instance.request = request_class(
165 url_bytes,
166 headers,
167 version,
168 method,
169 instance.transport,
170 sanic_app,
171 )
172 instance.request.stream = instance # type: ignore
173 instance.request_body = True
174 instance.request.conn_info = ConnInfo(instance.transport)
175
176 await instance.sanic_app.dispatch(
177 "http.lifecycle.request",
178 inline=True,
179 context={"request": instance.request},
180 fail_not_found=False,
181 )
182
183 return instance
184
185 async def read(self) -> Optional[bytes]:
186 """
187 Read and stream the body in chunks from an incoming ASGI message.
188 """
189 if self.stage is Stage.IDLE:
190 self.stage = Stage.REQUEST
191 message = await self.transport.receive()
192 body = message.get("body", b"")
193 if not message.get("more_body", False):
194 self.request_body = False
195 if not body:
196 return None
197 return body
198
199 async def __aiter__(self):
200 while self.request_body:
201 data = await self.read()
202 if data:
203 yield data
204
205 def respond(self, response: BaseHTTPResponse):
206 if self.stage is not Stage.HANDLER:
207 self.stage = Stage.FAILED
208 raise RuntimeError("Response already started")
209 if self.response is not None:
210 self.response.stream = None
211 response.stream, self.response = self, response
212 return response
213
214 async def send(self, data, end_stream):
215 self.stage = Stage.IDLE if end_stream else Stage.RESPONSE
216 if self.response:
217 response, self.response = self.response, None
218 await self.transport.send(
219 {
220 "type": "http.response.start",
221 "status": response.status,
222 "headers": response.processed_headers,
223 }
224 )
225 response_body = getattr(response, "body", None)
226 if response_body:
227 data = response_body + data if data else response_body
228 await self.transport.send(
229 {
230 "type": "http.response.body",
231 "body": data.encode() if hasattr(data, "encode") else data,
232 "more_body": not end_stream,
233 }
234 )
235
236 _asgi_single_callable = True # We conform to ASGI 3.0 single-callable
237
238 async def __call__(self) -> None:
239 """
240 Handle the incoming request.
241 """
242 try:
243 self.stage = Stage.HANDLER
244 await self.sanic_app.handle_request(self.request)
245 except Exception as e:
246 try:
247 await self.sanic_app.handle_exception(self.request, e)
248 except Exception as exc:
249 await self.sanic_app.handle_exception(self.request, exc, False)
250
[end of sanic/asgi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sanic/asgi.py b/sanic/asgi.py
--- a/sanic/asgi.py
+++ b/sanic/asgi.py
@@ -6,7 +6,7 @@
from urllib.parse import quote
from sanic.compat import Header
-from sanic.exceptions import ServerError
+from sanic.exceptions import BadRequest, ServerError
from sanic.helpers import Default
from sanic.http import Stage
from sanic.log import error_logger, logger
@@ -132,12 +132,20 @@
instance.sanic_app.state.is_started = True
setattr(instance.transport, "add_task", sanic_app.loop.create_task)
- headers = Header(
- [
- (key.decode("latin-1"), value.decode("latin-1"))
- for key, value in scope.get("headers", [])
- ]
- )
+ try:
+ headers = Header(
+ [
+ (
+ key.decode("ASCII"),
+ value.decode(errors="surrogateescape"),
+ )
+ for key, value in scope.get("headers", [])
+ ]
+ )
+ except UnicodeDecodeError:
+ raise BadRequest(
+ "Header names can only contain US-ASCII characters"
+ )
path = (
scope["path"][1:]
if scope["path"].startswith("/")
| {"golden_diff": "diff --git a/sanic/asgi.py b/sanic/asgi.py\n--- a/sanic/asgi.py\n+++ b/sanic/asgi.py\n@@ -6,7 +6,7 @@\n from urllib.parse import quote\n \n from sanic.compat import Header\n-from sanic.exceptions import ServerError\n+from sanic.exceptions import BadRequest, ServerError\n from sanic.helpers import Default\n from sanic.http import Stage\n from sanic.log import error_logger, logger\n@@ -132,12 +132,20 @@\n instance.sanic_app.state.is_started = True\n setattr(instance.transport, \"add_task\", sanic_app.loop.create_task)\n \n- headers = Header(\n- [\n- (key.decode(\"latin-1\"), value.decode(\"latin-1\"))\n- for key, value in scope.get(\"headers\", [])\n- ]\n- )\n+ try:\n+ headers = Header(\n+ [\n+ (\n+ key.decode(\"ASCII\"),\n+ value.decode(errors=\"surrogateescape\"),\n+ )\n+ for key, value in scope.get(\"headers\", [])\n+ ]\n+ )\n+ except UnicodeDecodeError:\n+ raise BadRequest(\n+ \"Header names can only contain US-ASCII characters\"\n+ )\n path = (\n scope[\"path\"][1:]\n if scope[\"path\"].startswith(\"/\")\n", "issue": "HTTP 1 request headers decoded using default encoding instead of ISO-8859-1\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Describe the bug\n\nheaders are decoded here without specifying their encoding:\r\n\r\nhttps://github.com/sanic-org/sanic/blob/ad4e526c775fc3ce950503d6476d9d344492b0dd/sanic/http/http1.py#L205\r\n\r\nOn my system (osx using python 3.10.8 installed via homebrew) this causes bytes that are valid characters in ISO-8859-1 but not in UTF-8 to be decoded as surrogate escape characters, e.g. `b\"\\x80\"` becomes `\"\\udf80\"` instead of `\"\\x80\"`\n\n### Code snippet\n\n_No response_\n\n### Expected Behavior\n\nheaders encoded as ISO-8859-1 with no MIME type to be decoded correctly without using UTF-8 surrogate escape characters.\n\n### How do you run Sanic?\n\nAs a script (`app.run` or `Sanic.serve`)\n\n### Operating System\n\nlinux\n\n### Sanic Version\n\n22.9.1\n\n### Additional context\n\nthis used to work as expected in Sanic<=20.12.7\n", "before_files": [{"content": "from __future__ import annotations\n\nimport warnings\n\nfrom typing import TYPE_CHECKING, Optional\nfrom urllib.parse import quote\n\nfrom sanic.compat import Header\nfrom sanic.exceptions import ServerError\nfrom sanic.helpers import Default\nfrom sanic.http import Stage\nfrom sanic.log import error_logger, logger\nfrom sanic.models.asgi import ASGIReceive, ASGIScope, ASGISend, MockTransport\nfrom sanic.request import Request\nfrom sanic.response import BaseHTTPResponse\nfrom sanic.server import ConnInfo\nfrom sanic.server.websockets.connection import WebSocketConnection\n\n\nif TYPE_CHECKING:\n from sanic import Sanic\n\n\nclass Lifespan:\n def __init__(\n self, sanic_app, scope: ASGIScope, receive: ASGIReceive, send: ASGISend\n ) -> None:\n self.sanic_app = sanic_app\n self.scope = scope\n self.receive = receive\n self.send = send\n\n if \"server.init.before\" in self.sanic_app.signal_router.name_index:\n logger.debug(\n 'You have set a listener for \"before_server_start\" '\n \"in ASGI mode. \"\n \"It will be executed as early as possible, but not before \"\n \"the ASGI server is started.\",\n extra={\"verbosity\": 1},\n )\n if \"server.shutdown.after\" in self.sanic_app.signal_router.name_index:\n logger.debug(\n 'You have set a listener for \"after_server_stop\" '\n \"in ASGI mode. 
\"\n \"It will be executed as late as possible, but not after \"\n \"the ASGI server is stopped.\",\n extra={\"verbosity\": 1},\n )\n\n async def startup(self) -> None:\n \"\"\"\n Gather the listeners to fire on server start.\n Because we are using a third-party server and not Sanic server, we do\n not have access to fire anything BEFORE the server starts.\n Therefore, we fire before_server_start and after_server_start\n in sequence since the ASGI lifespan protocol only supports a single\n startup event.\n \"\"\"\n await self.sanic_app._startup()\n await self.sanic_app._server_event(\"init\", \"before\")\n await self.sanic_app._server_event(\"init\", \"after\")\n\n if not isinstance(self.sanic_app.config.USE_UVLOOP, Default):\n warnings.warn(\n \"You have set the USE_UVLOOP configuration option, but Sanic \"\n \"cannot control the event loop when running in ASGI mode.\"\n \"This option will be ignored.\"\n )\n\n async def shutdown(self) -> None:\n \"\"\"\n Gather the listeners to fire on server stop.\n Because we are using a third-party server and not Sanic server, we do\n not have access to fire anything AFTER the server stops.\n Therefore, we fire before_server_stop and after_server_stop\n in sequence since the ASGI lifespan protocol only supports a single\n shutdown event.\n \"\"\"\n await self.sanic_app._server_event(\"shutdown\", \"before\")\n await self.sanic_app._server_event(\"shutdown\", \"after\")\n\n async def __call__(self) -> None:\n while True:\n message = await self.receive()\n if message[\"type\"] == \"lifespan.startup\":\n try:\n await self.startup()\n except Exception as e:\n error_logger.exception(e)\n await self.send(\n {\"type\": \"lifespan.startup.failed\", \"message\": str(e)}\n )\n else:\n await self.send({\"type\": \"lifespan.startup.complete\"})\n elif message[\"type\"] == \"lifespan.shutdown\":\n try:\n await self.shutdown()\n except Exception as e:\n error_logger.exception(e)\n await self.send(\n {\"type\": \"lifespan.shutdown.failed\", \"message\": str(e)}\n )\n else:\n await self.send({\"type\": \"lifespan.shutdown.complete\"})\n return\n\n\nclass ASGIApp:\n sanic_app: Sanic\n request: Request\n transport: MockTransport\n lifespan: Lifespan\n ws: Optional[WebSocketConnection]\n stage: Stage\n response: Optional[BaseHTTPResponse]\n\n @classmethod\n async def create(\n cls,\n sanic_app: Sanic,\n scope: ASGIScope,\n receive: ASGIReceive,\n send: ASGISend,\n ) -> ASGIApp:\n instance = cls()\n instance.ws = None\n instance.sanic_app = sanic_app\n instance.transport = MockTransport(scope, receive, send)\n instance.transport.loop = sanic_app.loop\n instance.stage = Stage.IDLE\n instance.response = None\n instance.sanic_app.state.is_started = True\n setattr(instance.transport, \"add_task\", sanic_app.loop.create_task)\n\n headers = Header(\n [\n (key.decode(\"latin-1\"), value.decode(\"latin-1\"))\n for key, value in scope.get(\"headers\", [])\n ]\n )\n path = (\n scope[\"path\"][1:]\n if scope[\"path\"].startswith(\"/\")\n else scope[\"path\"]\n )\n url = \"/\".join([scope.get(\"root_path\", \"\"), quote(path)])\n url_bytes = url.encode(\"latin-1\")\n url_bytes += b\"?\" + scope[\"query_string\"]\n\n if scope[\"type\"] == \"http\":\n version = scope[\"http_version\"]\n method = scope[\"method\"]\n elif scope[\"type\"] == \"websocket\":\n version = \"1.1\"\n method = \"GET\"\n\n instance.ws = instance.transport.create_websocket_connection(\n send, receive\n )\n else:\n raise ServerError(\"Received unknown ASGI scope\")\n\n request_class = sanic_app.request_class or 
Request\n instance.request = request_class(\n url_bytes,\n headers,\n version,\n method,\n instance.transport,\n sanic_app,\n )\n instance.request.stream = instance # type: ignore\n instance.request_body = True\n instance.request.conn_info = ConnInfo(instance.transport)\n\n await instance.sanic_app.dispatch(\n \"http.lifecycle.request\",\n inline=True,\n context={\"request\": instance.request},\n fail_not_found=False,\n )\n\n return instance\n\n async def read(self) -> Optional[bytes]:\n \"\"\"\n Read and stream the body in chunks from an incoming ASGI message.\n \"\"\"\n if self.stage is Stage.IDLE:\n self.stage = Stage.REQUEST\n message = await self.transport.receive()\n body = message.get(\"body\", b\"\")\n if not message.get(\"more_body\", False):\n self.request_body = False\n if not body:\n return None\n return body\n\n async def __aiter__(self):\n while self.request_body:\n data = await self.read()\n if data:\n yield data\n\n def respond(self, response: BaseHTTPResponse):\n if self.stage is not Stage.HANDLER:\n self.stage = Stage.FAILED\n raise RuntimeError(\"Response already started\")\n if self.response is not None:\n self.response.stream = None\n response.stream, self.response = self, response\n return response\n\n async def send(self, data, end_stream):\n self.stage = Stage.IDLE if end_stream else Stage.RESPONSE\n if self.response:\n response, self.response = self.response, None\n await self.transport.send(\n {\n \"type\": \"http.response.start\",\n \"status\": response.status,\n \"headers\": response.processed_headers,\n }\n )\n response_body = getattr(response, \"body\", None)\n if response_body:\n data = response_body + data if data else response_body\n await self.transport.send(\n {\n \"type\": \"http.response.body\",\n \"body\": data.encode() if hasattr(data, \"encode\") else data,\n \"more_body\": not end_stream,\n }\n )\n\n _asgi_single_callable = True # We conform to ASGI 3.0 single-callable\n\n async def __call__(self) -> None:\n \"\"\"\n Handle the incoming request.\n \"\"\"\n try:\n self.stage = Stage.HANDLER\n await self.sanic_app.handle_request(self.request)\n except Exception as e:\n try:\n await self.sanic_app.handle_exception(self.request, e)\n except Exception as exc:\n await self.sanic_app.handle_exception(self.request, exc, False)\n", "path": "sanic/asgi.py"}]} | 3,272 | 289 |
gh_patches_debug_35056 | rasdani/github-patches | git_diff | opsdroid__opsdroid-142 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make crontab parser timezone aware
The crontab matcher should take a timezone as a kwarg. It should also be possible to set a global timezone in the config. Default should be UTC.
</issue>
<code>
[start of opsdroid/matchers.py]
1 """Decorator functions to use when creating skill modules."""
2
3 import logging
4
5 from opsdroid.helper import get_opsdroid
6 from opsdroid.web import Web
7
8
9 _LOGGER = logging.getLogger(__name__)
10
11
12 def match_regex(regex):
13 """Return regex match decorator."""
14 def matcher(func):
15 """Add decorated function to skills list for regex matching."""
16 opsdroid = get_opsdroid()
17 opsdroid.skills.append({"regex": regex, "skill": func,
18 "config":
19 opsdroid.loader.current_import_config})
20 return func
21 return matcher
22
23
24 def match_apiai_action(action):
25 """Return apiai action match decorator."""
26 def matcher(func):
27 """Add decorated function to skills list for apiai matching."""
28 opsdroid = get_opsdroid()
29 opsdroid.skills.append({"apiai_action": action, "skill": func,
30 "config":
31 opsdroid.loader.current_import_config})
32 return func
33 return matcher
34
35
36 def match_apiai_intent(intent):
37 """Return apiai intent match decorator."""
38 def matcher(func):
39 """Add decorated function to skills list for apiai matching."""
40 opsdroid = get_opsdroid()
41 opsdroid.skills.append({"apiai_intent": intent, "skill": func,
42 "config":
43 opsdroid.loader.current_import_config})
44 return func
45 return matcher
46
47
48 def match_crontab(crontab):
49 """Return crontab match decorator."""
50 def matcher(func):
51 """Add decorated function to skills list for crontab matching."""
52 opsdroid = get_opsdroid()
53 opsdroid.skills.append({"crontab": crontab, "skill": func,
54 "config":
55 opsdroid.loader.current_import_config})
56 return func
57 return matcher
58
59
60 def match_webhook(webhook):
61 """Return webhook match decorator."""
62 def matcher(func):
63 """Add decorated function to skills list for webhook matching."""
64 opsdroid = get_opsdroid()
65 config = opsdroid.loader.current_import_config
66 opsdroid.skills.append({"webhook": webhook, "skill": func,
67 "config": config})
68
69 async def wrapper(req, opsdroid=opsdroid, config=config):
70 """Wrap up the aiohttp handler."""
71 _LOGGER.info("Running skill %s via webhook", webhook)
72 opsdroid.stats["webhooks_called"] = \
73 opsdroid.stats["webhooks_called"] + 1
74 await func(opsdroid, config, req)
75 return Web.build_response(200, {"called_skill": webhook})
76
77 opsdroid.web_server.web_app.router.add_post(
78 "/skill/{}/{}".format(config["name"], webhook), wrapper)
79 opsdroid.web_server.web_app.router.add_post(
80 "/skill/{}/{}/".format(config["name"], webhook), wrapper)
81
82 return func
83 return matcher
84
[end of opsdroid/matchers.py]
[start of opsdroid/parsers/crontab.py]
1 """A helper function for parsing and executing crontab skills."""
2
3 import logging
4 import asyncio
5 from datetime import datetime
6
7 import pycron
8
9
10 _LOGGER = logging.getLogger(__name__)
11
12
13 async def parse_crontab(opsdroid):
14 """Parse all crontab skills against the current time."""
15 # pylint: disable=broad-except
16 # We want to catch all exceptions coming from a skill module and not
17 # halt the application. If a skill throws an exception it just doesn't
18 # give a response to the user, so an error response should be given.
19 while opsdroid.eventloop.is_running():
20 await asyncio.sleep(60 - datetime.now().time().second)
21 _LOGGER.debug("Running crontab skills")
22 for skill in opsdroid.skills:
23 if "crontab" in skill and pycron.is_now(skill["crontab"]):
24 try:
25 await skill["skill"](opsdroid, skill["config"], None)
26 except Exception:
27 _LOGGER.exception("Exception when executing cron skill.")
28
[end of opsdroid/parsers/crontab.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opsdroid/matchers.py b/opsdroid/matchers.py
--- a/opsdroid/matchers.py
+++ b/opsdroid/matchers.py
@@ -45,14 +45,14 @@
return matcher
-def match_crontab(crontab):
+def match_crontab(crontab, timezone=None):
"""Return crontab match decorator."""
def matcher(func):
"""Add decorated function to skills list for crontab matching."""
opsdroid = get_opsdroid()
+ config = opsdroid.loader.current_import_config
opsdroid.skills.append({"crontab": crontab, "skill": func,
- "config":
- opsdroid.loader.current_import_config})
+ "config": config, "timezone": timezone})
return func
return matcher
diff --git a/opsdroid/parsers/crontab.py b/opsdroid/parsers/crontab.py
--- a/opsdroid/parsers/crontab.py
+++ b/opsdroid/parsers/crontab.py
@@ -1,9 +1,9 @@
"""A helper function for parsing and executing crontab skills."""
-import logging
import asyncio
-from datetime import datetime
+import logging
+import arrow
import pycron
@@ -17,11 +17,17 @@
# halt the application. If a skill throws an exception it just doesn't
# give a response to the user, so an error response should be given.
while opsdroid.eventloop.is_running():
- await asyncio.sleep(60 - datetime.now().time().second)
+ await asyncio.sleep(60 - arrow.now().time().second)
_LOGGER.debug("Running crontab skills")
for skill in opsdroid.skills:
- if "crontab" in skill and pycron.is_now(skill["crontab"]):
- try:
- await skill["skill"](opsdroid, skill["config"], None)
- except Exception:
- _LOGGER.exception("Exception when executing cron skill.")
+ if "crontab" in skill:
+ if skill["timezone"] is not None:
+ timezone = skill["timezone"]
+ else:
+ timezone = opsdroid.config.get("timezone", "UTC")
+ if pycron.is_now(skill["crontab"], arrow.now(tz=timezone)):
+ try:
+ await skill["skill"](opsdroid, skill["config"], None)
+ except Exception:
+ _LOGGER.exception(
+ "Exception when executing cron skill.")
| {"golden_diff": "diff --git a/opsdroid/matchers.py b/opsdroid/matchers.py\n--- a/opsdroid/matchers.py\n+++ b/opsdroid/matchers.py\n@@ -45,14 +45,14 @@\n return matcher\n \n \n-def match_crontab(crontab):\n+def match_crontab(crontab, timezone=None):\n \"\"\"Return crontab match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for crontab matching.\"\"\"\n opsdroid = get_opsdroid()\n+ config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"crontab\": crontab, \"skill\": func,\n- \"config\":\n- opsdroid.loader.current_import_config})\n+ \"config\": config, \"timezone\": timezone})\n return func\n return matcher\n \ndiff --git a/opsdroid/parsers/crontab.py b/opsdroid/parsers/crontab.py\n--- a/opsdroid/parsers/crontab.py\n+++ b/opsdroid/parsers/crontab.py\n@@ -1,9 +1,9 @@\n \"\"\"A helper function for parsing and executing crontab skills.\"\"\"\n \n-import logging\n import asyncio\n-from datetime import datetime\n+import logging\n \n+import arrow\n import pycron\n \n \n@@ -17,11 +17,17 @@\n # halt the application. If a skill throws an exception it just doesn't\n # give a response to the user, so an error response should be given.\n while opsdroid.eventloop.is_running():\n- await asyncio.sleep(60 - datetime.now().time().second)\n+ await asyncio.sleep(60 - arrow.now().time().second)\n _LOGGER.debug(\"Running crontab skills\")\n for skill in opsdroid.skills:\n- if \"crontab\" in skill and pycron.is_now(skill[\"crontab\"]):\n- try:\n- await skill[\"skill\"](opsdroid, skill[\"config\"], None)\n- except Exception:\n- _LOGGER.exception(\"Exception when executing cron skill.\")\n+ if \"crontab\" in skill:\n+ if skill[\"timezone\"] is not None:\n+ timezone = skill[\"timezone\"]\n+ else:\n+ timezone = opsdroid.config.get(\"timezone\", \"UTC\")\n+ if pycron.is_now(skill[\"crontab\"], arrow.now(tz=timezone)):\n+ try:\n+ await skill[\"skill\"](opsdroid, skill[\"config\"], None)\n+ except Exception:\n+ _LOGGER.exception(\n+ \"Exception when executing cron skill.\")\n", "issue": "Make crontab parser timezone aware\nThe crontab matcher should take a timezone as a kwarg. It should also be possible to set a global timezone in the config. 
Default should be UTC.\n", "before_files": [{"content": "\"\"\"Decorator functions to use when creating skill modules.\"\"\"\n\nimport logging\n\nfrom opsdroid.helper import get_opsdroid\nfrom opsdroid.web import Web\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef match_regex(regex):\n \"\"\"Return regex match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for regex matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"regex\": regex, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_apiai_action(action):\n \"\"\"Return apiai action match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for apiai matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"apiai_action\": action, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_apiai_intent(intent):\n \"\"\"Return apiai intent match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for apiai matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"apiai_intent\": intent, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_crontab(crontab):\n \"\"\"Return crontab match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for crontab matching.\"\"\"\n opsdroid = get_opsdroid()\n opsdroid.skills.append({\"crontab\": crontab, \"skill\": func,\n \"config\":\n opsdroid.loader.current_import_config})\n return func\n return matcher\n\n\ndef match_webhook(webhook):\n \"\"\"Return webhook match decorator.\"\"\"\n def matcher(func):\n \"\"\"Add decorated function to skills list for webhook matching.\"\"\"\n opsdroid = get_opsdroid()\n config = opsdroid.loader.current_import_config\n opsdroid.skills.append({\"webhook\": webhook, \"skill\": func,\n \"config\": config})\n\n async def wrapper(req, opsdroid=opsdroid, config=config):\n \"\"\"Wrap up the aiohttp handler.\"\"\"\n _LOGGER.info(\"Running skill %s via webhook\", webhook)\n opsdroid.stats[\"webhooks_called\"] = \\\n opsdroid.stats[\"webhooks_called\"] + 1\n await func(opsdroid, config, req)\n return Web.build_response(200, {\"called_skill\": webhook})\n\n opsdroid.web_server.web_app.router.add_post(\n \"/skill/{}/{}\".format(config[\"name\"], webhook), wrapper)\n opsdroid.web_server.web_app.router.add_post(\n \"/skill/{}/{}/\".format(config[\"name\"], webhook), wrapper)\n\n return func\n return matcher\n", "path": "opsdroid/matchers.py"}, {"content": "\"\"\"A helper function for parsing and executing crontab skills.\"\"\"\n\nimport logging\nimport asyncio\nfrom datetime import datetime\n\nimport pycron\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def parse_crontab(opsdroid):\n \"\"\"Parse all crontab skills against the current time.\"\"\"\n # pylint: disable=broad-except\n # We want to catch all exceptions coming from a skill module and not\n # halt the application. 
If a skill throws an exception it just doesn't\n # give a response to the user, so an error response should be given.\n while opsdroid.eventloop.is_running():\n await asyncio.sleep(60 - datetime.now().time().second)\n _LOGGER.debug(\"Running crontab skills\")\n for skill in opsdroid.skills:\n if \"crontab\" in skill and pycron.is_now(skill[\"crontab\"]):\n try:\n await skill[\"skill\"](opsdroid, skill[\"config\"], None)\n except Exception:\n _LOGGER.exception(\"Exception when executing cron skill.\")\n", "path": "opsdroid/parsers/crontab.py"}]} | 1,663 | 573 |
gh_patches_debug_33329 | rasdani/github-patches | git_diff | translate__pootle-4714 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
fix update_stores not changing fuzzy state
regression from 80d35df2b2c3987470873c541c288dc95c0ec79e
ignoring change in state makes it impossible to update the fuzzy state
of a translation unit by modifying the files on disk, and running
update_stores. With state ignored in the comparison, those changes are
silently ignored (and then get overridden again on next sync_stores)
</issue>
<code>
[start of pootle/apps/pootle_store/diff.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import difflib
10 from collections import OrderedDict
11
12 from django.utils.functional import cached_property
13
14 from .fields import to_python as multistring_to_python
15 from .unit import UnitProxy
16 from .util import OBSOLETE
17
18
19 class UnitDiffProxy(UnitProxy):
20 """Wraps File/DB Unit dicts used by StoreDiff for equality comparison"""
21
22 match_attrs = ["context", "developer_comment", "locations",
23 "source", "target", "translator_comment"]
24
25 def __eq__(self, other):
26 return all(getattr(self, k) == getattr(other, k)
27 for k in self.match_attrs)
28
29 def __ne__(self, other):
30 return not self == other
31
32
33 class DBUnit(UnitDiffProxy):
34
35 pass
36
37
38 class FileUnit(UnitDiffProxy):
39
40 @property
41 def locations(self):
42 return "\n".join(self.unit["locations"])
43
44 @property
45 def source(self):
46 return multistring_to_python(self.unit["source"])
47
48 @property
49 def target(self):
50 return multistring_to_python(self.unit["target"])
51
52
53 class StoreDiff(object):
54
55 def __init__(self, db_store, file_store, file_revision):
56 self.db_store = db_store
57 self.file_store = file_store
58 self.db_revision = db_store.get_max_unit_revision()
59 self.file_revision = file_revision
60
61 @cached_property
62 def active_units(self):
63 return [unitid for unitid, unit in self.db_units.items()
64 if unit['state'] != OBSOLETE]
65
66 @cached_property
67 def db_units(self):
68 """All of the db units regardless of state or revision"""
69 db_units = OrderedDict()
70 unit_fields = ("unitid", "state", "id", "index", "revision",
71 "source_f", "target_f", "developer_comment",
72 "translator_comment", "locations", "context")
73 for unit in self.db_store.unit_set.values(*unit_fields).order_by("index"):
74 db_units[unit["unitid"]] = unit
75 return db_units
76
77 @cached_property
78 def file_units(self):
79 file_units = OrderedDict()
80 for unit in self.file_store.units:
81 if unit.isheader():
82 continue
83 file_units[unit.getid()] = {
84 "unitid": unit.getid(),
85 "context": unit.getcontext(),
86 "locations": unit.getlocations(),
87 "source": unit.source,
88 "target": unit.target,
89 "state": unit.get_state_n(),
90 "developer_comment": unit.getnotes(origin="developer"),
91 "translator_comment": unit.getnotes(origin="translator")}
92 return file_units
93
94 @cached_property
95 def insert_points(self):
96 """Returns a list of insert points with update index info.
97 :return: a list of tuples
98 ``(insert_at, uids_to_add, next_index, update_index_delta)`` where
99 ``insert_at`` is the point for inserting
100 ``uids_to_add`` are the units to be inserted
101 ``update_index_delta`` is the offset for index updating
102 ``next_index`` is the starting point after which
103 ``update_index_delta`` should be applied.
104 """
105 inserts = []
106 new_unitid_list = self.new_unit_list
107 for (tag, i1, i2, j1, j2) in self.opcodes:
108 if tag == 'insert':
109 update_index_delta = 0
110 insert_at = 0
111 if i1 > 0:
112 insert_at = (
113 self.db_units[self.active_units[i1 - 1]]['index'])
114 next_index = insert_at + 1
115 if i1 < len(self.active_units):
116 next_index = self.db_units[self.active_units[i1]]["index"]
117 update_index_delta = (
118 j2 - j1 - next_index + insert_at + 1)
119
120 inserts.append((insert_at,
121 new_unitid_list[j1:j2],
122 next_index,
123 update_index_delta))
124
125 elif tag == 'replace':
126 insert_at = self.db_units[self.active_units[i1 - 1]]['index']
127 next_index = self.db_units[self.active_units[i2 - 1]]['index']
128 inserts.append((insert_at,
129 new_unitid_list[j1:j2],
130 next_index,
131 j2 - j1 - insert_at + next_index))
132
133 return inserts
134
135 @cached_property
136 def new_unit_list(self):
137 # If file_revision is gte than the db_revision then new unit list
138 # will be exactly what is in the file
139 if self.file_revision >= self.db_revision:
140 return self.file_units.keys()
141
142 # These units are kept as they have been updated since file_revision
143 # but do not appear in the file
144 new_units = [u for u in self.updated_db_units
145 if u not in self.file_units]
146
147 # These unit are either present in both or only in the file so are
148 # kept in the file order
149 new_units += [u for u in self.file_units.keys()
150 if u not in self.obsoleted_db_units]
151
152 return new_units
153
154 @cached_property
155 def obsoleted_db_units(self):
156 return [unitid for unitid, unit in self.db_units.items()
157 if (unit['state'] == OBSOLETE
158 and unit["revision"] > self.file_revision)]
159
160 @cached_property
161 def opcodes(self):
162 sm = difflib.SequenceMatcher(None,
163 self.active_units,
164 self.new_unit_list)
165 return sm.get_opcodes()
166
167 @cached_property
168 def updated_db_units(self):
169 return [unitid for unitid, unit in self.db_units.items()
170 if (unit['revision'] > self.file_revision
171 and unit["state"] != OBSOLETE)]
172
173 def diff(self):
174 """Return a dictionary of change actions or None if there are no
175 changes to be made.
176 """
177 diff = {"index": self.get_indexes_to_update(),
178 "obsolete": self.get_units_to_obsolete(),
179 "add": self.get_units_to_add(),
180 "update": self.get_units_to_update()}
181 if self.has_changes(diff):
182 return diff
183 return None
184
185 def get_indexes_to_update(self):
186 offset = 0
187 index_updates = []
188 for (insert_at, uids_add, next_index, delta) in self.insert_points:
189 if delta > 0:
190 index_updates += [(next_index + offset, delta)]
191 offset += delta
192 return index_updates
193
194 def get_units_to_add(self):
195 offset = 0
196 to_add = []
197 for (insert_at, uids_add, next_index, delta) in self.insert_points:
198 for index, uid in enumerate(uids_add):
199 file_unit = self.file_store.findid(uid)
200 if file_unit and file_unit.getid() not in self.db_units:
201 new_unit_index = insert_at + index + 1 + offset
202 to_add += [(file_unit, new_unit_index)]
203 if delta > 0:
204 offset += delta
205 return to_add
206
207 def get_units_to_obsolete(self):
208 return [unit['id'] for unitid, unit in self.db_units.items()
209 if (unitid not in self.file_units
210 and unitid in self.active_units
211 and unitid not in self.updated_db_units)]
212
213 def get_units_to_update(self):
214 uid_index_map = {}
215 offset = 0
216
217 for (insert_at, uids_add, next_index, delta) in self.insert_points:
218 for index, uid in enumerate(uids_add):
219 new_unit_index = insert_at + index + 1 + offset
220 if uid in self.db_units:
221 uid_index_map[uid] = {
222 'dbid': self.db_units[uid]['id'],
223 'index': new_unit_index}
224 if delta > 0:
225 offset += delta
226 update_dbids = self.get_updated_dbids()
227 update_dbids.update({x['dbid'] for x in uid_index_map.values()})
228 return (update_dbids, uid_index_map)
229
230 def get_updated_dbids(self):
231 """Returns a set of unit DB ids to be updated.
232 """
233 update_dbids = set()
234
235 for (tag, i1, i2, j1, j2) in self.opcodes:
236 if tag == 'equal':
237 update_dbids.update(
238 set(self.db_units[uid]['id']
239 for uid in self.active_units[i1:i2]
240 if (uid in self.file_units
241 and (DBUnit(self.db_units[uid])
242 != FileUnit(self.file_units[uid])))))
243 return update_dbids
244
245 def has_changes(self, diff):
246 for k, v in diff.items():
247 if k == "update":
248 if len(v[0]) > 0:
249 return True
250 else:
251 if len(v) > 0:
252 return True
253 return False
254
[end of pootle/apps/pootle_store/diff.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_store/diff.py b/pootle/apps/pootle_store/diff.py
--- a/pootle/apps/pootle_store/diff.py
+++ b/pootle/apps/pootle_store/diff.py
@@ -13,14 +13,14 @@
from .fields import to_python as multistring_to_python
from .unit import UnitProxy
-from .util import OBSOLETE
+from .util import FUZZY, OBSOLETE, TRANSLATED, UNTRANSLATED
class UnitDiffProxy(UnitProxy):
"""Wraps File/DB Unit dicts used by StoreDiff for equality comparison"""
match_attrs = ["context", "developer_comment", "locations",
- "source", "target", "translator_comment"]
+ "source", "state", "target", "translator_comment"]
def __eq__(self, other):
return all(getattr(self, k) == getattr(other, k)
@@ -31,7 +31,6 @@
class DBUnit(UnitDiffProxy):
-
pass
@@ -80,13 +79,20 @@
for unit in self.file_store.units:
if unit.isheader():
continue
+ state = UNTRANSLATED
+ if unit.isobsolete():
+ state = OBSOLETE
+ elif unit.istranslated():
+ state = TRANSLATED
+ elif unit.isfuzzy():
+ state = FUZZY
file_units[unit.getid()] = {
"unitid": unit.getid(),
"context": unit.getcontext(),
"locations": unit.getlocations(),
"source": unit.source,
"target": unit.target,
- "state": unit.get_state_n(),
+ "state": state,
"developer_comment": unit.getnotes(origin="developer"),
"translator_comment": unit.getnotes(origin="translator")}
return file_units
| {"golden_diff": "diff --git a/pootle/apps/pootle_store/diff.py b/pootle/apps/pootle_store/diff.py\n--- a/pootle/apps/pootle_store/diff.py\n+++ b/pootle/apps/pootle_store/diff.py\n@@ -13,14 +13,14 @@\n \n from .fields import to_python as multistring_to_python\n from .unit import UnitProxy\n-from .util import OBSOLETE\n+from .util import FUZZY, OBSOLETE, TRANSLATED, UNTRANSLATED\n \n \n class UnitDiffProxy(UnitProxy):\n \"\"\"Wraps File/DB Unit dicts used by StoreDiff for equality comparison\"\"\"\n \n match_attrs = [\"context\", \"developer_comment\", \"locations\",\n- \"source\", \"target\", \"translator_comment\"]\n+ \"source\", \"state\", \"target\", \"translator_comment\"]\n \n def __eq__(self, other):\n return all(getattr(self, k) == getattr(other, k)\n@@ -31,7 +31,6 @@\n \n \n class DBUnit(UnitDiffProxy):\n-\n pass\n \n \n@@ -80,13 +79,20 @@\n for unit in self.file_store.units:\n if unit.isheader():\n continue\n+ state = UNTRANSLATED\n+ if unit.isobsolete():\n+ state = OBSOLETE\n+ elif unit.istranslated():\n+ state = TRANSLATED\n+ elif unit.isfuzzy():\n+ state = FUZZY\n file_units[unit.getid()] = {\n \"unitid\": unit.getid(),\n \"context\": unit.getcontext(),\n \"locations\": unit.getlocations(),\n \"source\": unit.source,\n \"target\": unit.target,\n- \"state\": unit.get_state_n(),\n+ \"state\": state,\n \"developer_comment\": unit.getnotes(origin=\"developer\"),\n \"translator_comment\": unit.getnotes(origin=\"translator\")}\n return file_units\n", "issue": "fix update_stores not changing fuzzy state\nregression from 80d35df2b2c3987470873c541c288dc95c0ec79e\nignoring change in state makes it impossible to update the fuzzy state\nof a translatin unit by modifying the files on disk, and running\nupdate_stores. With state ignored in the comparison, those changes are\nsilently ignored (and thenget overridden again on next sync_stores)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport difflib\nfrom collections import OrderedDict\n\nfrom django.utils.functional import cached_property\n\nfrom .fields import to_python as multistring_to_python\nfrom .unit import UnitProxy\nfrom .util import OBSOLETE\n\n\nclass UnitDiffProxy(UnitProxy):\n \"\"\"Wraps File/DB Unit dicts used by StoreDiff for equality comparison\"\"\"\n\n match_attrs = [\"context\", \"developer_comment\", \"locations\",\n \"source\", \"target\", \"translator_comment\"]\n\n def __eq__(self, other):\n return all(getattr(self, k) == getattr(other, k)\n for k in self.match_attrs)\n\n def __ne__(self, other):\n return not self == other\n\n\nclass DBUnit(UnitDiffProxy):\n\n pass\n\n\nclass FileUnit(UnitDiffProxy):\n\n @property\n def locations(self):\n return \"\\n\".join(self.unit[\"locations\"])\n\n @property\n def source(self):\n return multistring_to_python(self.unit[\"source\"])\n\n @property\n def target(self):\n return multistring_to_python(self.unit[\"target\"])\n\n\nclass StoreDiff(object):\n\n def __init__(self, db_store, file_store, file_revision):\n self.db_store = db_store\n self.file_store = file_store\n self.db_revision = db_store.get_max_unit_revision()\n self.file_revision = file_revision\n\n @cached_property\n def active_units(self):\n return [unitid for unitid, unit in self.db_units.items()\n if unit['state'] != OBSOLETE]\n\n @cached_property\n def db_units(self):\n \"\"\"All of the db units regardless of state or revision\"\"\"\n db_units = OrderedDict()\n unit_fields = (\"unitid\", \"state\", \"id\", \"index\", \"revision\",\n \"source_f\", \"target_f\", \"developer_comment\",\n \"translator_comment\", \"locations\", \"context\")\n for unit in self.db_store.unit_set.values(*unit_fields).order_by(\"index\"):\n db_units[unit[\"unitid\"]] = unit\n return db_units\n\n @cached_property\n def file_units(self):\n file_units = OrderedDict()\n for unit in self.file_store.units:\n if unit.isheader():\n continue\n file_units[unit.getid()] = {\n \"unitid\": unit.getid(),\n \"context\": unit.getcontext(),\n \"locations\": unit.getlocations(),\n \"source\": unit.source,\n \"target\": unit.target,\n \"state\": unit.get_state_n(),\n \"developer_comment\": unit.getnotes(origin=\"developer\"),\n \"translator_comment\": unit.getnotes(origin=\"translator\")}\n return file_units\n\n @cached_property\n def insert_points(self):\n \"\"\"Returns a list of insert points with update index info.\n :return: a list of tuples\n ``(insert_at, uids_to_add, next_index, update_index_delta)`` where\n ``insert_at`` is the point for inserting\n ``uids_to_add`` are the units to be inserted\n ``update_index_delta`` is the offset for index updating\n ``next_index`` is the starting point after which\n ``update_index_delta`` should be applied.\n \"\"\"\n inserts = []\n new_unitid_list = self.new_unit_list\n for (tag, i1, i2, j1, j2) in self.opcodes:\n if tag == 'insert':\n update_index_delta = 0\n insert_at = 0\n if i1 > 0:\n insert_at = (\n self.db_units[self.active_units[i1 - 1]]['index'])\n next_index = insert_at + 1\n if i1 < len(self.active_units):\n next_index = self.db_units[self.active_units[i1]][\"index\"]\n update_index_delta = (\n j2 - j1 - next_index + insert_at + 1)\n\n inserts.append((insert_at,\n new_unitid_list[j1:j2],\n next_index,\n update_index_delta))\n\n elif tag == 'replace':\n insert_at = self.db_units[self.active_units[i1 - 1]]['index']\n next_index = self.db_units[self.active_units[i2 - 
1]]['index']\n inserts.append((insert_at,\n new_unitid_list[j1:j2],\n next_index,\n j2 - j1 - insert_at + next_index))\n\n return inserts\n\n @cached_property\n def new_unit_list(self):\n # If file_revision is gte than the db_revision then new unit list\n # will be exactly what is in the file\n if self.file_revision >= self.db_revision:\n return self.file_units.keys()\n\n # These units are kept as they have been updated since file_revision\n # but do not appear in the file\n new_units = [u for u in self.updated_db_units\n if u not in self.file_units]\n\n # These unit are either present in both or only in the file so are\n # kept in the file order\n new_units += [u for u in self.file_units.keys()\n if u not in self.obsoleted_db_units]\n\n return new_units\n\n @cached_property\n def obsoleted_db_units(self):\n return [unitid for unitid, unit in self.db_units.items()\n if (unit['state'] == OBSOLETE\n and unit[\"revision\"] > self.file_revision)]\n\n @cached_property\n def opcodes(self):\n sm = difflib.SequenceMatcher(None,\n self.active_units,\n self.new_unit_list)\n return sm.get_opcodes()\n\n @cached_property\n def updated_db_units(self):\n return [unitid for unitid, unit in self.db_units.items()\n if (unit['revision'] > self.file_revision\n and unit[\"state\"] != OBSOLETE)]\n\n def diff(self):\n \"\"\"Return a dictionary of change actions or None if there are no\n changes to be made.\n \"\"\"\n diff = {\"index\": self.get_indexes_to_update(),\n \"obsolete\": self.get_units_to_obsolete(),\n \"add\": self.get_units_to_add(),\n \"update\": self.get_units_to_update()}\n if self.has_changes(diff):\n return diff\n return None\n\n def get_indexes_to_update(self):\n offset = 0\n index_updates = []\n for (insert_at, uids_add, next_index, delta) in self.insert_points:\n if delta > 0:\n index_updates += [(next_index + offset, delta)]\n offset += delta\n return index_updates\n\n def get_units_to_add(self):\n offset = 0\n to_add = []\n for (insert_at, uids_add, next_index, delta) in self.insert_points:\n for index, uid in enumerate(uids_add):\n file_unit = self.file_store.findid(uid)\n if file_unit and file_unit.getid() not in self.db_units:\n new_unit_index = insert_at + index + 1 + offset\n to_add += [(file_unit, new_unit_index)]\n if delta > 0:\n offset += delta\n return to_add\n\n def get_units_to_obsolete(self):\n return [unit['id'] for unitid, unit in self.db_units.items()\n if (unitid not in self.file_units\n and unitid in self.active_units\n and unitid not in self.updated_db_units)]\n\n def get_units_to_update(self):\n uid_index_map = {}\n offset = 0\n\n for (insert_at, uids_add, next_index, delta) in self.insert_points:\n for index, uid in enumerate(uids_add):\n new_unit_index = insert_at + index + 1 + offset\n if uid in self.db_units:\n uid_index_map[uid] = {\n 'dbid': self.db_units[uid]['id'],\n 'index': new_unit_index}\n if delta > 0:\n offset += delta\n update_dbids = self.get_updated_dbids()\n update_dbids.update({x['dbid'] for x in uid_index_map.values()})\n return (update_dbids, uid_index_map)\n\n def get_updated_dbids(self):\n \"\"\"Returns a set of unit DB ids to be updated.\n \"\"\"\n update_dbids = set()\n\n for (tag, i1, i2, j1, j2) in self.opcodes:\n if tag == 'equal':\n update_dbids.update(\n set(self.db_units[uid]['id']\n for uid in self.active_units[i1:i2]\n if (uid in self.file_units\n and (DBUnit(self.db_units[uid])\n != FileUnit(self.file_units[uid])))))\n return update_dbids\n\n def has_changes(self, diff):\n for k, v in diff.items():\n if k == \"update\":\n if len(v[0]) > 
0:\n return True\n else:\n if len(v) > 0:\n return True\n return False\n", "path": "pootle/apps/pootle_store/diff.py"}]} | 3,312 | 420 |
gh_patches_debug_37008 | rasdani/github-patches | git_diff | great-expectations__great_expectations-2966 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
</issue>
<code>
[start of great_expectations/rule_based_profiler/profiler.py]
1 import uuid
2 from typing import Dict, List, Optional, Union
3
4 import great_expectations.exceptions as ge_exceptions
5 from great_expectations import DataContext
6 from great_expectations.core import ExpectationConfiguration, ExpectationSuite
7 from great_expectations.data_context.util import instantiate_class_from_config
8 from great_expectations.rule_based_profiler.domain_builder.domain_builder import (
9 DomainBuilder,
10 )
11 from great_expectations.rule_based_profiler.expectation_configuration_builder.expectation_configuration_builder import (
12 ExpectationConfigurationBuilder,
13 )
14 from great_expectations.rule_based_profiler.parameter_builder.parameter_builder import (
15 ParameterBuilder,
16 )
17 from great_expectations.rule_based_profiler.parameter_builder.parameter_container import (
18 ParameterContainer,
19 build_parameter_container_for_variables,
20 )
21 from great_expectations.rule_based_profiler.rule.rule import Rule
22
23
24 class Profiler:
25 """
26 Profiler object serves to profile, or automatically evaluate a set of rules, upon a given
27 batch / multiple batches of data.
28 """
29
30 def __init__(
31 self,
32 *,
33 profiler_config: Optional[Dict[str, Dict[str, Dict]]] = None,
34 data_context: Optional[DataContext] = None,
35 ):
36 """
37 Create a new Profiler using configured rules.
38 For a rule or an item in a rule configuration, instantiates the following if
39 available: a domain builder, a parameter builder, and a configuration builder.
40 These will be used to define profiler computation patterns.
41
42 Args:
43 profiler_config: Variables and Rules configuration as a dictionary
44 data_context: DataContext object that defines a full runtime environment (data access, etc.)
45 """
46 self._data_context = data_context
47 self._rules = []
48
49 rules_configs: Dict[str, Dict] = profiler_config.get("rules", {})
50 rule_name: str
51 rule_config: dict
52
53 for rule_name, rule_config in rules_configs.items():
54 domain_builder_config: dict = rule_config.get("domain_builder")
55
56 if domain_builder_config is None:
57 raise ge_exceptions.ProfilerConfigurationError(
58 message=f'Invalid rule "{rule_name}": no domain_builder found.'
59 )
60
61 domain_builder: DomainBuilder = instantiate_class_from_config(
62 config=domain_builder_config,
63 runtime_environment={"data_context": data_context},
64 config_defaults={
65 "module_name": "great_expectations.rule_based_profiler.domain_builder"
66 },
67 )
68
69 parameter_builders: List[ParameterBuilder] = []
70
71 parameter_builder_configs: dict = rule_config.get("parameter_builders")
72
73 if parameter_builder_configs:
74 parameter_builder_config: dict
75 for parameter_builder_config in parameter_builder_configs:
76 parameter_builders.append(
77 instantiate_class_from_config(
78 config=parameter_builder_config,
79 runtime_environment={"data_context": data_context},
80 config_defaults={
81 "module_name": "great_expectations.rule_based_profiler.parameter_builder"
82 },
83 )
84 )
85
86 expectation_configuration_builders: List[
87 ExpectationConfigurationBuilder
88 ] = []
89
90 expectation_configuration_builder_configs: dict = rule_config.get(
91 "expectation_configuration_builders"
92 )
93
94 if expectation_configuration_builder_configs:
95 expectation_configuration_builder_config: dict
96 for (
97 expectation_configuration_builder_config
98 ) in expectation_configuration_builder_configs:
99 expectation_configuration_builders.append(
100 instantiate_class_from_config(
101 config=expectation_configuration_builder_config,
102 runtime_environment={},
103 config_defaults={
104 "class_name": "DefaultExpectationConfigurationBuilder",
105 "module_name": "great_expectations.rule_based_profiler.expectation_configuration_builder",
106 },
107 )
108 )
109
110 variables_configs: Dict[str, Dict] = profiler_config.get("variables", {})
111 variables: Optional[ParameterContainer] = None
112
113 if variables_configs:
114 variables = build_parameter_container_for_variables(
115 variables_configs=variables_configs
116 )
117
118 self._rules.append(
119 Rule(
120 name=rule_name,
121 domain_builder=domain_builder,
122 parameter_builders=parameter_builders,
123 expectation_configuration_builders=expectation_configuration_builders,
124 variables=variables,
125 )
126 )
127
128 def profile(
129 self,
130 *,
131 expectation_suite_name: Optional[str] = None,
132 ) -> ExpectationSuite:
133 """
134 Args:
135 :param expectation_suite_name: A name for returned Expectation suite.
136 :return: Set of rule evaluation results in the form of an ExpectationSuite
137 """
138 if expectation_suite_name is None:
139 expectation_suite_name = (
140 f"tmp.profiler_{self.__class__.__name__}_suite_{str(uuid.uuid4())[:8]}"
141 )
142
143 expectation_suite: ExpectationSuite = ExpectationSuite(
144 expectation_suite_name=expectation_suite_name
145 )
146
147 rule: Rule
148 for rule in self._rules:
149 expectation_configurations: List[ExpectationConfiguration] = rule.generate()
150 expectation_configuration: ExpectationConfiguration
151 for expectation_configuration in expectation_configurations:
152 expectation_suite.add_expectation(
153 expectation_configuration=expectation_configuration
154 )
155
156 return expectation_suite
157
[end of great_expectations/rule_based_profiler/profiler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/great_expectations/rule_based_profiler/profiler.py b/great_expectations/rule_based_profiler/profiler.py
--- a/great_expectations/rule_based_profiler/profiler.py
+++ b/great_expectations/rule_based_profiler/profiler.py
@@ -43,10 +43,11 @@
profiler_config: Variables and Rules configuration as a dictionary
data_context: DataContext object that defines a full runtime environment (data access, etc.)
"""
+ self._profiler_config = profiler_config
self._data_context = data_context
self._rules = []
- rules_configs: Dict[str, Dict] = profiler_config.get("rules", {})
+ rules_configs: Dict[str, Dict] = self._profiler_config.get("rules", {})
rule_name: str
rule_config: dict
@@ -107,7 +108,9 @@
)
)
- variables_configs: Dict[str, Dict] = profiler_config.get("variables", {})
+ variables_configs: Dict[str, Dict] = self._profiler_config.get(
+ "variables", {}
+ )
variables: Optional[ParameterContainer] = None
if variables_configs:
@@ -129,10 +132,12 @@
self,
*,
expectation_suite_name: Optional[str] = None,
+ include_citation: bool = True,
) -> ExpectationSuite:
"""
Args:
:param expectation_suite_name: A name for returned Expectation suite.
+ :param include_citation: Whether or not to include the Profiler config in the metadata for the ExpectationSuite produced by the Profiler
:return: Set of rule evaluation results in the form of an ExpectationSuite
"""
if expectation_suite_name is None:
@@ -144,6 +149,12 @@
expectation_suite_name=expectation_suite_name
)
+ if include_citation:
+ expectation_suite.add_citation(
+ comment="Suite created by Rule-Based Profiler with the following config",
+ profiler_config=self._profiler_config,
+ )
+
rule: Rule
for rule in self._rules:
expectation_configurations: List[ExpectationConfiguration] = rule.generate()
| {"golden_diff": "diff --git a/great_expectations/rule_based_profiler/profiler.py b/great_expectations/rule_based_profiler/profiler.py\n--- a/great_expectations/rule_based_profiler/profiler.py\n+++ b/great_expectations/rule_based_profiler/profiler.py\n@@ -43,10 +43,11 @@\n profiler_config: Variables and Rules configuration as a dictionary\n data_context: DataContext object that defines a full runtime environment (data access, etc.)\n \"\"\"\n+ self._profiler_config = profiler_config\n self._data_context = data_context\n self._rules = []\n \n- rules_configs: Dict[str, Dict] = profiler_config.get(\"rules\", {})\n+ rules_configs: Dict[str, Dict] = self._profiler_config.get(\"rules\", {})\n rule_name: str\n rule_config: dict\n \n@@ -107,7 +108,9 @@\n )\n )\n \n- variables_configs: Dict[str, Dict] = profiler_config.get(\"variables\", {})\n+ variables_configs: Dict[str, Dict] = self._profiler_config.get(\n+ \"variables\", {}\n+ )\n variables: Optional[ParameterContainer] = None\n \n if variables_configs:\n@@ -129,10 +132,12 @@\n self,\n *,\n expectation_suite_name: Optional[str] = None,\n+ include_citation: bool = True,\n ) -> ExpectationSuite:\n \"\"\"\n Args:\n :param expectation_suite_name: A name for returned Expectation suite.\n+ :param include_citation: Whether or not to include the Profiler config in the metadata for the ExpectationSuite produced by the Profiler\n :return: Set of rule evaluation results in the form of an ExpectationSuite\n \"\"\"\n if expectation_suite_name is None:\n@@ -144,6 +149,12 @@\n expectation_suite_name=expectation_suite_name\n )\n \n+ if include_citation:\n+ expectation_suite.add_citation(\n+ comment=\"Suite created by Rule-Based Profiler with the following config\",\n+ profiler_config=self._profiler_config,\n+ )\n+\n rule: Rule\n for rule in self._rules:\n expectation_configurations: List[ExpectationConfiguration] = rule.generate()\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import uuid\nfrom typing import Dict, List, Optional, Union\n\nimport great_expectations.exceptions as ge_exceptions\nfrom great_expectations import DataContext\nfrom great_expectations.core import ExpectationConfiguration, ExpectationSuite\nfrom great_expectations.data_context.util import instantiate_class_from_config\nfrom great_expectations.rule_based_profiler.domain_builder.domain_builder import (\n DomainBuilder,\n)\nfrom great_expectations.rule_based_profiler.expectation_configuration_builder.expectation_configuration_builder import (\n ExpectationConfigurationBuilder,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder.parameter_builder import (\n ParameterBuilder,\n)\nfrom great_expectations.rule_based_profiler.parameter_builder.parameter_container import (\n ParameterContainer,\n build_parameter_container_for_variables,\n)\nfrom great_expectations.rule_based_profiler.rule.rule import Rule\n\n\nclass Profiler:\n \"\"\"\n Profiler object serves to profile, or automatically evaluate a set of rules, upon a given\n batch / multiple batches of data.\n \"\"\"\n\n def __init__(\n self,\n *,\n profiler_config: Optional[Dict[str, Dict[str, Dict]]] = None,\n data_context: Optional[DataContext] = None,\n ):\n \"\"\"\n Create a new Profiler using configured rules.\n For a rule or an item in a rule configuration, instantiates the following if\n available: a domain builder, a parameter builder, and a configuration builder.\n These will be used to define profiler 
computation patterns.\n\n Args:\n profiler_config: Variables and Rules configuration as a dictionary\n data_context: DataContext object that defines a full runtime environment (data access, etc.)\n \"\"\"\n self._data_context = data_context\n self._rules = []\n\n rules_configs: Dict[str, Dict] = profiler_config.get(\"rules\", {})\n rule_name: str\n rule_config: dict\n\n for rule_name, rule_config in rules_configs.items():\n domain_builder_config: dict = rule_config.get(\"domain_builder\")\n\n if domain_builder_config is None:\n raise ge_exceptions.ProfilerConfigurationError(\n message=f'Invalid rule \"{rule_name}\": no domain_builder found.'\n )\n\n domain_builder: DomainBuilder = instantiate_class_from_config(\n config=domain_builder_config,\n runtime_environment={\"data_context\": data_context},\n config_defaults={\n \"module_name\": \"great_expectations.rule_based_profiler.domain_builder\"\n },\n )\n\n parameter_builders: List[ParameterBuilder] = []\n\n parameter_builder_configs: dict = rule_config.get(\"parameter_builders\")\n\n if parameter_builder_configs:\n parameter_builder_config: dict\n for parameter_builder_config in parameter_builder_configs:\n parameter_builders.append(\n instantiate_class_from_config(\n config=parameter_builder_config,\n runtime_environment={\"data_context\": data_context},\n config_defaults={\n \"module_name\": \"great_expectations.rule_based_profiler.parameter_builder\"\n },\n )\n )\n\n expectation_configuration_builders: List[\n ExpectationConfigurationBuilder\n ] = []\n\n expectation_configuration_builder_configs: dict = rule_config.get(\n \"expectation_configuration_builders\"\n )\n\n if expectation_configuration_builder_configs:\n expectation_configuration_builder_config: dict\n for (\n expectation_configuration_builder_config\n ) in expectation_configuration_builder_configs:\n expectation_configuration_builders.append(\n instantiate_class_from_config(\n config=expectation_configuration_builder_config,\n runtime_environment={},\n config_defaults={\n \"class_name\": \"DefaultExpectationConfigurationBuilder\",\n \"module_name\": \"great_expectations.rule_based_profiler.expectation_configuration_builder\",\n },\n )\n )\n\n variables_configs: Dict[str, Dict] = profiler_config.get(\"variables\", {})\n variables: Optional[ParameterContainer] = None\n\n if variables_configs:\n variables = build_parameter_container_for_variables(\n variables_configs=variables_configs\n )\n\n self._rules.append(\n Rule(\n name=rule_name,\n domain_builder=domain_builder,\n parameter_builders=parameter_builders,\n expectation_configuration_builders=expectation_configuration_builders,\n variables=variables,\n )\n )\n\n def profile(\n self,\n *,\n expectation_suite_name: Optional[str] = None,\n ) -> ExpectationSuite:\n \"\"\"\n Args:\n :param expectation_suite_name: A name for returned Expectation suite.\n :return: Set of rule evaluation results in the form of an ExpectationSuite\n \"\"\"\n if expectation_suite_name is None:\n expectation_suite_name = (\n f\"tmp.profiler_{self.__class__.__name__}_suite_{str(uuid.uuid4())[:8]}\"\n )\n\n expectation_suite: ExpectationSuite = ExpectationSuite(\n expectation_suite_name=expectation_suite_name\n )\n\n rule: Rule\n for rule in self._rules:\n expectation_configurations: List[ExpectationConfiguration] = rule.generate()\n expectation_configuration: ExpectationConfiguration\n for expectation_configuration in expectation_configurations:\n expectation_suite.add_expectation(\n expectation_configuration=expectation_configuration\n )\n\n return 
expectation_suite\n", "path": "great_expectations/rule_based_profiler/profiler.py"}]} | 1,994 | 493 |
gh_patches_debug_36047 | rasdani/github-patches | git_diff | ivy-llc__ivy-15973 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Sparse Array Functions to Paddle Backend
Add [Sparse Array Functions](https://www.paddlepaddle.org.cn/documentation/docs/en/api/index\_en.html) to Paddle backend
\_
>Please keep in mind that the proper way to link an issue to this list is to comment "- [ ] #issue\_number" while the issue's title only includes the name of the function you've chosen.
\_
## Experimental
- [x] is\_native\_sparse\_array
- [x] native\_sparse\_array
- [x] native\_sparse\_array\_to\_indices\_values\_and\_shape
</issue>
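As a rough orientation (not part of the issue above), the three backend functions map onto Paddle's own sparse-tensor primitives — the constructor `paddle.sparse.sparse_coo_tensor` and the `is_sparse_coo()` / `indices()` / `values()` accessors that the patch further down in this record relies on. A minimal sketch, assuming Paddle 2.x with the `paddle.sparse` module available:

```python
# Illustrative sketch only: exercises the Paddle primitives the backend wraps.
import paddle

indices = [[0, 1, 2], [1, 2, 0]]   # 2 x nnz COO coordinates
values = [1.0, 2.0, 3.0]
dense_shape = [3, 3]

x = paddle.sparse.sparse_coo_tensor(indices=indices, values=values, shape=dense_shape)

print(x.is_sparse_coo())   # True  -> what is_native_sparse_array can check
print(x.indices())         # COO indices
print(x.values())          # non-zero values
print(x.shape)             # dense shape, i.e. [3, 3]
```

CSR tensors follow the same pattern via `paddle.sparse.sparse_csr_tensor`, taking `crows` / `cols` instead of `indices`.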
<code>
[start of ivy/functional/backends/paddle/experimental/sparse_array.py]
1 from ivy.utils.exceptions import IvyNotImplementedException
2 import paddle
3
4
5 def is_native_sparse_array(x: paddle.Tensor) -> bool:
6 return x.is_sparse_coo() or x.is_sparse_csr()
7
8
9 def native_sparse_array(
10 data=None,
11 *,
12 coo_indices=None,
13 crow_indices=None,
14 col_indices=None,
15 ccol_indices=None,
16 row_indices=None,
17 values=None,
18 dense_shape=None,
19 format="coo",
20 ):
21 raise IvyNotImplementedException()
22
23
24 def native_sparse_array_to_indices_values_and_shape(x):
25 raise IvyNotImplementedException()
26
[end of ivy/functional/backends/paddle/experimental/sparse_array.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ivy/functional/backends/paddle/experimental/sparse_array.py b/ivy/functional/backends/paddle/experimental/sparse_array.py
--- a/ivy/functional/backends/paddle/experimental/sparse_array.py
+++ b/ivy/functional/backends/paddle/experimental/sparse_array.py
@@ -1,11 +1,26 @@
+import ivy
+from ivy.functional.ivy.experimental.sparse_array import (
+ _verify_coo_components,
+ _verify_csr_components,
+ _is_data_not_indices_values_and_shape,
+)
+from ivy.func_wrapper import (
+ with_unsupported_device_and_dtypes,
+)
from ivy.utils.exceptions import IvyNotImplementedException
import paddle
+# local
+from .. import backend_version
+
def is_native_sparse_array(x: paddle.Tensor) -> bool:
return x.is_sparse_coo() or x.is_sparse_csr()
+@with_unsupported_device_and_dtypes(
+ {"2.4.2 and below": {"cpu": ("int8",)}}, backend_version
+)
def native_sparse_array(
data=None,
*,
@@ -17,9 +32,67 @@
values=None,
dense_shape=None,
format="coo",
-):
- raise IvyNotImplementedException()
+) -> paddle.Tensor:
+ format = format.lower()
+
+ if format not in ["coo", "csr"]:
+ raise IvyNotImplementedException(
+ "paddle only supports 'coo' and 'csr' sparse formats."
+ )
+
+ if _is_data_not_indices_values_and_shape(
+ data,
+ coo_indices,
+ crow_indices,
+ col_indices,
+ ccol_indices,
+ row_indices,
+ values,
+ dense_shape,
+ ):
+ ivy.utils.assertions.check_true(
+ ivy.is_native_sparse_array(data), message="not a sparse array"
+ )
+ return data
+
+ if format == "coo":
+ _verify_coo_components(
+ indices=coo_indices, values=values, dense_shape=dense_shape
+ )
+ return paddle.sparse.sparse_coo_tensor(
+ indices=coo_indices,
+ values=values,
+ shape=dense_shape,
+ dtype=dtype,
+ place=device,
+ stop_gradient=not requires_grad,
+ )
+ else:
+ _verify_csr_components(
+ crow_indices=crow_indices,
+ col_indices=col_indices,
+ values=values,
+ dense_shape=dense_shape,
+ )
+ return paddle.sparse.sparse_csr_tensor(
+ crows=crow_indices,
+ cols=col_indices,
+ values=values,
+ shape=dense_shape,
+ dtype=dtype,
+ place=device,
+ stop_gradient=not requires_grad,
+ )
def native_sparse_array_to_indices_values_and_shape(x):
- raise IvyNotImplementedException()
+ if not is_native_sparse_array(x):
+ raise ivy.utils.exceptions.IvyException("not a Paddle Sparse Array")
+ if x.is_sparse_coo():
+ return {"coo_indices": x.indices()}, x.values(), x.shape
+ else:
+ return (
+ {"crow_indices": x.crows(), "col_indices": x.cols()},
+ x.values(),
+ x.shape,
+ )
| {"golden_diff": "diff --git a/ivy/functional/backends/paddle/experimental/sparse_array.py b/ivy/functional/backends/paddle/experimental/sparse_array.py\n--- a/ivy/functional/backends/paddle/experimental/sparse_array.py\n+++ b/ivy/functional/backends/paddle/experimental/sparse_array.py\n@@ -1,11 +1,26 @@\n+import ivy\n+from ivy.functional.ivy.experimental.sparse_array import (\n+ _verify_coo_components,\n+ _verify_csr_components,\n+ _is_data_not_indices_values_and_shape,\n+)\n+from ivy.func_wrapper import (\n+ with_unsupported_device_and_dtypes,\n+)\n from ivy.utils.exceptions import IvyNotImplementedException\n import paddle\n \n+# local\n+from .. import backend_version\n+\n \n def is_native_sparse_array(x: paddle.Tensor) -> bool:\n return x.is_sparse_coo() or x.is_sparse_csr()\n \n \n+@with_unsupported_device_and_dtypes(\n+ {\"2.4.2 and below\": {\"cpu\": (\"int8\",)}}, backend_version\n+)\n def native_sparse_array(\n data=None,\n *,\n@@ -17,9 +32,67 @@\n values=None,\n dense_shape=None,\n format=\"coo\",\n-):\n- raise IvyNotImplementedException()\n+) -> paddle.Tensor:\n+ format = format.lower()\n+\n+ if format not in [\"coo\", \"csr\"]:\n+ raise IvyNotImplementedException(\n+ \"paddle only supports 'coo' and 'csr' sparse formats.\"\n+ )\n+\n+ if _is_data_not_indices_values_and_shape(\n+ data,\n+ coo_indices,\n+ crow_indices,\n+ col_indices,\n+ ccol_indices,\n+ row_indices,\n+ values,\n+ dense_shape,\n+ ):\n+ ivy.utils.assertions.check_true(\n+ ivy.is_native_sparse_array(data), message=\"not a sparse array\"\n+ )\n+ return data\n+\n+ if format == \"coo\":\n+ _verify_coo_components(\n+ indices=coo_indices, values=values, dense_shape=dense_shape\n+ )\n+ return paddle.sparse.sparse_coo_tensor(\n+ indices=coo_indices,\n+ values=values,\n+ shape=dense_shape,\n+ dtype=dtype,\n+ place=device,\n+ stop_gradient=not requires_grad,\n+ )\n+ else:\n+ _verify_csr_components(\n+ crow_indices=crow_indices,\n+ col_indices=col_indices,\n+ values=values,\n+ dense_shape=dense_shape,\n+ )\n+ return paddle.sparse.sparse_csr_tensor(\n+ crows=crow_indices,\n+ cols=col_indices,\n+ values=values,\n+ shape=dense_shape,\n+ dtype=dtype,\n+ place=device,\n+ stop_gradient=not requires_grad,\n+ )\n \n \n def native_sparse_array_to_indices_values_and_shape(x):\n- raise IvyNotImplementedException()\n+ if not is_native_sparse_array(x):\n+ raise ivy.utils.exceptions.IvyException(\"not a Paddle Sparse Array\")\n+ if x.is_sparse_coo():\n+ return {\"coo_indices\": x.indices()}, x.values(), x.shape\n+ else:\n+ return (\n+ {\"crow_indices\": x.crows(), \"col_indices\": x.cols()},\n+ x.values(),\n+ x.shape,\n+ )\n", "issue": "Add Sparse Array Functions to Paddle Backend\nAdd [Sparse Array Functions](https://www.paddlepaddle.org.cn/documentation/docs/en/api/index\\_en.html) to Paddle backend\r\n\r\n\\_\r\n\r\n>Please keep in mind that the proper way to link an issue to this list is to comment \"- [ ] #issue\\_number\" while the issue's title only includes the name of the function you've chosen.\r\n\r\n\\_\r\n\r\n## Experimental\r\n\r\n- [x] is\\_native\\_sparse\\_array\r\n- [x] native\\_sparse\\_array\r\n- [x] native\\_sparse\\_array\\_to\\_indices\\_values\\_and\\_shape\n", "before_files": [{"content": "from ivy.utils.exceptions import IvyNotImplementedException\nimport paddle\n\n\ndef is_native_sparse_array(x: paddle.Tensor) -> bool:\n return x.is_sparse_coo() or x.is_sparse_csr()\n\n\ndef native_sparse_array(\n data=None,\n *,\n coo_indices=None,\n crow_indices=None,\n col_indices=None,\n ccol_indices=None,\n 
row_indices=None,\n values=None,\n dense_shape=None,\n format=\"coo\",\n):\n raise IvyNotImplementedException()\n\n\ndef native_sparse_array_to_indices_values_and_shape(x):\n raise IvyNotImplementedException()\n", "path": "ivy/functional/backends/paddle/experimental/sparse_array.py"}]} | 852 | 742 |
gh_patches_debug_30271 | rasdani/github-patches | git_diff | rasterio__rasterio-886 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
rio overview --ls should not modify file
Currently running `rio overview --ls` to inspect the overviews modifies the file. We could detect the `--ls` option and open in read-only mode.
</issue>
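A minimal sketch of the read-only approach suggested above (and taken by the patch shown later in this record): branch on the `--ls` flag so inspection opens the dataset with mode `'r'`, while `--build`/`--rebuild` keep the writable `'r+'` handle. The helper name below is illustrative, not rasterio API:

```python
import rasterio

def open_for_overviews(path, ls=False):
    """Open read-only for --ls; read/write only when overviews must be built."""
    mode = 'r' if ls else 'r+'
    return rasterio.open(path, mode)
```

Opening with `'r'` guarantees that simply listing overview factors cannot modify the file.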
<code>
[start of rasterio/rio/overview.py]
1 # coding: utf-8
2 """Manage overviews of a dataset."""
3
4 from functools import reduce
5 import logging
6 import operator
7
8 import click
9
10 from . import options
11 import rasterio
12 from rasterio.enums import Resampling
13
14
15 def build_handler(ctx, param, value):
16 if value:
17 try:
18 if '^' in value:
19 base, exp_range = value.split('^')
20 exp_min, exp_max = (int(v) for v in exp_range.split('..'))
21 value = [pow(int(base), k) for k in range(exp_min, exp_max + 1)]
22 else:
23 value = [int(v) for v in value.split(',')]
24 except Exception:
25 raise click.BadParameter(u"must match 'n,n,n,…' or 'n^n..n'.")
26 return value
27
28
29 @click.command('overview', short_help="Construct overviews in an existing dataset.")
30 @options.file_in_arg
31 @click.option('--build', callback=build_handler, metavar=u"f1,f2,…|b^min..max",
32 help="A sequence of decimation factors specied as "
33 "comma-separated list of numbers or a base and range of "
34 "exponents.")
35 @click.option('--ls', help="Print the overviews for each band.",
36 is_flag=True, default=False)
37 @click.option('--rebuild', help="Reconstruct existing overviews.",
38 is_flag=True, default=False)
39 @click.option('--resampling', help="Resampling algorithm.",
40 type=click.Choice(
41 [it.name for it in Resampling if it.value in [0, 2, 5, 6, 7]]),
42 default='nearest', show_default=True)
43 @click.pass_context
44 def overview(ctx, input, build, ls, rebuild, resampling):
45 """Construct overviews in an existing dataset.
46
47 A pyramid of overviews computed once and stored in the dataset can
48 improve performance in some applications.
49
50 The decimation levels at which to build overviews can be specified as
51 a comma separated list
52
53 rio overview --build 2,4,8,16
54
55 or a base and range of exponents.
56
57 rio overview --build 2^1..4
58
59 Note that overviews can not currently be removed and are not
60 automatically updated when the dataset's primary bands are
61 modified.
62
63 Information about existing overviews can be printed using the --ls
64 option.
65
66 rio overview --ls
67
68 """
69 with ctx.obj['env']:
70 with rasterio.open(input, 'r+') as dst:
71
72 if ls:
73 resampling_method = dst.tags(
74 ns='rio_overview').get('resampling') or 'unknown'
75
76 click.echo("Overview factors:")
77 for idx in dst.indexes:
78 click.echo(" Band %d: %s (method: '%s')" % (
79 idx, dst.overviews(idx) or 'None', resampling_method))
80
81 elif rebuild:
82 # Build the same overviews for all bands.
83 factors = reduce(
84 operator.or_,
85 [set(dst.overviews(i)) for i in dst.indexes])
86
87 # Attempt to recover the resampling method from dataset tags.
88 resampling_method = dst.tags(
89 ns='rio_overview').get('resampling') or resampling
90
91 dst.build_overviews(
92 list(factors), Resampling[resampling_method])
93
94 elif build:
95 dst.build_overviews(build, Resampling[resampling])
96
97 # Save the resampling method to a tag.
98 dst.update_tags(ns='rio_overview', resampling=resampling)
99
[end of rasterio/rio/overview.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rasterio/rio/overview.py b/rasterio/rio/overview.py
--- a/rasterio/rio/overview.py
+++ b/rasterio/rio/overview.py
@@ -67,9 +67,8 @@
"""
with ctx.obj['env']:
- with rasterio.open(input, 'r+') as dst:
-
- if ls:
+ if ls:
+ with rasterio.open(input, 'r') as dst:
resampling_method = dst.tags(
ns='rio_overview').get('resampling') or 'unknown'
@@ -77,8 +76,8 @@
for idx in dst.indexes:
click.echo(" Band %d: %s (method: '%s')" % (
idx, dst.overviews(idx) or 'None', resampling_method))
-
- elif rebuild:
+ elif rebuild:
+ with rasterio.open(input, 'r+') as dst:
# Build the same overviews for all bands.
factors = reduce(
operator.or_,
@@ -91,8 +90,13 @@
dst.build_overviews(
list(factors), Resampling[resampling_method])
- elif build:
+ elif build:
+ with rasterio.open(input, 'r+') as dst:
dst.build_overviews(build, Resampling[resampling])
# Save the resampling method to a tag.
dst.update_tags(ns='rio_overview', resampling=resampling)
+
+ else:
+ raise click.UsageError(
+ "Please specify --ls, --rebuild, or --build ...")
| {"golden_diff": "diff --git a/rasterio/rio/overview.py b/rasterio/rio/overview.py\n--- a/rasterio/rio/overview.py\n+++ b/rasterio/rio/overview.py\n@@ -67,9 +67,8 @@\n \n \"\"\"\n with ctx.obj['env']:\n- with rasterio.open(input, 'r+') as dst:\n-\n- if ls:\n+ if ls:\n+ with rasterio.open(input, 'r') as dst:\n resampling_method = dst.tags(\n ns='rio_overview').get('resampling') or 'unknown'\n \n@@ -77,8 +76,8 @@\n for idx in dst.indexes:\n click.echo(\" Band %d: %s (method: '%s')\" % (\n idx, dst.overviews(idx) or 'None', resampling_method))\n-\n- elif rebuild:\n+ elif rebuild:\n+ with rasterio.open(input, 'r+') as dst:\n # Build the same overviews for all bands.\n factors = reduce(\n operator.or_,\n@@ -91,8 +90,13 @@\n dst.build_overviews(\n list(factors), Resampling[resampling_method])\n \n- elif build:\n+ elif build:\n+ with rasterio.open(input, 'r+') as dst:\n dst.build_overviews(build, Resampling[resampling])\n \n # Save the resampling method to a tag.\n dst.update_tags(ns='rio_overview', resampling=resampling)\n+\n+ else:\n+ raise click.UsageError(\n+ \"Please specify --ls, --rebuild, or --build ...\")\n", "issue": "rio overview --ls should not modify file\nCurrently running `rio overview --ls` to inspect the overviews modifies the file. We could detect the `--ls` option and open in read-only mode. \n\n", "before_files": [{"content": "# coding: utf-8\n\"\"\"Manage overviews of a dataset.\"\"\"\n\nfrom functools import reduce\nimport logging\nimport operator\n\nimport click\n\nfrom . import options\nimport rasterio\nfrom rasterio.enums import Resampling\n\n\ndef build_handler(ctx, param, value):\n if value:\n try:\n if '^' in value:\n base, exp_range = value.split('^')\n exp_min, exp_max = (int(v) for v in exp_range.split('..'))\n value = [pow(int(base), k) for k in range(exp_min, exp_max + 1)]\n else:\n value = [int(v) for v in value.split(',')]\n except Exception:\n raise click.BadParameter(u\"must match 'n,n,n,\u2026' or 'n^n..n'.\")\n return value\n\n\[email protected]('overview', short_help=\"Construct overviews in an existing dataset.\")\[email protected]_in_arg\[email protected]('--build', callback=build_handler, metavar=u\"f1,f2,\u2026|b^min..max\",\n help=\"A sequence of decimation factors specied as \"\n \"comma-separated list of numbers or a base and range of \"\n \"exponents.\")\[email protected]('--ls', help=\"Print the overviews for each band.\",\n is_flag=True, default=False)\[email protected]('--rebuild', help=\"Reconstruct existing overviews.\",\n is_flag=True, default=False)\[email protected]('--resampling', help=\"Resampling algorithm.\",\n type=click.Choice(\n [it.name for it in Resampling if it.value in [0, 2, 5, 6, 7]]),\n default='nearest', show_default=True)\[email protected]_context\ndef overview(ctx, input, build, ls, rebuild, resampling):\n \"\"\"Construct overviews in an existing dataset.\n\n A pyramid of overviews computed once and stored in the dataset can\n improve performance in some applications.\n\n The decimation levels at which to build overviews can be specified as\n a comma separated list\n\n rio overview --build 2,4,8,16\n\n or a base and range of exponents.\n\n rio overview --build 2^1..4\n\n Note that overviews can not currently be removed and are not\n automatically updated when the dataset's primary bands are\n modified.\n\n Information about existing overviews can be printed using the --ls\n option.\n\n rio overview --ls\n\n \"\"\"\n with ctx.obj['env']:\n with rasterio.open(input, 'r+') as dst:\n\n if ls:\n resampling_method = dst.tags(\n 
ns='rio_overview').get('resampling') or 'unknown'\n\n click.echo(\"Overview factors:\")\n for idx in dst.indexes:\n click.echo(\" Band %d: %s (method: '%s')\" % (\n idx, dst.overviews(idx) or 'None', resampling_method))\n\n elif rebuild:\n # Build the same overviews for all bands.\n factors = reduce(\n operator.or_,\n [set(dst.overviews(i)) for i in dst.indexes])\n\n # Attempt to recover the resampling method from dataset tags.\n resampling_method = dst.tags(\n ns='rio_overview').get('resampling') or resampling\n\n dst.build_overviews(\n list(factors), Resampling[resampling_method])\n\n elif build:\n dst.build_overviews(build, Resampling[resampling])\n\n # Save the resampling method to a tag.\n dst.update_tags(ns='rio_overview', resampling=resampling)\n", "path": "rasterio/rio/overview.py"}]} | 1,546 | 353 |
gh_patches_debug_10366 | rasdani/github-patches | git_diff | python-discord__bot-774 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reduce logging level for tag cooldowns
Sentry Issue: [BOT-19](https://sentry.io/organizations/python-discord/issues/1527377135/?referrer=github_integration)
```
<REDACTED> tried to get the 'ask' tag, but the tag is on cooldown. Cooldown ends in 2.8 seconds.
```
</issue>
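The fix amounts to emitting the cooldown message below WARNING, since hitting a tag cooldown is expected behaviour rather than an error worth alerting on (this is what the patch later in this record does). A stand-alone sketch with placeholder values:

```python
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("bot.cogs.tags")

author, tag_name, time_left = "someone#1234", "ask", 2.8   # illustrative stand-ins

# INFO instead of WARNING keeps routine cooldown hits out of warning-level
# alerting (e.g. Sentry events) while still leaving a trace in the logs.
log.info(
    f"{author} tried to get the '{tag_name}' tag, but the tag is on cooldown. "
    f"Cooldown ends in {time_left:.1f} seconds."
)
```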
<code>
[start of bot/cogs/tags.py]
1 import logging
2 import re
3 import time
4 from typing import Dict, List, Optional
5
6 from discord import Colour, Embed
7 from discord.ext.commands import Cog, Context, group
8
9 from bot.bot import Bot
10 from bot.constants import Channels, Cooldowns, MODERATION_ROLES, Roles
11 from bot.converters import TagContentConverter, TagNameConverter
12 from bot.decorators import with_role
13 from bot.pagination import LinePaginator
14
15 log = logging.getLogger(__name__)
16
17 TEST_CHANNELS = (
18 Channels.devtest,
19 Channels.bot,
20 Channels.helpers
21 )
22
23 REGEX_NON_ALPHABET = re.compile(r"[^a-z]", re.MULTILINE & re.IGNORECASE)
24
25
26 class Tags(Cog):
27 """Save new tags and fetch existing tags."""
28
29 def __init__(self, bot: Bot):
30 self.bot = bot
31 self.tag_cooldowns = {}
32
33 self._cache = {}
34 self._last_fetch: float = 0.0
35
36 async def _get_tags(self, is_forced: bool = False) -> None:
37 """Get all tags."""
38 # refresh only when there's a more than 5m gap from last call.
39 time_now: float = time.time()
40 if is_forced or not self._last_fetch or time_now - self._last_fetch > 5 * 60:
41 tags = await self.bot.api_client.get('bot/tags')
42 self._cache = {tag['title'].lower(): tag for tag in tags}
43 self._last_fetch = time_now
44
45 @staticmethod
46 def _fuzzy_search(search: str, target: str) -> int:
47 """A simple scoring algorithm based on how many letters are found / total, with order in mind."""
48 current, index = 0, 0
49 _search = REGEX_NON_ALPHABET.sub('', search.lower())
50 _targets = iter(REGEX_NON_ALPHABET.split(target.lower()))
51 _target = next(_targets)
52 try:
53 while True:
54 while index < len(_target) and _search[current] == _target[index]:
55 current += 1
56 index += 1
57 index, _target = 0, next(_targets)
58 except (StopIteration, IndexError):
59 pass
60 return current / len(_search) * 100
61
62 def _get_suggestions(self, tag_name: str, thresholds: Optional[List[int]] = None) -> List[str]:
63 """Return a list of suggested tags."""
64 scores: Dict[str, int] = {
65 tag_title: Tags._fuzzy_search(tag_name, tag['title'])
66 for tag_title, tag in self._cache.items()
67 }
68
69 thresholds = thresholds or [100, 90, 80, 70, 60]
70
71 for threshold in thresholds:
72 suggestions = [
73 self._cache[tag_title]
74 for tag_title, matching_score in scores.items()
75 if matching_score >= threshold
76 ]
77 if suggestions:
78 return suggestions
79
80 return []
81
82 async def _get_tag(self, tag_name: str) -> list:
83 """Get a specific tag."""
84 await self._get_tags()
85 found = [self._cache.get(tag_name.lower(), None)]
86 if not found[0]:
87 return self._get_suggestions(tag_name)
88 return found
89
90 @group(name='tags', aliases=('tag', 't'), invoke_without_command=True)
91 async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:
92 """Show all known tags, a single tag, or run a subcommand."""
93 await ctx.invoke(self.get_command, tag_name=tag_name)
94
95 @tags_group.command(name='get', aliases=('show', 'g'))
96 async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:
97 """Get a specified tag, or a list of all tags if no tag is specified."""
98 def _command_on_cooldown(tag_name: str) -> bool:
99 """
100 Check if the command is currently on cooldown, on a per-tag, per-channel basis.
101
102 The cooldown duration is set in constants.py.
103 """
104 now = time.time()
105
106 cooldown_conditions = (
107 tag_name
108 and tag_name in self.tag_cooldowns
109 and (now - self.tag_cooldowns[tag_name]["time"]) < Cooldowns.tags
110 and self.tag_cooldowns[tag_name]["channel"] == ctx.channel.id
111 )
112
113 if cooldown_conditions:
114 return True
115 return False
116
117 if _command_on_cooldown(tag_name):
118 time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name]["time"])
119 log.warning(f"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. "
120 f"Cooldown ends in {time_left:.1f} seconds.")
121 return
122
123 await self._get_tags()
124
125 if tag_name is not None:
126 founds = await self._get_tag(tag_name)
127
128 if len(founds) == 1:
129 tag = founds[0]
130 if ctx.channel.id not in TEST_CHANNELS:
131 self.tag_cooldowns[tag_name] = {
132 "time": time.time(),
133 "channel": ctx.channel.id
134 }
135 await ctx.send(embed=Embed.from_dict(tag['embed']))
136 elif founds and len(tag_name) >= 3:
137 await ctx.send(embed=Embed(
138 title='Did you mean ...',
139 description='\n'.join(tag['title'] for tag in founds[:10])
140 ))
141
142 else:
143 tags = self._cache.values()
144 if not tags:
145 await ctx.send(embed=Embed(
146 description="**There are no tags in the database!**",
147 colour=Colour.red()
148 ))
149 else:
150 embed: Embed = Embed(title="**Current tags**")
151 await LinePaginator.paginate(
152 sorted(f"**»** {tag['title']}" for tag in tags),
153 ctx,
154 embed,
155 footer_text="To show a tag, type !tags <tagname>.",
156 empty=False,
157 max_lines=15
158 )
159
160 @tags_group.command(name='set', aliases=('add', 's'))
161 @with_role(*MODERATION_ROLES)
162 async def set_command(
163 self,
164 ctx: Context,
165 tag_name: TagNameConverter,
166 *,
167 tag_content: TagContentConverter,
168 ) -> None:
169 """Create a new tag."""
170 body = {
171 'title': tag_name.lower().strip(),
172 'embed': {
173 'title': tag_name,
174 'description': tag_content
175 }
176 }
177
178 await self.bot.api_client.post('bot/tags', json=body)
179 self._cache[tag_name.lower()] = await self.bot.api_client.get(f'bot/tags/{tag_name}')
180
181 log.debug(f"{ctx.author} successfully added the following tag to our database: \n"
182 f"tag_name: {tag_name}\n"
183 f"tag_content: '{tag_content}'\n")
184
185 await ctx.send(embed=Embed(
186 title="Tag successfully added",
187 description=f"**{tag_name}** added to tag database.",
188 colour=Colour.blurple()
189 ))
190
191 @tags_group.command(name='edit', aliases=('e', ))
192 @with_role(*MODERATION_ROLES)
193 async def edit_command(
194 self,
195 ctx: Context,
196 tag_name: TagNameConverter,
197 *,
198 tag_content: TagContentConverter,
199 ) -> None:
200 """Edit an existing tag."""
201 body = {
202 'embed': {
203 'title': tag_name,
204 'description': tag_content
205 }
206 }
207
208 await self.bot.api_client.patch(f'bot/tags/{tag_name}', json=body)
209 self._cache[tag_name.lower()] = await self.bot.api_client.get(f'bot/tags/{tag_name}')
210
211 log.debug(f"{ctx.author} successfully edited the following tag in our database: \n"
212 f"tag_name: {tag_name}\n"
213 f"tag_content: '{tag_content}'\n")
214
215 await ctx.send(embed=Embed(
216 title="Tag successfully edited",
217 description=f"**{tag_name}** edited in the database.",
218 colour=Colour.blurple()
219 ))
220
221 @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))
222 @with_role(Roles.admin, Roles.owner)
223 async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:
224 """Remove a tag from the database."""
225 await self.bot.api_client.delete(f'bot/tags/{tag_name}')
226 self._cache.pop(tag_name.lower(), None)
227
228 log.debug(f"{ctx.author} successfully deleted the tag called '{tag_name}'")
229 await ctx.send(embed=Embed(
230 title=tag_name,
231 description=f"Tag successfully removed: {tag_name}.",
232 colour=Colour.blurple()
233 ))
234
235
236 def setup(bot: Bot) -> None:
237 """Load the Tags cog."""
238 bot.add_cog(Tags(bot))
239
[end of bot/cogs/tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bot/cogs/tags.py b/bot/cogs/tags.py
--- a/bot/cogs/tags.py
+++ b/bot/cogs/tags.py
@@ -116,8 +116,10 @@
if _command_on_cooldown(tag_name):
time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name]["time"])
- log.warning(f"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. "
- f"Cooldown ends in {time_left:.1f} seconds.")
+ log.info(
+ f"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. "
+ f"Cooldown ends in {time_left:.1f} seconds."
+ )
return
await self._get_tags()
| {"golden_diff": "diff --git a/bot/cogs/tags.py b/bot/cogs/tags.py\n--- a/bot/cogs/tags.py\n+++ b/bot/cogs/tags.py\n@@ -116,8 +116,10 @@\n \n if _command_on_cooldown(tag_name):\n time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name][\"time\"])\n- log.warning(f\"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. \"\n- f\"Cooldown ends in {time_left:.1f} seconds.\")\n+ log.info(\n+ f\"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. \"\n+ f\"Cooldown ends in {time_left:.1f} seconds.\"\n+ )\n return\n \n await self._get_tags()\n", "issue": "Reduce logging level for tag cooldowns\nSentry Issue: [BOT-19](https://sentry.io/organizations/python-discord/issues/1527377135/?referrer=github_integration)\n\n```\n<REDACTED> tried to get the 'ask' tag, but the tag is on cooldown. Cooldown ends in 2.8 seconds.\n```\n", "before_files": [{"content": "import logging\nimport re\nimport time\nfrom typing import Dict, List, Optional\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import Cog, Context, group\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, Cooldowns, MODERATION_ROLES, Roles\nfrom bot.converters import TagContentConverter, TagNameConverter\nfrom bot.decorators import with_role\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\nTEST_CHANNELS = (\n Channels.devtest,\n Channels.bot,\n Channels.helpers\n)\n\nREGEX_NON_ALPHABET = re.compile(r\"[^a-z]\", re.MULTILINE & re.IGNORECASE)\n\n\nclass Tags(Cog):\n \"\"\"Save new tags and fetch existing tags.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.tag_cooldowns = {}\n\n self._cache = {}\n self._last_fetch: float = 0.0\n\n async def _get_tags(self, is_forced: bool = False) -> None:\n \"\"\"Get all tags.\"\"\"\n # refresh only when there's a more than 5m gap from last call.\n time_now: float = time.time()\n if is_forced or not self._last_fetch or time_now - self._last_fetch > 5 * 60:\n tags = await self.bot.api_client.get('bot/tags')\n self._cache = {tag['title'].lower(): tag for tag in tags}\n self._last_fetch = time_now\n\n @staticmethod\n def _fuzzy_search(search: str, target: str) -> int:\n \"\"\"A simple scoring algorithm based on how many letters are found / total, with order in mind.\"\"\"\n current, index = 0, 0\n _search = REGEX_NON_ALPHABET.sub('', search.lower())\n _targets = iter(REGEX_NON_ALPHABET.split(target.lower()))\n _target = next(_targets)\n try:\n while True:\n while index < len(_target) and _search[current] == _target[index]:\n current += 1\n index += 1\n index, _target = 0, next(_targets)\n except (StopIteration, IndexError):\n pass\n return current / len(_search) * 100\n\n def _get_suggestions(self, tag_name: str, thresholds: Optional[List[int]] = None) -> List[str]:\n \"\"\"Return a list of suggested tags.\"\"\"\n scores: Dict[str, int] = {\n tag_title: Tags._fuzzy_search(tag_name, tag['title'])\n for tag_title, tag in self._cache.items()\n }\n\n thresholds = thresholds or [100, 90, 80, 70, 60]\n\n for threshold in thresholds:\n suggestions = [\n self._cache[tag_title]\n for tag_title, matching_score in scores.items()\n if matching_score >= threshold\n ]\n if suggestions:\n return suggestions\n\n return []\n\n async def _get_tag(self, tag_name: str) -> list:\n \"\"\"Get a specific tag.\"\"\"\n await self._get_tags()\n found = [self._cache.get(tag_name.lower(), None)]\n if not found[0]:\n return self._get_suggestions(tag_name)\n return found\n\n @group(name='tags', aliases=('tag', 
't'), invoke_without_command=True)\n async def tags_group(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Show all known tags, a single tag, or run a subcommand.\"\"\"\n await ctx.invoke(self.get_command, tag_name=tag_name)\n\n @tags_group.command(name='get', aliases=('show', 'g'))\n async def get_command(self, ctx: Context, *, tag_name: TagNameConverter = None) -> None:\n \"\"\"Get a specified tag, or a list of all tags if no tag is specified.\"\"\"\n def _command_on_cooldown(tag_name: str) -> bool:\n \"\"\"\n Check if the command is currently on cooldown, on a per-tag, per-channel basis.\n\n The cooldown duration is set in constants.py.\n \"\"\"\n now = time.time()\n\n cooldown_conditions = (\n tag_name\n and tag_name in self.tag_cooldowns\n and (now - self.tag_cooldowns[tag_name][\"time\"]) < Cooldowns.tags\n and self.tag_cooldowns[tag_name][\"channel\"] == ctx.channel.id\n )\n\n if cooldown_conditions:\n return True\n return False\n\n if _command_on_cooldown(tag_name):\n time_left = Cooldowns.tags - (time.time() - self.tag_cooldowns[tag_name][\"time\"])\n log.warning(f\"{ctx.author} tried to get the '{tag_name}' tag, but the tag is on cooldown. \"\n f\"Cooldown ends in {time_left:.1f} seconds.\")\n return\n\n await self._get_tags()\n\n if tag_name is not None:\n founds = await self._get_tag(tag_name)\n\n if len(founds) == 1:\n tag = founds[0]\n if ctx.channel.id not in TEST_CHANNELS:\n self.tag_cooldowns[tag_name] = {\n \"time\": time.time(),\n \"channel\": ctx.channel.id\n }\n await ctx.send(embed=Embed.from_dict(tag['embed']))\n elif founds and len(tag_name) >= 3:\n await ctx.send(embed=Embed(\n title='Did you mean ...',\n description='\\n'.join(tag['title'] for tag in founds[:10])\n ))\n\n else:\n tags = self._cache.values()\n if not tags:\n await ctx.send(embed=Embed(\n description=\"**There are no tags in the database!**\",\n colour=Colour.red()\n ))\n else:\n embed: Embed = Embed(title=\"**Current tags**\")\n await LinePaginator.paginate(\n sorted(f\"**\u00bb** {tag['title']}\" for tag in tags),\n ctx,\n embed,\n footer_text=\"To show a tag, type !tags <tagname>.\",\n empty=False,\n max_lines=15\n )\n\n @tags_group.command(name='set', aliases=('add', 's'))\n @with_role(*MODERATION_ROLES)\n async def set_command(\n self,\n ctx: Context,\n tag_name: TagNameConverter,\n *,\n tag_content: TagContentConverter,\n ) -> None:\n \"\"\"Create a new tag.\"\"\"\n body = {\n 'title': tag_name.lower().strip(),\n 'embed': {\n 'title': tag_name,\n 'description': tag_content\n }\n }\n\n await self.bot.api_client.post('bot/tags', json=body)\n self._cache[tag_name.lower()] = await self.bot.api_client.get(f'bot/tags/{tag_name}')\n\n log.debug(f\"{ctx.author} successfully added the following tag to our database: \\n\"\n f\"tag_name: {tag_name}\\n\"\n f\"tag_content: '{tag_content}'\\n\")\n\n await ctx.send(embed=Embed(\n title=\"Tag successfully added\",\n description=f\"**{tag_name}** added to tag database.\",\n colour=Colour.blurple()\n ))\n\n @tags_group.command(name='edit', aliases=('e', ))\n @with_role(*MODERATION_ROLES)\n async def edit_command(\n self,\n ctx: Context,\n tag_name: TagNameConverter,\n *,\n tag_content: TagContentConverter,\n ) -> None:\n \"\"\"Edit an existing tag.\"\"\"\n body = {\n 'embed': {\n 'title': tag_name,\n 'description': tag_content\n }\n }\n\n await self.bot.api_client.patch(f'bot/tags/{tag_name}', json=body)\n self._cache[tag_name.lower()] = await self.bot.api_client.get(f'bot/tags/{tag_name}')\n\n log.debug(f\"{ctx.author} successfully 
edited the following tag in our database: \\n\"\n f\"tag_name: {tag_name}\\n\"\n f\"tag_content: '{tag_content}'\\n\")\n\n await ctx.send(embed=Embed(\n title=\"Tag successfully edited\",\n description=f\"**{tag_name}** edited in the database.\",\n colour=Colour.blurple()\n ))\n\n @tags_group.command(name='delete', aliases=('remove', 'rm', 'd'))\n @with_role(Roles.admin, Roles.owner)\n async def delete_command(self, ctx: Context, *, tag_name: TagNameConverter) -> None:\n \"\"\"Remove a tag from the database.\"\"\"\n await self.bot.api_client.delete(f'bot/tags/{tag_name}')\n self._cache.pop(tag_name.lower(), None)\n\n log.debug(f\"{ctx.author} successfully deleted the tag called '{tag_name}'\")\n await ctx.send(embed=Embed(\n title=tag_name,\n description=f\"Tag successfully removed: {tag_name}.\",\n colour=Colour.blurple()\n ))\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Tags cog.\"\"\"\n bot.add_cog(Tags(bot))\n", "path": "bot/cogs/tags.py"}]} | 3,186 | 188 |
gh_patches_debug_20748 | rasdani/github-patches | git_diff | WordPress__openverse-api-318 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add audio to the ingestion server tests
Audio is currently not included in the ingestion server integration or unit tests. We should update these tests to include support for audio. Separate PRs for unit and integration tests would be best. Below is some information on these tests and how to work with them.
## Running the tests
To run the tests and get a sense of what they do, do the following steps:
```bash
cd ingestion_server
pipenv install
pipenv run python3 test/integration_tests.py
```
This is currently blocked by #143. I would've liked to run the tests to learn a bit more about how they work but this isn't yet possible.
</issue>
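One possible shape for the requested coverage — purely hypothetical, since the real suite's structure isn't shown here — is to run the same assertions for both media types with `unittest.subTest`, which fits the `python3 test/integration_tests.py` invocation above:

```python
# Hypothetical sketch: the names below are placeholders, not the actual test suite.
import unittest

MEDIA_TYPES = ["image", "audio"]

class TestIngestionModels(unittest.TestCase):
    def _check_model(self, model):
        # Stand-in for whatever per-model request/assertion the real tests make.
        self.assertIn(model, MEDIA_TYPES)

    def test_all_media_types(self):
        for model in MEDIA_TYPES:
            with self.subTest(model=model):
                self._check_model(model)

if __name__ == "__main__":
    unittest.main()
```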
<code>
[start of sample_data/make_sample_pop.py]
1 import csv
2 import random
3
4
5 in_tsv = open("sample_data.csv", "r")
6 out_tsv = open("sample_popularity_data.csv", "w+")
7 output_fields = ["identifier", "normalized_popularity"]
8 reader = csv.DictReader(in_tsv, delimiter=",")
9 writer = csv.DictWriter(out_tsv, delimiter=",", fieldnames=output_fields)
10 writer.writeheader()
11 for row in reader:
12 pop = random.uniform(0, 100)
13 out_row = {"identifier": row["identifier"], "normalized_popularity": pop}
14 writer.writerow(out_row)
15
[end of sample_data/make_sample_pop.py]
[start of ingestion_server/ingestion_server/tasks.py]
1 """
2 Simple in-memory tracking of executed tasks.
3 """
4
5 import datetime as dt
6 import logging
7 from enum import Enum
8 from multiprocessing import Process
9
10 import requests
11
12 from ingestion_server.indexer import TableIndexer, elasticsearch_connect
13 from ingestion_server.ingest import reload_upstream
14
15
16 class TaskTypes(Enum):
17 # Completely reindex all data for a given model.
18 REINDEX = 0
19 # Reindex updates to a model from the database since a certain date.
20 UPDATE_INDEX = 1
21 # Download the latest copy of the data from the upstream database, then
22 # completely reindex the newly imported data.
23 INGEST_UPSTREAM = 2
24 # Create indices in Elasticsearch for QA tests.
25 # This is not intended for production use, but can be safely executed in a
26 # production environment without consequence.
27 LOAD_TEST_DATA = 3
28
29
30 class TaskTracker:
31 def __init__(self):
32 self.id_task = {}
33 self.id_action = {}
34 self.id_progress = {}
35 self.id_start_time = {}
36 self.id_finish_time = {}
37
38 def add_task(self, task, task_id, action, progress, finish_time):
39 self._prune_old_tasks()
40 self.id_task[task_id] = task
41 self.id_action[task_id] = action
42 self.id_progress[task_id] = progress
43 self.id_start_time[task_id] = dt.datetime.utcnow().timestamp()
44 self.id_finish_time[task_id] = finish_time
45 return task_id
46
47 def _prune_old_tasks(self):
48 pass
49
50 def list_task_statuses(self):
51 self._prune_old_tasks()
52 results = []
53 for _id, task in self.id_task.items():
54 percent_completed = self.id_progress[_id].value
55 active = task.is_alive()
56 start_time = self.id_start_time[_id]
57 finish_time = self.id_finish_time[_id].value
58 results.append(
59 {
60 "task_id": _id,
61 "active": active,
62 "action": self.id_action[_id],
63 "progress": percent_completed,
64 "error": percent_completed < 100 and not active,
65 "start_time": start_time,
66 "finish_time": finish_time,
67 }
68 )
69 sorted_results = sorted(results, key=lambda x: x["finish_time"])
70
71 to_utc = dt.datetime.utcfromtimestamp
72
73 def render_date(x):
74 return to_utc(x) if x != 0.0 else None
75
76 # Convert date to a readable format
77 for idx, task in enumerate(sorted_results):
78 start_time = task["start_time"]
79 finish_time = task["finish_time"]
80 sorted_results[idx]["start_time"] = str(render_date(start_time))
81 sorted_results[idx]["finish_time"] = str(render_date(finish_time))
82
83 return sorted_results
84
85
86 class Task(Process):
87 def __init__(
88 self, model, task_type, since_date, progress, task_id, finish_time, callback_url
89 ):
90 Process.__init__(self)
91 self.model = model
92 self.task_type = task_type
93 self.since_date = since_date
94 self.progress = progress
95 self.task_id = task_id
96 self.finish_time = finish_time
97 self.callback_url = callback_url
98
99 def run(self):
100 # Map task types to actions.
101 elasticsearch = elasticsearch_connect()
102 indexer = TableIndexer(
103 elasticsearch, self.model, self.progress, self.finish_time
104 )
105 if self.task_type == TaskTypes.REINDEX:
106 indexer.reindex(self.model)
107 elif self.task_type == TaskTypes.UPDATE_INDEX:
108 indexer.update(self.model, self.since_date)
109 elif self.task_type == TaskTypes.INGEST_UPSTREAM:
110 reload_upstream(self.model)
111 if self.model == "audio":
112 reload_upstream("audioset", approach="basic")
113 indexer.reindex(self.model)
114 elif self.task_type == TaskTypes.LOAD_TEST_DATA:
115 indexer.load_test_data(self.model)
116 logging.info(f"Task {self.task_id} exited.")
117 if self.callback_url:
118 try:
119 requests.post(self.callback_url)
120 except requests.exceptions.RequestException as e:
121 logging.error("Failed to send callback!")
122 logging.error(e)
123
[end of ingestion_server/ingestion_server/tasks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ingestion_server/ingestion_server/tasks.py b/ingestion_server/ingestion_server/tasks.py
--- a/ingestion_server/ingestion_server/tasks.py
+++ b/ingestion_server/ingestion_server/tasks.py
@@ -116,7 +116,9 @@
logging.info(f"Task {self.task_id} exited.")
if self.callback_url:
try:
- requests.post(self.callback_url)
+ logging.info("Sending callback request")
+ res = requests.post(self.callback_url)
+ logging.info(f"Response: {res.text}")
except requests.exceptions.RequestException as e:
logging.error("Failed to send callback!")
logging.error(e)
diff --git a/sample_data/make_sample_pop.py b/sample_data/make_sample_pop.py
deleted file mode 100644
--- a/sample_data/make_sample_pop.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import csv
-import random
-
-
-in_tsv = open("sample_data.csv", "r")
-out_tsv = open("sample_popularity_data.csv", "w+")
-output_fields = ["identifier", "normalized_popularity"]
-reader = csv.DictReader(in_tsv, delimiter=",")
-writer = csv.DictWriter(out_tsv, delimiter=",", fieldnames=output_fields)
-writer.writeheader()
-for row in reader:
- pop = random.uniform(0, 100)
- out_row = {"identifier": row["identifier"], "normalized_popularity": pop}
- writer.writerow(out_row)
| {"golden_diff": "diff --git a/ingestion_server/ingestion_server/tasks.py b/ingestion_server/ingestion_server/tasks.py\n--- a/ingestion_server/ingestion_server/tasks.py\n+++ b/ingestion_server/ingestion_server/tasks.py\n@@ -116,7 +116,9 @@\n logging.info(f\"Task {self.task_id} exited.\")\n if self.callback_url:\n try:\n- requests.post(self.callback_url)\n+ logging.info(\"Sending callback request\")\n+ res = requests.post(self.callback_url)\n+ logging.info(f\"Response: {res.text}\")\n except requests.exceptions.RequestException as e:\n logging.error(\"Failed to send callback!\")\n logging.error(e)\ndiff --git a/sample_data/make_sample_pop.py b/sample_data/make_sample_pop.py\ndeleted file mode 100644\n--- a/sample_data/make_sample_pop.py\n+++ /dev/null\n@@ -1,14 +0,0 @@\n-import csv\n-import random\n-\n-\n-in_tsv = open(\"sample_data.csv\", \"r\")\n-out_tsv = open(\"sample_popularity_data.csv\", \"w+\")\n-output_fields = [\"identifier\", \"normalized_popularity\"]\n-reader = csv.DictReader(in_tsv, delimiter=\",\")\n-writer = csv.DictWriter(out_tsv, delimiter=\",\", fieldnames=output_fields)\n-writer.writeheader()\n-for row in reader:\n- pop = random.uniform(0, 100)\n- out_row = {\"identifier\": row[\"identifier\"], \"normalized_popularity\": pop}\n- writer.writerow(out_row)\n", "issue": "Add audio to the ingestion server tests\nAudio is currently not included in the ingestion server integration or unit tests. We should update these tests to include support for audio. Separate PRs for unit and integration tests would be best. Below is some information on these tests and how to work with them.\r\n\r\n## Running the tests \r\n\r\nTo run the tests and get a sense of what they do, do the following steps:\r\n\r\n```bash\r\ncd ingestion_server\r\npipenv install\r\npipenv run python3 test/integration_tests.py\r\n```\r\n\r\nThis is currently blocked by #143. 
I would've liked to run the tests to learn a bit more about how they work but this isn't yet possible.\n", "before_files": [{"content": "import csv\nimport random\n\n\nin_tsv = open(\"sample_data.csv\", \"r\")\nout_tsv = open(\"sample_popularity_data.csv\", \"w+\")\noutput_fields = [\"identifier\", \"normalized_popularity\"]\nreader = csv.DictReader(in_tsv, delimiter=\",\")\nwriter = csv.DictWriter(out_tsv, delimiter=\",\", fieldnames=output_fields)\nwriter.writeheader()\nfor row in reader:\n pop = random.uniform(0, 100)\n out_row = {\"identifier\": row[\"identifier\"], \"normalized_popularity\": pop}\n writer.writerow(out_row)\n", "path": "sample_data/make_sample_pop.py"}, {"content": "\"\"\"\nSimple in-memory tracking of executed tasks.\n\"\"\"\n\nimport datetime as dt\nimport logging\nfrom enum import Enum\nfrom multiprocessing import Process\n\nimport requests\n\nfrom ingestion_server.indexer import TableIndexer, elasticsearch_connect\nfrom ingestion_server.ingest import reload_upstream\n\n\nclass TaskTypes(Enum):\n # Completely reindex all data for a given model.\n REINDEX = 0\n # Reindex updates to a model from the database since a certain date.\n UPDATE_INDEX = 1\n # Download the latest copy of the data from the upstream database, then\n # completely reindex the newly imported data.\n INGEST_UPSTREAM = 2\n # Create indices in Elasticsearch for QA tests.\n # This is not intended for production use, but can be safely executed in a\n # production environment without consequence.\n LOAD_TEST_DATA = 3\n\n\nclass TaskTracker:\n def __init__(self):\n self.id_task = {}\n self.id_action = {}\n self.id_progress = {}\n self.id_start_time = {}\n self.id_finish_time = {}\n\n def add_task(self, task, task_id, action, progress, finish_time):\n self._prune_old_tasks()\n self.id_task[task_id] = task\n self.id_action[task_id] = action\n self.id_progress[task_id] = progress\n self.id_start_time[task_id] = dt.datetime.utcnow().timestamp()\n self.id_finish_time[task_id] = finish_time\n return task_id\n\n def _prune_old_tasks(self):\n pass\n\n def list_task_statuses(self):\n self._prune_old_tasks()\n results = []\n for _id, task in self.id_task.items():\n percent_completed = self.id_progress[_id].value\n active = task.is_alive()\n start_time = self.id_start_time[_id]\n finish_time = self.id_finish_time[_id].value\n results.append(\n {\n \"task_id\": _id,\n \"active\": active,\n \"action\": self.id_action[_id],\n \"progress\": percent_completed,\n \"error\": percent_completed < 100 and not active,\n \"start_time\": start_time,\n \"finish_time\": finish_time,\n }\n )\n sorted_results = sorted(results, key=lambda x: x[\"finish_time\"])\n\n to_utc = dt.datetime.utcfromtimestamp\n\n def render_date(x):\n return to_utc(x) if x != 0.0 else None\n\n # Convert date to a readable format\n for idx, task in enumerate(sorted_results):\n start_time = task[\"start_time\"]\n finish_time = task[\"finish_time\"]\n sorted_results[idx][\"start_time\"] = str(render_date(start_time))\n sorted_results[idx][\"finish_time\"] = str(render_date(finish_time))\n\n return sorted_results\n\n\nclass Task(Process):\n def __init__(\n self, model, task_type, since_date, progress, task_id, finish_time, callback_url\n ):\n Process.__init__(self)\n self.model = model\n self.task_type = task_type\n self.since_date = since_date\n self.progress = progress\n self.task_id = task_id\n self.finish_time = finish_time\n self.callback_url = callback_url\n\n def run(self):\n # Map task types to actions.\n elasticsearch = elasticsearch_connect()\n 
indexer = TableIndexer(\n elasticsearch, self.model, self.progress, self.finish_time\n )\n if self.task_type == TaskTypes.REINDEX:\n indexer.reindex(self.model)\n elif self.task_type == TaskTypes.UPDATE_INDEX:\n indexer.update(self.model, self.since_date)\n elif self.task_type == TaskTypes.INGEST_UPSTREAM:\n reload_upstream(self.model)\n if self.model == \"audio\":\n reload_upstream(\"audioset\", approach=\"basic\")\n indexer.reindex(self.model)\n elif self.task_type == TaskTypes.LOAD_TEST_DATA:\n indexer.load_test_data(self.model)\n logging.info(f\"Task {self.task_id} exited.\")\n if self.callback_url:\n try:\n requests.post(self.callback_url)\n except requests.exceptions.RequestException as e:\n logging.error(\"Failed to send callback!\")\n logging.error(e)\n", "path": "ingestion_server/ingestion_server/tasks.py"}]} | 2,019 | 335 |
gh_patches_debug_41905 | rasdani/github-patches | git_diff | pytorch__ignite-478 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve create_supervised_trainer with optional output_transform
Following [the discussion](https://github.com/pytorch/ignite/pull/476#discussion_r272108999), idea is to give more flexibility to users who are using `create_supervised_trainer`:
```python
def default_output_transform(x, y, y_pred, loss):
return loss.item()
def create_supervised_trainer(model, optimizer, loss_fn,
device=None, non_blocking=False, prepare_batch=_prepare_batch,
output_transform=default_output_transform):
if device:
model.to(device)
def _update(engine, batch):
model.train()
optimizer.zero_grad()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
loss = loss_fn(y_pred, y)
loss.backward()
optimizer.step()
return output_transform(x, y, y_pred, loss)
return Engine(_update)
```
cc @IlyaOvodov
</issue>
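Assuming the `output_transform` signature proposed above lands as written, a minimal usage sketch (with `model`, `optimizer` and `loss_fn` defined elsewhere) could look like the following — returning a dict makes extra values available to handlers through `engine.state.output`:

```python
from ignite.engine import Events, create_supervised_trainer

# Sketch only: relies on the proposed output_transform argument.
trainer = create_supervised_trainer(
    model, optimizer, loss_fn,
    output_transform=lambda x, y, y_pred, loss: {"loss": loss.item(), "y_pred": y_pred},
)


@trainer.on(Events.ITERATION_COMPLETED)
def log_batch_loss(engine):
    # engine.state.output is whatever output_transform returned for this batch
    print(f"batch loss: {engine.state.output['loss']:.4f}")
```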
<code>
[start of ignite/engine/__init__.py]
1 import torch
2
3 from ignite.engine.engine import Engine, State, Events
4 from ignite.utils import convert_tensor
5
6
7 def _prepare_batch(batch, device=None, non_blocking=False):
8 """Prepare batch for training: pass to a device with options.
9
10 """
11 x, y = batch
12 return (convert_tensor(x, device=device, non_blocking=non_blocking),
13 convert_tensor(y, device=device, non_blocking=non_blocking))
14
15
16 def create_supervised_trainer(model, optimizer, loss_fn,
17 device=None, non_blocking=False,
18 prepare_batch=_prepare_batch):
19 """
20 Factory function for creating a trainer for supervised models.
21
22 Args:
23 model (`torch.nn.Module`): the model to train.
24 optimizer (`torch.optim.Optimizer`): the optimizer to use.
25 loss_fn (torch.nn loss function): the loss function to use.
26 device (str, optional): device type specification (default: None).
27 Applies to both model and batches.
28 non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
29 with respect to the host. For other cases, this argument has no effect.
30 prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
31 tuple of tensors `(batch_x, batch_y)`.
32
33 Note: `engine.state.output` for this engine is the loss of the processed batch.
34
35 Returns:
36 Engine: a trainer engine with supervised update function.
37 """
38 if device:
39 model.to(device)
40
41 def _update(engine, batch):
42 model.train()
43 optimizer.zero_grad()
44 x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
45 y_pred = model(x)
46 loss = loss_fn(y_pred, y)
47 loss.backward()
48 optimizer.step()
49 return loss.item()
50
51 return Engine(_update)
52
53
54 def create_supervised_evaluator(model, metrics={},
55 device=None, non_blocking=False,
56 prepare_batch=_prepare_batch):
57 """
58 Factory function for creating an evaluator for supervised models.
59
60 Args:
61 model (`torch.nn.Module`): the model to train.
62 metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.
63 device (str, optional): device type specification (default: None).
64 Applies to both model and batches.
65 non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously
66 with respect to the host. For other cases, this argument has no effect.
67 prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
68 tuple of tensors `(batch_x, batch_y)`.
69
70 Note: `engine.state.output` for this engine is a tuple of `(batch_pred, batch_y)`.
71
72 Returns:
73 Engine: an evaluator engine with supervised inference function.
74 """
75 if device:
76 model.to(device)
77
78 def _inference(engine, batch):
79 model.eval()
80 with torch.no_grad():
81 x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
82 y_pred = model(x)
83 return y_pred, y
84
85 engine = Engine(_inference)
86
87 for name, metric in metrics.items():
88 metric.attach(engine, name)
89
90 return engine
91
[end of ignite/engine/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ignite/engine/__init__.py b/ignite/engine/__init__.py
--- a/ignite/engine/__init__.py
+++ b/ignite/engine/__init__.py
@@ -15,7 +15,8 @@
def create_supervised_trainer(model, optimizer, loss_fn,
device=None, non_blocking=False,
- prepare_batch=_prepare_batch):
+ prepare_batch=_prepare_batch,
+ output_transform=lambda x, y, y_pred, loss: loss.item()):
"""
Factory function for creating a trainer for supervised models.
@@ -29,8 +30,11 @@
with respect to the host. For other cases, this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
+ output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value
+ to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
- Note: `engine.state.output` for this engine is the loss of the processed batch.
+ Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is the loss
+ of the processed batch by default.
Returns:
Engine: a trainer engine with supervised update function.
@@ -46,14 +50,15 @@
loss = loss_fn(y_pred, y)
loss.backward()
optimizer.step()
- return loss.item()
+ return output_transform(x, y, y_pred, loss)
return Engine(_update)
def create_supervised_evaluator(model, metrics={},
device=None, non_blocking=False,
- prepare_batch=_prepare_batch):
+ prepare_batch=_prepare_batch,
+ output_transform=lambda x, y, y_pred: (y_pred, y,)):
"""
Factory function for creating an evaluator for supervised models.
@@ -66,8 +71,12 @@
with respect to the host. For other cases, this argument has no effect.
prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
+ output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value
+ to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits
+ output expected by metrics. If you change it you should use `output_transform` in metrics.
- Note: `engine.state.output` for this engine is a tuple of `(batch_pred, batch_y)`.
+ Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is
+ a tuple of `(batch_pred, batch_y)` by default.
Returns:
Engine: an evaluator engine with supervised inference function.
@@ -80,7 +89,7 @@
with torch.no_grad():
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
- return y_pred, y
+ return output_transform(x, y, y_pred)
engine = Engine(_inference)
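Note that with the change above the evaluator keeps `(y_pred, y)` as its default output, so metrics attached through `create_supervised_evaluator` keep working unchanged; only callers who override `output_transform` need to give their metrics a matching `output_transform`. A hypothetical sketch:

```python
from ignite.engine import create_supervised_evaluator
from ignite.metrics import Accuracy

# Sketch only: the dict layout is an assumption, and `model` is defined elsewhere.
evaluator = create_supervised_evaluator(
    model,
    metrics={"acc": Accuracy(output_transform=lambda out: (out["y_pred"], out["y"]))},
    output_transform=lambda x, y, y_pred: {"x": x, "y": y, "y_pred": y_pred},
)
```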
| {"golden_diff": "diff --git a/ignite/engine/__init__.py b/ignite/engine/__init__.py\n--- a/ignite/engine/__init__.py\n+++ b/ignite/engine/__init__.py\n@@ -15,7 +15,8 @@\n \n def create_supervised_trainer(model, optimizer, loss_fn,\n device=None, non_blocking=False,\n- prepare_batch=_prepare_batch):\n+ prepare_batch=_prepare_batch,\n+ output_transform=lambda x, y, y_pred, loss: loss.item()):\n \"\"\"\n Factory function for creating a trainer for supervised models.\n \n@@ -29,8 +30,11 @@\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n+ output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value\n+ to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.\n \n- Note: `engine.state.output` for this engine is the loss of the processed batch.\n+ Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is the loss\n+ of the processed batch by default.\n \n Returns:\n Engine: a trainer engine with supervised update function.\n@@ -46,14 +50,15 @@\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n- return loss.item()\n+ return output_transform(x, y, y_pred, loss)\n \n return Engine(_update)\n \n \n def create_supervised_evaluator(model, metrics={},\n device=None, non_blocking=False,\n- prepare_batch=_prepare_batch):\n+ prepare_batch=_prepare_batch,\n+ output_transform=lambda x, y, y_pred: (y_pred, y,)):\n \"\"\"\n Factory function for creating an evaluator for supervised models.\n \n@@ -66,8 +71,12 @@\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n+ output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value\n+ to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits\n+ output expected by metrics. 
If you change it you should use `output_transform` in metrics.\n \n- Note: `engine.state.output` for this engine is a tuple of `(batch_pred, batch_y)`.\n+ Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is\n+ a tuple of `(batch_pred, batch_y)` by default.\n \n Returns:\n Engine: an evaluator engine with supervised inference function.\n@@ -80,7 +89,7 @@\n with torch.no_grad():\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n- return y_pred, y\n+ return output_transform(x, y, y_pred)\n \n engine = Engine(_inference)\n", "issue": "Improve create_supervised_trainer with optional output_transform\nFollowing [the discussion](https://github.com/pytorch/ignite/pull/476#discussion_r272108999), idea is to give more flexibility to users who are using `create_supervised_trainer`:\r\n```python\r\ndef default_output_transform(x, y, y_pred, loss):\r\n return loss.item() \r\n\r\n\r\ndef create_supervised_trainer(model, optimizer, loss_fn,\r\n device=None, non_blocking=False, prepare_batch=_prepare_batch, \r\n output_transform=default_output_transform):\r\n if device:\r\n model.to(device)\r\n\r\n def _update(engine, batch):\r\n model.train()\r\n optimizer.zero_grad()\r\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\r\n y_pred = model(x)\r\n loss = loss_fn(y_pred, y)\r\n loss.backward()\r\n optimizer.step()\r\n return output_transform(x, y, y_pred, loss)\r\n\r\n return Engine(_update) \r\n```\r\n\r\ncc @IlyaOvodov\nImprove create_supervised_trainer with optional output_transform\nFollowing [the discussion](https://github.com/pytorch/ignite/pull/476#discussion_r272108999), idea is to give more flexibility to users who are using `create_supervised_trainer`:\r\n```python\r\ndef default_output_transform(x, y, y_pred, loss):\r\n return loss.item() \r\n\r\n\r\ndef create_supervised_trainer(model, optimizer, loss_fn,\r\n device=None, non_blocking=False, prepare_batch=_prepare_batch, \r\n output_transform=default_output_transform):\r\n if device:\r\n model.to(device)\r\n\r\n def _update(engine, batch):\r\n model.train()\r\n optimizer.zero_grad()\r\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\r\n y_pred = model(x)\r\n loss = loss_fn(y_pred, y)\r\n loss.backward()\r\n optimizer.step()\r\n return output_transform(x, y, y_pred, loss)\r\n\r\n return Engine(_update) \r\n```\r\n\r\ncc @IlyaOvodov\n", "before_files": [{"content": "import torch\n\nfrom ignite.engine.engine import Engine, State, Events\nfrom ignite.utils import convert_tensor\n\n\ndef _prepare_batch(batch, device=None, non_blocking=False):\n \"\"\"Prepare batch for training: pass to a device with options.\n\n \"\"\"\n x, y = batch\n return (convert_tensor(x, device=device, non_blocking=non_blocking),\n convert_tensor(y, device=device, non_blocking=non_blocking))\n\n\ndef create_supervised_trainer(model, optimizer, loss_fn,\n device=None, non_blocking=False,\n prepare_batch=_prepare_batch):\n \"\"\"\n Factory function for creating a trainer for supervised models.\n\n Args:\n model (`torch.nn.Module`): the model to train.\n optimizer (`torch.optim.Optimizer`): the optimizer to use.\n loss_fn (torch.nn loss function): the loss function to use.\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. 
For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n\n Note: `engine.state.output` for this engine is the loss of the processed batch.\n\n Returns:\n Engine: a trainer engine with supervised update function.\n \"\"\"\n if device:\n model.to(device)\n\n def _update(engine, batch):\n model.train()\n optimizer.zero_grad()\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n return loss.item()\n\n return Engine(_update)\n\n\ndef create_supervised_evaluator(model, metrics={},\n device=None, non_blocking=False,\n prepare_batch=_prepare_batch):\n \"\"\"\n Factory function for creating an evaluator for supervised models.\n\n Args:\n model (`torch.nn.Module`): the model to train.\n metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n\n Note: `engine.state.output` for this engine is a tuple of `(batch_pred, batch_y)`.\n\n Returns:\n Engine: an evaluator engine with supervised inference function.\n \"\"\"\n if device:\n model.to(device)\n\n def _inference(engine, batch):\n model.eval()\n with torch.no_grad():\n x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)\n y_pred = model(x)\n return y_pred, y\n\n engine = Engine(_inference)\n\n for name, metric in metrics.items():\n metric.attach(engine, name)\n\n return engine\n", "path": "ignite/engine/__init__.py"}]} | 1,867 | 728 |
gh_patches_debug_33068 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-579 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add some support for programmatic instrumentation
[This comment](https://github.com/open-telemetry/opentelemetry-python/pull/551#issuecomment-608993167) in #551 raised the concern about making it possible to use the instrumentors in a programmatic way too, not only automatically.
Using them programmatically means adding code directly in the application that performs instrumentation, for example, [this](https://github.com/open-telemetry/opentelemetry-python/pull/551/files#diff-906a392e80f621fdbc1ea38af3c41081R19) or [this](https://github.com/open-telemetry/opentelemetry-python/pull/551/files#diff-906a392e80f621fdbc1ea38af3c41081L22).
It is very likely that this programmatic use of the instrumentors needs to be implemented in a framework-specific way, this means that the way we can do this kind of instrumentation is different for Flask and is different too for Django, for example. That means that it is possible that we end up having just special code for each framework as opposed to having a universal mechanism that works for every framework (this would mean that this issue could end up being closed without any fix).
This programmatic instrumentation mechanisms can be more sophisticated, for example, in an OpenTracing-related project [here](https://github.com/opentracing-contrib/python-django/blob/master/README.rst#tracing-individual-requests) it is shown how the programmatic instrumentation mechanism allows the user to select which application functions are to be traced.
In summary, what we want to introduce with this issue, is the capability to perform instrumentation without having to directly write the code that creates the spans but also without using the `opentelemetry-auto-instrumentation` command, but with a different mechanism (probably specifically tailored for each framework (Django, Flask, etc.)) like decorators or maybe Python context managers (the ones that use `with`, to avoid any confusion with the OpenTelemetry concept of context :slightly_smiling_face:).
@mauriciovasquezbernal @codeboten we have just talked about this :+1:
Also, to make things clear, with this approach we would have 3 ways of doing instrumentation:
1. The "normal" way (instantiating the spans directly with OpenTelemetry-provided functions)
2. The automatic way (using the `opentelemetry-auto-instrumentation` command)
3. The programmatic way (using a mechanism as described here to instrument specific frameworks)
</issue>
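For reference, the programmatic entry point that already ships with the Flask instrumentation (see its docstring in the code below) is simply calling the instrumentor from application code; the proposal here is to extend this with framework-specific options instead of relying on the `opentelemetry-auto-instrumentation` command. A sketch of today's programmatic use:

```python
from opentelemetry.ext.flask import FlaskInstrumentor

instrumentor = FlaskInstrumentor()
instrumentor.instrument()    # patches flask.Flask; run before importing the app
# ... import and run the Flask application ...
instrumentor.uninstrument()  # restores the original flask.Flask
```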
<code>
[start of opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # type: ignore
15
16 """
17 OpenTelemetry Base Instrumentor
18 """
19
20 from abc import ABC, abstractmethod
21 from logging import getLogger
22
23 _LOG = getLogger(__name__)
24
25
26 class BaseInstrumentor(ABC):
27 """An ABC for instrumentors"""
28
29 def __init__(self):
30 self._is_instrumented = False
31
32 @abstractmethod
33 def _instrument(self) -> None:
34 """Instrument"""
35
36 @abstractmethod
37 def _uninstrument(self) -> None:
38 """Uninstrument"""
39
40 def instrument(self) -> None:
41 """Instrument"""
42
43 if not self._is_instrumented:
44 result = self._instrument()
45 self._is_instrumented = True
46 return result
47
48 _LOG.warning("Attempting to instrument while already instrumented")
49
50 return None
51
52 def uninstrument(self) -> None:
53 """Uninstrument"""
54
55 if self._is_instrumented:
56 result = self._uninstrument()
57 self._is_instrumented = False
58 return result
59
60 _LOG.warning("Attempting to uninstrument while already uninstrumented")
61
62 return None
63
64
65 __all__ = ["BaseInstrumentor"]
66
[end of opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py]
[start of ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # Note: This package is not named "flask" because of
16 # https://github.com/PyCQA/pylint/issues/2648
17
18 """
19 This library builds on the OpenTelemetry WSGI middleware to track web requests
20 in Flask applications. In addition to opentelemetry-ext-wsgi, it supports
21 flask-specific features such as:
22
23 * The Flask endpoint name is used as the Span name.
24 * The ``http.route`` Span attribute is set so that one can see which URL rule
25 matched a request.
26
27 Usage
28 -----
29
30 .. code-block:: python
31
32 from opentelemetry.ext.flask import FlaskInstrumentor
33 FlaskInstrumentor().instrument() # This needs to be executed before importing Flask
34 from flask import Flask
35
36 app = Flask(__name__)
37
38 @app.route("/")
39 def hello():
40 return "Hello!"
41
42 if __name__ == "__main__":
43 app.run(debug=True)
44
45 API
46 ---
47 """
48
49 import logging
50
51 import flask
52
53 import opentelemetry.ext.wsgi as otel_wsgi
54 from opentelemetry import context, propagators, trace
55 from opentelemetry.auto_instrumentation.instrumentor import BaseInstrumentor
56 from opentelemetry.ext.flask.version import __version__
57 from opentelemetry.util import time_ns
58
59 logger = logging.getLogger(__name__)
60
61 _ENVIRON_STARTTIME_KEY = "opentelemetry-flask.starttime_key"
62 _ENVIRON_SPAN_KEY = "opentelemetry-flask.span_key"
63 _ENVIRON_ACTIVATION_KEY = "opentelemetry-flask.activation_key"
64 _ENVIRON_TOKEN = "opentelemetry-flask.token"
65
66
67 class _InstrumentedFlask(flask.Flask):
68 def __init__(self, *args, **kwargs):
69
70 super().__init__(*args, **kwargs)
71
72 # Single use variable here to avoid recursion issues.
73 wsgi = self.wsgi_app
74
75 def wrapped_app(environ, start_response):
76 # We want to measure the time for route matching, etc.
77 # In theory, we could start the span here and use
78 # update_name later but that API is "highly discouraged" so
79 # we better avoid it.
80 environ[_ENVIRON_STARTTIME_KEY] = time_ns()
81
82 def _start_response(status, response_headers, *args, **kwargs):
83 span = flask.request.environ.get(_ENVIRON_SPAN_KEY)
84 if span:
85 otel_wsgi.add_response_attributes(
86 span, status, response_headers
87 )
88 else:
89 logger.warning(
90 "Flask environ's OpenTelemetry span "
91 "missing at _start_response(%s)",
92 status,
93 )
94
95 return start_response(
96 status, response_headers, *args, **kwargs
97 )
98
99 return wsgi(environ, _start_response)
100
101 self.wsgi_app = wrapped_app
102
103 @self.before_request
104 def _before_flask_request():
105 environ = flask.request.environ
106 span_name = (
107 flask.request.endpoint
108 or otel_wsgi.get_default_span_name(environ)
109 )
110 token = context.attach(
111 propagators.extract(otel_wsgi.get_header_from_environ, environ)
112 )
113
114 tracer = trace.get_tracer(__name__, __version__)
115
116 attributes = otel_wsgi.collect_request_attributes(environ)
117 if flask.request.url_rule:
118 # For 404 that result from no route found, etc, we
119 # don't have a url_rule.
120 attributes["http.route"] = flask.request.url_rule.rule
121 span = tracer.start_span(
122 span_name,
123 kind=trace.SpanKind.SERVER,
124 attributes=attributes,
125 start_time=environ.get(_ENVIRON_STARTTIME_KEY),
126 )
127 activation = tracer.use_span(span, end_on_exit=True)
128 activation.__enter__()
129 environ[_ENVIRON_ACTIVATION_KEY] = activation
130 environ[_ENVIRON_SPAN_KEY] = span
131 environ[_ENVIRON_TOKEN] = token
132
133 @self.teardown_request
134 def _teardown_flask_request(exc):
135 activation = flask.request.environ.get(_ENVIRON_ACTIVATION_KEY)
136 if not activation:
137 logger.warning(
138 "Flask environ's OpenTelemetry activation missing"
139 "at _teardown_flask_request(%s)",
140 exc,
141 )
142 return
143
144 if exc is None:
145 activation.__exit__(None, None, None)
146 else:
147 activation.__exit__(
148 type(exc), exc, getattr(exc, "__traceback__", None)
149 )
150 context.detach(flask.request.environ.get(_ENVIRON_TOKEN))
151
152
153 class FlaskInstrumentor(BaseInstrumentor):
154 """A instrumentor for flask.Flask
155
156 See `BaseInstrumentor`
157 """
158
159 def __init__(self):
160 super().__init__()
161 self._original_flask = None
162
163 def _instrument(self):
164 self._original_flask = flask.Flask
165 flask.Flask = _InstrumentedFlask
166
167 def _uninstrument(self):
168 flask.Flask = self._original_flask
169
[end of ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py
--- a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py
+++ b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py
@@ -160,9 +160,9 @@
super().__init__()
self._original_flask = None
- def _instrument(self):
+ def _instrument(self, **kwargs):
self._original_flask = flask.Flask
flask.Flask = _InstrumentedFlask
- def _uninstrument(self):
+ def _uninstrument(self, **kwargs):
flask.Flask = self._original_flask
diff --git a/opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py b/opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py
--- a/opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py
+++ b/opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py
@@ -26,22 +26,29 @@
class BaseInstrumentor(ABC):
"""An ABC for instrumentors"""
- def __init__(self):
- self._is_instrumented = False
+ _instance = None
+ _is_instrumented = False
+
+ def __new__(cls):
+
+ if cls._instance is None:
+ cls._instance = object.__new__(cls)
+
+ return cls._instance
@abstractmethod
- def _instrument(self) -> None:
+ def _instrument(self, **kwargs):
"""Instrument"""
@abstractmethod
- def _uninstrument(self) -> None:
+ def _uninstrument(self, **kwargs):
"""Uninstrument"""
- def instrument(self) -> None:
+ def instrument(self, **kwargs):
"""Instrument"""
if not self._is_instrumented:
- result = self._instrument()
+ result = self._instrument(**kwargs)
self._is_instrumented = True
return result
@@ -49,11 +56,11 @@
return None
- def uninstrument(self) -> None:
+ def uninstrument(self, **kwargs):
"""Uninstrument"""
if self._is_instrumented:
- result = self._uninstrument()
+ result = self._uninstrument(**kwargs)
self._is_instrumented = False
return result
| {"golden_diff": "diff --git a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py\n--- a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py\n+++ b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py\n@@ -160,9 +160,9 @@\n super().__init__()\n self._original_flask = None\n \n- def _instrument(self):\n+ def _instrument(self, **kwargs):\n self._original_flask = flask.Flask\n flask.Flask = _InstrumentedFlask\n \n- def _uninstrument(self):\n+ def _uninstrument(self, **kwargs):\n flask.Flask = self._original_flask\ndiff --git a/opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py b/opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py\n--- a/opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py\n+++ b/opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py\n@@ -26,22 +26,29 @@\n class BaseInstrumentor(ABC):\n \"\"\"An ABC for instrumentors\"\"\"\n \n- def __init__(self):\n- self._is_instrumented = False\n+ _instance = None\n+ _is_instrumented = False\n+\n+ def __new__(cls):\n+\n+ if cls._instance is None:\n+ cls._instance = object.__new__(cls)\n+\n+ return cls._instance\n \n @abstractmethod\n- def _instrument(self) -> None:\n+ def _instrument(self, **kwargs):\n \"\"\"Instrument\"\"\"\n \n @abstractmethod\n- def _uninstrument(self) -> None:\n+ def _uninstrument(self, **kwargs):\n \"\"\"Uninstrument\"\"\"\n \n- def instrument(self) -> None:\n+ def instrument(self, **kwargs):\n \"\"\"Instrument\"\"\"\n \n if not self._is_instrumented:\n- result = self._instrument()\n+ result = self._instrument(**kwargs)\n self._is_instrumented = True\n return result\n \n@@ -49,11 +56,11 @@\n \n return None\n \n- def uninstrument(self) -> None:\n+ def uninstrument(self, **kwargs):\n \"\"\"Uninstrument\"\"\"\n \n if self._is_instrumented:\n- result = self._uninstrument()\n+ result = self._uninstrument(**kwargs)\n self._is_instrumented = False\n return result\n", "issue": "Add some support for programmatic instrumentation\n[This comment](https://github.com/open-telemetry/opentelemetry-python/pull/551#issuecomment-608993167) in #551 raised the concern about making it possible to use the instrumentors in a programmatic way too, not only automatically.\r\n\r\nUsing them programmatically means adding code directly in the application that performs instrumentation, for example, [this](https://github.com/open-telemetry/opentelemetry-python/pull/551/files#diff-906a392e80f621fdbc1ea38af3c41081R19) or [this](https://github.com/open-telemetry/opentelemetry-python/pull/551/files#diff-906a392e80f621fdbc1ea38af3c41081L22).\r\n\r\nIt is very likely that this programmatic use of the instrumentors needs to be implemented in a framework-specific way, this means that the way we can do this kind of instrumentation is different for Flask and is different too for Django, for example. 
That means that it is possible that we end up having just special code for each framework as opposed to having a universal mechanism that works for every framework (this would mean that this issue could end up being closed without any fix).\r\n\r\nThis programmatic instrumentation mechanisms can be more sophisticated, for example, in an OpenTracing-related project [here](https://github.com/opentracing-contrib/python-django/blob/master/README.rst#tracing-individual-requests) it is shown how the programmatic instrumentation mechanism allows the user to select which application functions are to be traced.\r\n\r\nIn summary, what we want to introduce with this issue, is the capability to perform instrumentation without having to directly write the code that creates the spans but also without using the `opentelemetry-auto-instrumentation` command, but with a different mechanism (probably specifically tailored for each framework (Django, Flask, etc.)) like decorators or maybe Python context managers (the ones that use `with`, to avoid any confusion with the OpenTelemetry concept of context :slightly_smiling_face:).\r\n\r\n@mauriciovasquezbernal @codeboten we have just talked about this :+1:\r\n\r\nAlso, to make things clear, with this approach we would have 3 ways of doing instrumentation:\r\n1. The \"normal\" way (instantiating the spans directly with OpenTelemetry-provided functions)\r\n2. The automatic way (using the `opentelemetry-auto-instrumentation` command)\r\n3. The programmatic way (using a mechanism as described here to instrument specific frameworks)\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# type: ignore\n\n\"\"\"\nOpenTelemetry Base Instrumentor\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom logging import getLogger\n\n_LOG = getLogger(__name__)\n\n\nclass BaseInstrumentor(ABC):\n \"\"\"An ABC for instrumentors\"\"\"\n\n def __init__(self):\n self._is_instrumented = False\n\n @abstractmethod\n def _instrument(self) -> None:\n \"\"\"Instrument\"\"\"\n\n @abstractmethod\n def _uninstrument(self) -> None:\n \"\"\"Uninstrument\"\"\"\n\n def instrument(self) -> None:\n \"\"\"Instrument\"\"\"\n\n if not self._is_instrumented:\n result = self._instrument()\n self._is_instrumented = True\n return result\n\n _LOG.warning(\"Attempting to instrument while already instrumented\")\n\n return None\n\n def uninstrument(self) -> None:\n \"\"\"Uninstrument\"\"\"\n\n if self._is_instrumented:\n result = self._uninstrument()\n self._is_instrumented = False\n return result\n\n _LOG.warning(\"Attempting to uninstrument while already uninstrumented\")\n\n return None\n\n\n__all__ = [\"BaseInstrumentor\"]\n", "path": "opentelemetry-auto-instrumentation/src/opentelemetry/auto_instrumentation/instrumentor.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a 
copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Note: This package is not named \"flask\" because of\n# https://github.com/PyCQA/pylint/issues/2648\n\n\"\"\"\nThis library builds on the OpenTelemetry WSGI middleware to track web requests\nin Flask applications. In addition to opentelemetry-ext-wsgi, it supports\nflask-specific features such as:\n\n* The Flask endpoint name is used as the Span name.\n* The ``http.route`` Span attribute is set so that one can see which URL rule\n matched a request.\n\nUsage\n-----\n\n.. code-block:: python\n\n from opentelemetry.ext.flask import FlaskInstrumentor\n FlaskInstrumentor().instrument() # This needs to be executed before importing Flask\n from flask import Flask\n\n app = Flask(__name__)\n\n @app.route(\"/\")\n def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\nAPI\n---\n\"\"\"\n\nimport logging\n\nimport flask\n\nimport opentelemetry.ext.wsgi as otel_wsgi\nfrom opentelemetry import context, propagators, trace\nfrom opentelemetry.auto_instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.ext.flask.version import __version__\nfrom opentelemetry.util import time_ns\n\nlogger = logging.getLogger(__name__)\n\n_ENVIRON_STARTTIME_KEY = \"opentelemetry-flask.starttime_key\"\n_ENVIRON_SPAN_KEY = \"opentelemetry-flask.span_key\"\n_ENVIRON_ACTIVATION_KEY = \"opentelemetry-flask.activation_key\"\n_ENVIRON_TOKEN = \"opentelemetry-flask.token\"\n\n\nclass _InstrumentedFlask(flask.Flask):\n def __init__(self, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n\n # Single use variable here to avoid recursion issues.\n wsgi = self.wsgi_app\n\n def wrapped_app(environ, start_response):\n # We want to measure the time for route matching, etc.\n # In theory, we could start the span here and use\n # update_name later but that API is \"highly discouraged\" so\n # we better avoid it.\n environ[_ENVIRON_STARTTIME_KEY] = time_ns()\n\n def _start_response(status, response_headers, *args, **kwargs):\n span = flask.request.environ.get(_ENVIRON_SPAN_KEY)\n if span:\n otel_wsgi.add_response_attributes(\n span, status, response_headers\n )\n else:\n logger.warning(\n \"Flask environ's OpenTelemetry span \"\n \"missing at _start_response(%s)\",\n status,\n )\n\n return start_response(\n status, response_headers, *args, **kwargs\n )\n\n return wsgi(environ, _start_response)\n\n self.wsgi_app = wrapped_app\n\n @self.before_request\n def _before_flask_request():\n environ = flask.request.environ\n span_name = (\n flask.request.endpoint\n or otel_wsgi.get_default_span_name(environ)\n )\n token = context.attach(\n propagators.extract(otel_wsgi.get_header_from_environ, environ)\n )\n\n tracer = trace.get_tracer(__name__, __version__)\n\n attributes = otel_wsgi.collect_request_attributes(environ)\n if flask.request.url_rule:\n # For 404 that result from no route found, etc, we\n # don't have a url_rule.\n attributes[\"http.route\"] = flask.request.url_rule.rule\n span = tracer.start_span(\n span_name,\n kind=trace.SpanKind.SERVER,\n attributes=attributes,\n start_time=environ.get(_ENVIRON_STARTTIME_KEY),\n )\n activation = tracer.use_span(span, end_on_exit=True)\n 
activation.__enter__()\n environ[_ENVIRON_ACTIVATION_KEY] = activation\n environ[_ENVIRON_SPAN_KEY] = span\n environ[_ENVIRON_TOKEN] = token\n\n @self.teardown_request\n def _teardown_flask_request(exc):\n activation = flask.request.environ.get(_ENVIRON_ACTIVATION_KEY)\n if not activation:\n logger.warning(\n \"Flask environ's OpenTelemetry activation missing\"\n \"at _teardown_flask_request(%s)\",\n exc,\n )\n return\n\n if exc is None:\n activation.__exit__(None, None, None)\n else:\n activation.__exit__(\n type(exc), exc, getattr(exc, \"__traceback__\", None)\n )\n context.detach(flask.request.environ.get(_ENVIRON_TOKEN))\n\n\nclass FlaskInstrumentor(BaseInstrumentor):\n \"\"\"A instrumentor for flask.Flask\n\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._original_flask = None\n\n def _instrument(self):\n self._original_flask = flask.Flask\n flask.Flask = _InstrumentedFlask\n\n def _uninstrument(self):\n flask.Flask = self._original_flask\n", "path": "ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py"}]} | 3,283 | 588 |
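One detail of the fix above worth calling out: `BaseInstrumentor.__new__` now caches a single instance per class, so constructing an instrumentor in several places manipulates the same `_is_instrumented` state, and keyword arguments passed to `instrument()` are forwarded to `_instrument()`. A toy sketch (the subclass and the keyword argument are made up for illustration):

```python
from opentelemetry.auto_instrumentation.instrumentor import BaseInstrumentor


class DemoInstrumentor(BaseInstrumentor):
    def _instrument(self, **kwargs):
        print("instrumenting with", kwargs)

    def _uninstrument(self, **kwargs):
        print("uninstrumenting")


assert DemoInstrumentor() is DemoInstrumentor()      # same cached instance
DemoInstrumentor().instrument(example_option=True)   # kwargs reach _instrument()
DemoInstrumentor().instrument()                      # logs an "already instrumented" warning
```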
gh_patches_debug_17174 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-2956 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add exception handling for hasattr because it could throw
Fixed https://github.com/mitmproxy/mitmproxy/issues/2849
</issue>
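The title is terse, so a short illustration of the failure mode may help: on Python 3, `hasattr` only swallows `AttributeError`, so any other exception raised from a `__getattr__` (or a property) propagates to the caller. A sketch with a hypothetical addon class, mirroring the guard the patch adds:

```python
class BrokenAddon:
    def __getattr__(self, name):
        # e.g. a lazily initialised attribute that fails to resolve
        raise RuntimeError(f"cannot resolve {name!r}")


addon = BrokenAddon()

# A bare hasattr(addon, "command_path") would raise RuntimeError here,
# so the check is wrapped the same way collect_commands() now does it:
try:
    is_command = hasattr(addon, "command_path")
except Exception:
    is_command = False  # hasattr may raise if the object implements __getattr__
print(is_command)       # False
```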
<code>
[start of mitmproxy/command.py]
1 """
2 This module manges and invokes typed commands.
3 """
4 import inspect
5 import types
6 import io
7 import typing
8 import shlex
9 import textwrap
10 import functools
11 import sys
12
13 from mitmproxy import exceptions
14 import mitmproxy.types
15
16
17 def verify_arg_signature(f: typing.Callable, args: list, kwargs: dict) -> None:
18 sig = inspect.signature(f)
19 try:
20 sig.bind(*args, **kwargs)
21 except TypeError as v:
22 raise exceptions.CommandError("command argument mismatch: %s" % v.args[0])
23
24
25 def lexer(s):
26 # mypy mis-identifies shlex.shlex as abstract
27 lex = shlex.shlex(s, posix=True) # type: ignore
28 lex.wordchars += "."
29 lex.whitespace_split = True
30 lex.commenters = ''
31 return lex
32
33
34 def typename(t: type) -> str:
35 """
36 Translates a type to an explanatory string.
37 """
38 to = mitmproxy.types.CommandTypes.get(t, None)
39 if not to:
40 raise NotImplementedError(t)
41 return to.display
42
43
44 class Command:
45 def __init__(self, manager, path, func) -> None:
46 self.path = path
47 self.manager = manager
48 self.func = func
49 sig = inspect.signature(self.func)
50 self.help = None
51 if func.__doc__:
52 txt = func.__doc__.strip()
53 self.help = "\n".join(textwrap.wrap(txt))
54
55 self.has_positional = False
56 for i in sig.parameters.values():
57 # This is the kind for *args parameters
58 if i.kind == i.VAR_POSITIONAL:
59 self.has_positional = True
60 self.paramtypes = [v.annotation for v in sig.parameters.values()]
61 self.returntype = sig.return_annotation
62
63 def paramnames(self) -> typing.Sequence[str]:
64 v = [typename(i) for i in self.paramtypes]
65 if self.has_positional:
66 v[-1] = "*" + v[-1]
67 return v
68
69 def retname(self) -> str:
70 return typename(self.returntype) if self.returntype else ""
71
72 def signature_help(self) -> str:
73 params = " ".join(self.paramnames())
74 ret = self.retname()
75 if ret:
76 ret = " -> " + ret
77 return "%s %s%s" % (self.path, params, ret)
78
79 def prepare_args(self, args: typing.Sequence[str]) -> typing.List[typing.Any]:
80 verify_arg_signature(self.func, list(args), {})
81
82 remainder = [] # type: typing.Sequence[str]
83 if self.has_positional:
84 remainder = args[len(self.paramtypes) - 1:]
85 args = args[:len(self.paramtypes) - 1]
86
87 pargs = []
88 for arg, paramtype in zip(args, self.paramtypes):
89 pargs.append(parsearg(self.manager, arg, paramtype))
90 pargs.extend(remainder)
91 return pargs
92
93 def call(self, args: typing.Sequence[str]) -> typing.Any:
94 """
95 Call the command with a list of arguments. At this point, all
96 arguments are strings.
97 """
98 pargs = self.prepare_args(args)
99
100 with self.manager.master.handlecontext():
101 ret = self.func(*pargs)
102
103 if ret is None and self.returntype is None:
104 return
105 typ = mitmproxy.types.CommandTypes.get(self.returntype)
106 if not typ.is_valid(self.manager, typ, ret):
107 raise exceptions.CommandError(
108 "%s returned unexpected data - expected %s" % (
109 self.path, typ.display
110 )
111 )
112 return ret
113
114
115 ParseResult = typing.NamedTuple(
116 "ParseResult",
117 [
118 ("value", str),
119 ("type", typing.Type),
120 ("valid", bool),
121 ],
122 )
123
124
125 class CommandManager(mitmproxy.types._CommandBase):
126 def __init__(self, master):
127 self.master = master
128 self.commands = {} # type: typing.Dict[str, Command]
129
130 def collect_commands(self, addon):
131 for i in dir(addon):
132 if not i.startswith("__"):
133 o = getattr(addon, i)
134 if hasattr(o, "command_path"):
135 self.add(o.command_path, o)
136
137 def add(self, path: str, func: typing.Callable):
138 self.commands[path] = Command(self, path, func)
139
140 def parse_partial(
141 self,
142 cmdstr: str
143 ) -> typing.Tuple[typing.Sequence[ParseResult], typing.Sequence[str]]:
144 """
145 Parse a possibly partial command. Return a sequence of ParseResults and a sequence of remainder type help items.
146 """
147 buf = io.StringIO(cmdstr)
148 parts = [] # type: typing.List[str]
149 lex = lexer(buf)
150 while 1:
151 remainder = cmdstr[buf.tell():]
152 try:
153 t = lex.get_token()
154 except ValueError:
155 parts.append(remainder)
156 break
157 if not t:
158 break
159 parts.append(t)
160 if not parts:
161 parts = [""]
162 elif cmdstr.endswith(" "):
163 parts.append("")
164
165 parse = [] # type: typing.List[ParseResult]
166 params = [] # type: typing.List[type]
167 typ = None # type: typing.Type
168 for i in range(len(parts)):
169 if i == 0:
170 typ = mitmproxy.types.Cmd
171 if parts[i] in self.commands:
172 params.extend(self.commands[parts[i]].paramtypes)
173 elif params:
174 typ = params.pop(0)
175 if typ == mitmproxy.types.Cmd and params and params[0] == mitmproxy.types.Arg:
176 if parts[i] in self.commands:
177 params[:] = self.commands[parts[i]].paramtypes
178 else:
179 typ = mitmproxy.types.Unknown
180
181 to = mitmproxy.types.CommandTypes.get(typ, None)
182 valid = False
183 if to:
184 try:
185 to.parse(self, typ, parts[i])
186 except exceptions.TypeError:
187 valid = False
188 else:
189 valid = True
190
191 parse.append(
192 ParseResult(
193 value=parts[i],
194 type=typ,
195 valid=valid,
196 )
197 )
198
199 remhelp = [] # type: typing.List[str]
200 for x in params:
201 remt = mitmproxy.types.CommandTypes.get(x, None)
202 remhelp.append(remt.display)
203
204 return parse, remhelp
205
206 def call_args(self, path: str, args: typing.Sequence[str]) -> typing.Any:
207 """
208 Call a command using a list of string arguments. May raise CommandError.
209 """
210 if path not in self.commands:
211 raise exceptions.CommandError("Unknown command: %s" % path)
212 return self.commands[path].call(args)
213
214 def call(self, cmdstr: str):
215 """
216 Call a command using a string. May raise CommandError.
217 """
218 parts = list(lexer(cmdstr))
219 if not len(parts) >= 1:
220 raise exceptions.CommandError("Invalid command: %s" % cmdstr)
221 return self.call_args(parts[0], parts[1:])
222
223 def dump(self, out=sys.stdout) -> None:
224 cmds = list(self.commands.values())
225 cmds.sort(key=lambda x: x.signature_help())
226 for c in cmds:
227 for hl in (c.help or "").splitlines():
228 print("# " + hl, file=out)
229 print(c.signature_help(), file=out)
230 print(file=out)
231
232
233 def parsearg(manager: CommandManager, spec: str, argtype: type) -> typing.Any:
234 """
235 Convert a string to a argument to the appropriate type.
236 """
237 t = mitmproxy.types.CommandTypes.get(argtype, None)
238 if not t:
239 raise exceptions.CommandError("Unsupported argument type: %s" % argtype)
240 try:
241 return t.parse(manager, argtype, spec) # type: ignore
242 except exceptions.TypeError as e:
243 raise exceptions.CommandError from e
244
245
246 def command(path):
247 def decorator(function):
248 @functools.wraps(function)
249 def wrapper(*args, **kwargs):
250 verify_arg_signature(function, args, kwargs)
251 return function(*args, **kwargs)
252 wrapper.__dict__["command_path"] = path
253 return wrapper
254 return decorator
255
256
257 def argument(name, type):
258 """
259 Set the type of a command argument at runtime. This is useful for more
260 specific types such as mitmproxy.types.Choice, which we cannot annotate
261 directly as mypy does not like that.
262 """
263 def decorator(f: types.FunctionType) -> types.FunctionType:
264 assert name in f.__annotations__
265 f.__annotations__[name] = type
266 return f
267 return decorator
268
[end of mitmproxy/command.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/command.py b/mitmproxy/command.py
--- a/mitmproxy/command.py
+++ b/mitmproxy/command.py
@@ -1,5 +1,5 @@
"""
- This module manges and invokes typed commands.
+ This module manages and invokes typed commands.
"""
import inspect
import types
@@ -131,8 +131,13 @@
for i in dir(addon):
if not i.startswith("__"):
o = getattr(addon, i)
- if hasattr(o, "command_path"):
- self.add(o.command_path, o)
+ try:
+ is_command = hasattr(o, "command_path")
+ except Exception:
+ pass # hasattr may raise if o implements __getattr__.
+ else:
+ if is_command:
+ self.add(o.command_path, o)
def add(self, path: str, func: typing.Callable):
self.commands[path] = Command(self, path, func)
| {"golden_diff": "diff --git a/mitmproxy/command.py b/mitmproxy/command.py\n--- a/mitmproxy/command.py\n+++ b/mitmproxy/command.py\n@@ -1,5 +1,5 @@\n \"\"\"\n- This module manges and invokes typed commands.\n+ This module manages and invokes typed commands.\n \"\"\"\n import inspect\n import types\n@@ -131,8 +131,13 @@\n for i in dir(addon):\n if not i.startswith(\"__\"):\n o = getattr(addon, i)\n- if hasattr(o, \"command_path\"):\n- self.add(o.command_path, o)\n+ try:\n+ is_command = hasattr(o, \"command_path\")\n+ except Exception:\n+ pass # hasattr may raise if o implements __getattr__.\n+ else:\n+ if is_command:\n+ self.add(o.command_path, o)\n \n def add(self, path: str, func: typing.Callable):\n self.commands[path] = Command(self, path, func)\n", "issue": "Add exption handling for hasattr because it could throw\nFixed https://github.com/mitmproxy/mitmproxy/issues/2849\n", "before_files": [{"content": "\"\"\"\n This module manges and invokes typed commands.\n\"\"\"\nimport inspect\nimport types\nimport io\nimport typing\nimport shlex\nimport textwrap\nimport functools\nimport sys\n\nfrom mitmproxy import exceptions\nimport mitmproxy.types\n\n\ndef verify_arg_signature(f: typing.Callable, args: list, kwargs: dict) -> None:\n sig = inspect.signature(f)\n try:\n sig.bind(*args, **kwargs)\n except TypeError as v:\n raise exceptions.CommandError(\"command argument mismatch: %s\" % v.args[0])\n\n\ndef lexer(s):\n # mypy mis-identifies shlex.shlex as abstract\n lex = shlex.shlex(s, posix=True) # type: ignore\n lex.wordchars += \".\"\n lex.whitespace_split = True\n lex.commenters = ''\n return lex\n\n\ndef typename(t: type) -> str:\n \"\"\"\n Translates a type to an explanatory string.\n \"\"\"\n to = mitmproxy.types.CommandTypes.get(t, None)\n if not to:\n raise NotImplementedError(t)\n return to.display\n\n\nclass Command:\n def __init__(self, manager, path, func) -> None:\n self.path = path\n self.manager = manager\n self.func = func\n sig = inspect.signature(self.func)\n self.help = None\n if func.__doc__:\n txt = func.__doc__.strip()\n self.help = \"\\n\".join(textwrap.wrap(txt))\n\n self.has_positional = False\n for i in sig.parameters.values():\n # This is the kind for *args parameters\n if i.kind == i.VAR_POSITIONAL:\n self.has_positional = True\n self.paramtypes = [v.annotation for v in sig.parameters.values()]\n self.returntype = sig.return_annotation\n\n def paramnames(self) -> typing.Sequence[str]:\n v = [typename(i) for i in self.paramtypes]\n if self.has_positional:\n v[-1] = \"*\" + v[-1]\n return v\n\n def retname(self) -> str:\n return typename(self.returntype) if self.returntype else \"\"\n\n def signature_help(self) -> str:\n params = \" \".join(self.paramnames())\n ret = self.retname()\n if ret:\n ret = \" -> \" + ret\n return \"%s %s%s\" % (self.path, params, ret)\n\n def prepare_args(self, args: typing.Sequence[str]) -> typing.List[typing.Any]:\n verify_arg_signature(self.func, list(args), {})\n\n remainder = [] # type: typing.Sequence[str]\n if self.has_positional:\n remainder = args[len(self.paramtypes) - 1:]\n args = args[:len(self.paramtypes) - 1]\n\n pargs = []\n for arg, paramtype in zip(args, self.paramtypes):\n pargs.append(parsearg(self.manager, arg, paramtype))\n pargs.extend(remainder)\n return pargs\n\n def call(self, args: typing.Sequence[str]) -> typing.Any:\n \"\"\"\n Call the command with a list of arguments. 
At this point, all\n arguments are strings.\n \"\"\"\n pargs = self.prepare_args(args)\n\n with self.manager.master.handlecontext():\n ret = self.func(*pargs)\n\n if ret is None and self.returntype is None:\n return\n typ = mitmproxy.types.CommandTypes.get(self.returntype)\n if not typ.is_valid(self.manager, typ, ret):\n raise exceptions.CommandError(\n \"%s returned unexpected data - expected %s\" % (\n self.path, typ.display\n )\n )\n return ret\n\n\nParseResult = typing.NamedTuple(\n \"ParseResult\",\n [\n (\"value\", str),\n (\"type\", typing.Type),\n (\"valid\", bool),\n ],\n)\n\n\nclass CommandManager(mitmproxy.types._CommandBase):\n def __init__(self, master):\n self.master = master\n self.commands = {} # type: typing.Dict[str, Command]\n\n def collect_commands(self, addon):\n for i in dir(addon):\n if not i.startswith(\"__\"):\n o = getattr(addon, i)\n if hasattr(o, \"command_path\"):\n self.add(o.command_path, o)\n\n def add(self, path: str, func: typing.Callable):\n self.commands[path] = Command(self, path, func)\n\n def parse_partial(\n self,\n cmdstr: str\n ) -> typing.Tuple[typing.Sequence[ParseResult], typing.Sequence[str]]:\n \"\"\"\n Parse a possibly partial command. Return a sequence of ParseResults and a sequence of remainder type help items.\n \"\"\"\n buf = io.StringIO(cmdstr)\n parts = [] # type: typing.List[str]\n lex = lexer(buf)\n while 1:\n remainder = cmdstr[buf.tell():]\n try:\n t = lex.get_token()\n except ValueError:\n parts.append(remainder)\n break\n if not t:\n break\n parts.append(t)\n if not parts:\n parts = [\"\"]\n elif cmdstr.endswith(\" \"):\n parts.append(\"\")\n\n parse = [] # type: typing.List[ParseResult]\n params = [] # type: typing.List[type]\n typ = None # type: typing.Type\n for i in range(len(parts)):\n if i == 0:\n typ = mitmproxy.types.Cmd\n if parts[i] in self.commands:\n params.extend(self.commands[parts[i]].paramtypes)\n elif params:\n typ = params.pop(0)\n if typ == mitmproxy.types.Cmd and params and params[0] == mitmproxy.types.Arg:\n if parts[i] in self.commands:\n params[:] = self.commands[parts[i]].paramtypes\n else:\n typ = mitmproxy.types.Unknown\n\n to = mitmproxy.types.CommandTypes.get(typ, None)\n valid = False\n if to:\n try:\n to.parse(self, typ, parts[i])\n except exceptions.TypeError:\n valid = False\n else:\n valid = True\n\n parse.append(\n ParseResult(\n value=parts[i],\n type=typ,\n valid=valid,\n )\n )\n\n remhelp = [] # type: typing.List[str]\n for x in params:\n remt = mitmproxy.types.CommandTypes.get(x, None)\n remhelp.append(remt.display)\n\n return parse, remhelp\n\n def call_args(self, path: str, args: typing.Sequence[str]) -> typing.Any:\n \"\"\"\n Call a command using a list of string arguments. May raise CommandError.\n \"\"\"\n if path not in self.commands:\n raise exceptions.CommandError(\"Unknown command: %s\" % path)\n return self.commands[path].call(args)\n\n def call(self, cmdstr: str):\n \"\"\"\n Call a command using a string. 
May raise CommandError.\n \"\"\"\n parts = list(lexer(cmdstr))\n if not len(parts) >= 1:\n raise exceptions.CommandError(\"Invalid command: %s\" % cmdstr)\n return self.call_args(parts[0], parts[1:])\n\n def dump(self, out=sys.stdout) -> None:\n cmds = list(self.commands.values())\n cmds.sort(key=lambda x: x.signature_help())\n for c in cmds:\n for hl in (c.help or \"\").splitlines():\n print(\"# \" + hl, file=out)\n print(c.signature_help(), file=out)\n print(file=out)\n\n\ndef parsearg(manager: CommandManager, spec: str, argtype: type) -> typing.Any:\n \"\"\"\n Convert a string to a argument to the appropriate type.\n \"\"\"\n t = mitmproxy.types.CommandTypes.get(argtype, None)\n if not t:\n raise exceptions.CommandError(\"Unsupported argument type: %s\" % argtype)\n try:\n return t.parse(manager, argtype, spec) # type: ignore\n except exceptions.TypeError as e:\n raise exceptions.CommandError from e\n\n\ndef command(path):\n def decorator(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n verify_arg_signature(function, args, kwargs)\n return function(*args, **kwargs)\n wrapper.__dict__[\"command_path\"] = path\n return wrapper\n return decorator\n\n\ndef argument(name, type):\n \"\"\"\n Set the type of a command argument at runtime. This is useful for more\n specific types such as mitmproxy.types.Choice, which we cannot annotate\n directly as mypy does not like that.\n \"\"\"\n def decorator(f: types.FunctionType) -> types.FunctionType:\n assert name in f.__annotations__\n f.__annotations__[name] = type\n return f\n return decorator\n", "path": "mitmproxy/command.py"}]} | 3,155 | 215 |
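For context on the patch recorded above: hasattr() only suppresses AttributeError, so any other exception raised by an object's __getattr__ escapes the hasattr() call itself — which is exactly why the patched collect_commands() wraps it in try/except. Below is a minimal sketch of that failure mode and the guard; the Flaky class and its error message are hypothetical stand-ins for an addon, not mitmproxy code.

class Flaky:
    def __getattr__(self, name):
        # Attribute lookup fails for a reason other than "attribute missing".
        raise RuntimeError("attribute machinery exploded")


o = Flaky()

# hasattr() only catches AttributeError, so the RuntimeError above would
# propagate out of hasattr(); guarding it mirrors the patched
# collect_commands() logic, which simply skips such objects.
try:
    is_command = hasattr(o, "command_path")
except Exception:
    is_command = False  # treat the object as "not a command" instead of crashing

print(is_command)  # -> False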